code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from convlab2.nlu.jointBERT.multiwoz import BERTNLU
from convlab2.nlu.milu.multiwoz import MILU
from convlab2.dst.rule.multiwoz import RuleDST
from convlab2.policy.rule.multiwoz import RulePolicy
from convlab2.nlg.template.multiwoz import TemplateNLG
from convlab2.dialog_agent import PipelineAgent, BiSession
from convlab2.evaluator.multiwoz_eval import MultiWozEvaluator
from pprint import pprint
import random
import numpy as np
import torch
# Build the system-side dialogue agent as an NLU -> DST -> policy -> NLG pipeline.
sys_nlu = BERTNLU()
sys_dst = RuleDST()
sys_policy = RulePolicy()
sys_nlg = TemplateNLG(is_user=False)
sys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, name='sys')
# Smoke-test the system agent with a few canned user turns (hotel, then restaurant).
sys_agent.response("I want to find a moderate hotel")
sys_agent.response("Which type of hotel is it ?")
sys_agent.response("OK , where is its address ?")
sys_agent.response("Thank you !")
sys_agent.response("Try to find me a chinese restaurant in south area .")
sys_agent.response("Which kind of food it provides ?")
sys_agent.response("Book a table for 5 , this Sunday .")
## Simulator
# Build the user simulator; the rule user policy works on dialog acts directly,
# so no dialogue-state tracker is needed (user_dst = None).
user_nlu = MILU()
user_dst = None
user_policy = RulePolicy(character='usr')
user_nlg = TemplateNLG(is_user=True)
user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user')
# Evaluator scores the dialogue against the simulated user's goal.
evaluator = MultiWozEvaluator()
sess = BiSession(sys_agent=sys_agent, user_agent=user_agent, kb_query=None, evaluator=evaluator)
# +
def set_seed(r_seed):
    """Seed Python's, NumPy's and PyTorch's RNGs so runs are reproducible.

    (Indentation of the body was lost in the jupytext export; restored here.)
    """
    random.seed(r_seed)
    np.random.seed(r_seed)
    torch.manual_seed(r_seed)
# Run one simulated dialogue (at most 20 turns) and report evaluator metrics.
# Loop-body indentation was lost in the jupytext export; restored here.
set_seed(20200131)
sys_response = ''
sess.init_session()
print("init goal:")
pprint(sess.evaluator.goal)
print("_"*50)
for i in range(20):
    # Each turn: user speaks in response to the last system utterance,
    # then the system replies; `session_over` is set by the user policy.
    sys_response, user_response, session_over, reward = sess.next_turn(sys_response)
    print('user:', user_response)
    print('sys:', sys_response)
    print()
    if session_over is True:
        break
print('task success:', sess.evaluator.task_success())
print('book rate:', sess.evaluator.book_rate())
print('inform precision/recall/f1:', sess.evaluator.inform_F1())
print('-'*50)
print('final goal:')
pprint(sess.evaluator.goal)
print('='*100)
# +
## Trying out new config
# +
from convlab2.nlu.svm.multiwoz import SVMNLU
from convlab2.nlu.jointBERT.multiwoz import BERTNLU
from convlab2.nlu.milu.multiwoz import MILU
from convlab2.dst.rule.multiwoz import RuleDST
#from convlab2.dst.mdbt.multiwoz import MDBT
from convlab2.dst.sumbt.multiwoz import SUMBT
from convlab2.dst.trade.multiwoz import TRADE
from convlab2.policy.rule.multiwoz import RulePolicy
from convlab2.policy.ppo.multiwoz import PPOPolicy
from convlab2.policy.pg.multiwoz import PGPolicy
from convlab2.policy.mle.multiwoz import MLEPolicy
from convlab2.policy.gdpl.multiwoz import GDPLPolicy
from convlab2.policy.vhus.multiwoz import UserPolicyVHUS
from convlab2.policy.mdrg.multiwoz import MDRGWordPolicy
from convlab2.policy.hdsa.multiwoz import HDSA
from convlab2.policy.larl.multiwoz import LaRL
from convlab2.nlg.template.multiwoz import TemplateNLG
from convlab2.nlg.sclstm.multiwoz import SCLSTM
from convlab2.e2e.sequicity.multiwoz import Sequicity
from convlab2.e2e.damd.multiwoz import Damd
# -
# Second configuration: MILU-based pipeline for the system side.
sys_nlu = MILU()
sys_dst = RuleDST()
sys_policy = RulePolicy()
sys_nlg = TemplateNLG(is_user=False)
sys_agent = PipelineAgent(sys_nlu, sys_dst, sys_policy, sys_nlg, 'sys')
# NOTE(review): this immediately discards the pipeline built above and uses the
# end-to-end Damd model as the system agent instead — presumably intentional
# while "trying out new config", but the five lines above are then dead.
sys_agent = Damd()
user_nlu = MILU()
user_dst = None
user_policy = RulePolicy(character='usr')
user_nlg = TemplateNLG(is_user=True)
user_agent = PipelineAgent(user_nlu, user_dst, user_policy, user_nlg, name='user')
# +
from convlab2.util.analysis_tool.analyzer import Analyzer
# Run ConvLab's analyzer: simulates `total_dialog` dialogues and aggregates stats.
analyzer = Analyzer(user_agent=user_agent, dataset='multiwoz')
set_seed(20200131)
analyzer.comprehensive_analyze(sys_agent=sys_agent, model_name='sys_agent', total_dialog=100)
# -
| ws/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import argparse
import copy
import collections
import numpy as np
import pandas as pd
from datetime import datetime
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow.keras import layers
from tensorflow.keras.utils import to_categorical
from sklearn import metrics
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from xgboost.sklearn import XGBClassifier
import scikitplot as skplt
import matplotlib.pyplot as plt
import seaborn as sns
from data_handler_detection import DataHandler
tf.keras.backend.clear_session() # For easy reset of notebook state.
# +
# Setup plots
# %matplotlib inline
# Default figure size for all plots in this notebook.
plt.rcParams['figure.figsize'] = 8, 8
# %config InlineBackend.figure_format = 'retina'
sns.set()
# Utils
def plot_loss_acc(history):
    """Plot training and (optionally) validation loss and accuracy.

    Expects a Keras ``History`` whose ``history`` dict has a 'loss' key and a
    'sparse_categorical_accuracy' key; 'val_loss'/'val_acc' are plotted if
    present. (Body indentation was lost in the jupytext export; restored.)
    """
    loss = history.history['loss']
    epochs = range(1, len(loss) + 1)
    plt.figure(figsize=(10, 10))
    # Top panel: loss curves.
    plt.subplot(2, 1, 1)
    plt.plot(epochs, loss, '.--', label='Training loss')
    final_loss = loss[-1]
    title = 'Training loss: {:.4f}'.format(final_loss)
    plt.ylabel('Loss')
    if 'val_loss' in history.history:
        val_loss = history.history['val_loss']
        plt.plot(epochs, val_loss, 'o-', label='Validation loss')
        final_val_loss = val_loss[-1]
        title += ', Validation loss: {:.4f}'.format(final_val_loss)
    plt.title(title)
    plt.legend()
    # Bottom panel: accuracy curves.
    acc = history.history['sparse_categorical_accuracy']
    plt.subplot(2, 1, 2)
    plt.plot(epochs, acc, '.--', label='Training acc')
    final_acc = acc[-1]
    title = 'Training accuracy: {:.2f}%'.format(final_acc * 100)
    plt.xlabel('Epochs')
    plt.ylabel('sparse_categorical_accuracy')
    if 'val_acc' in history.history:
        val_acc = history.history['val_acc']
        plt.plot(epochs, val_acc, 'o-', label='Validation sparse_categorical_accuracy')
        final_val_acc = val_acc[-1]
        title += ', Validation accuracy: {:.2f}%'.format(final_val_acc * 100)
    plt.title(title)
    plt.legend()
# +
# parse arguments
## general
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--working_path', default='.')
## data
arg_parser.add_argument('dataset_name', default='mimic3',
                        help='The data files should be saved in [working_path]/data/[dataset_name] directory.')
arg_parser.add_argument('label_name', default='mortality')
arg_parser.add_argument('--max_timesteps', type=int, default=200,
                        help='Time series of at most # time steps are used. Default: 200.')
arg_parser.add_argument('--max_timestamp', type=int, default=48*60*60,
                        help='Time series of at most # seconds are used. Default: 48 (hours).')
## model
# The dim arguments take a comma-separated list; an empty string yields [].
# (Note: argparse applies `type` to string defaults, so the default is [64].)
arg_parser.add_argument('--recurrent_dim', type=lambda x: x and [int(xx) for xx in x.split(',')] or [], default='64')
arg_parser.add_argument('--hidden_dim', type=lambda x: x and [int(xx) for xx in x.split(',')] or [], default='64')
arg_parser.add_argument('--model', default='GRUD', choices=['GRUD', 'GRUforward', 'GRU0', 'GRUsimple'])
arg_parser.add_argument('--use_bidirectional_rnn', default=False)
## training
arg_parser.add_argument('--pretrained_model_file', default=None,
                        help='If pre-trained model is provided, training will be skipped.')  # e.g., [model_name]_[i_fold].h5
arg_parser.add_argument('--epochs', type=int, default=100)
arg_parser.add_argument('--early_stopping_patience', type=int, default=10)
arg_parser.add_argument('--batch_size', type=int, default=2)
## set the actual arguments if running in notebook
# (if/else indentation was lost in the jupytext export; restored here)
if not (__name__ == '__main__' and '__file__' in globals()):
    ARGS = arg_parser.parse_args([
        'detection',
        'phase',
        '--model', 'GRUD',
        '--hidden_dim', '',
        '--max_timestamp', '5807537',
        '--epochs', '100'
    ])
else:
    ARGS = arg_parser.parse_args()
#print('Arguments:', ARGS)
# -
# get dataset
dataset = DataHandler(
data_path=os.path.join(ARGS.working_path, 'data', ARGS.dataset_name),
label_name=ARGS.label_name,
max_steps=ARGS.max_timesteps,
max_timestamp=ARGS.max_timestamp
)
# ### Embedding
# Wrap the raw input records in a DataFrame and name the columns;
# rows with a missing 'risk_situation' label are dropped.
# NOTE(review): this reads the private attribute `dataset._data` — verify
# DataHandler exposes no public accessor before relying on it.
df = pd.DataFrame(dataset._data['input'])
df.columns = ["timestamp","name", "latitude", "longitude", "step","gsr","heart_rate","skin_temp","calories","risk_situation"]
df = df[pd.notnull(df['risk_situation'])]
df.sample(100)
def embeding(df):
    """Label-encode every column of *df*.

    Each column is cast to a pandas Categorical and replaced by its integer
    category codes (missing values become -1). Returns a new DataFrame;
    *df* itself is not modified. (Function name kept as in the notebook —
    'embedding' is the intended word. Body indentation restored; the unused
    per-column `values` loop variable was dropped.)
    """
    df_copy = copy.deepcopy(df)
    for header in df_copy.columns:
        df_copy[header] = pd.Categorical(df_copy[header])
        df_copy[header] = df_copy[header].cat.codes
    return df_copy
# Encode all columns as integer codes, then split features/labels 80/20.
df_copy = embeding(df)
df_copy.sample(5)
targets = df_copy.pop('risk_situation').to_numpy()
X_train, X_test, y_train, y_test = train_test_split(df_copy.values,
targets, test_size=0.2, random_state=0)
print(X_train.shape, y_train.shape)
print(X_test.shape, y_test.shape)
# <b>Could be an other way to load train and test</b><br>
# dataset = tf.data.Dataset.from_tensor_slices((X_train, y_train))<br>
# train_dataset = dataset.shuffle(len(X_train)).batch(500)<br>
# ### Simple NN
# +
def get_compiled_model(input_shape, output_shape):
    """Build and compile a small dense softmax classifier.

    One hidden ReLU layer of 9 units, a softmax head of `output_shape`
    classes, RMSprop optimizer and sparse categorical cross-entropy loss.
    Returns the compiled model (and prints its summary).

    (Body indentation restored after the jupytext export; the unused
    RectifiedAdam `opt` local and commented-out layers were removed —
    the model was compiled with RMSprop, not `opt`.)
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Dense(9, input_shape=input_shape, activation='relu'),
        tf.keras.layers.Dense(output_shape, activation='softmax')
    ])
    model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
                  loss=tf.keras.losses.sparse_categorical_crossentropy,
                  metrics=['sparse_categorical_accuracy'])
    model.summary()
    return model
# Number of output classes = distinct risk_situation categories in the raw df.
output_shape = len(pd.Categorical(df['risk_situation']).categories)
input_shape = X_train.shape[1:]
model = get_compiled_model(input_shape, output_shape)
# -
# Train the dense classifier and evaluate it on the held-out split.
history = model.fit(X_train, y_train, epochs=50, verbose=2)
# Show training results
plot_loss_acc(history)
# +
# Class predictions = argmax over the softmax probabilities per row.
result = model.predict(X_test)
predictions = [np.argmax(result[i])for i in range(result.shape[0])]
print('result.shape',result.shape)
print('y_test.shape',y_test.shape)
print(confusion_matrix(y_test, predictions))
print(metrics.precision_score(y_test, predictions, average='weighted'))
print(metrics.recall_score(y_test, predictions, average='weighted'))
print(metrics.f1_score(y_test, predictions, average='weighted'))
# -
skplt.metrics.plot_confusion_matrix(y_test, predictions, normalize=True)
plt.show()
result = model.evaluate(X_test, y_test, batch_size=128)
# ### XGBoost
# Baseline: gradient-boosted trees on the same split.
xgb2 = XGBClassifier(n_estimators=170, learning_rate=0.25)
histo_xgb = xgb2.fit(X_train, y_train)
print(xgb2.score(X_test, y_test))
predictions_xgb = histo_xgb.predict(X_test)
print('predicted',predictions_xgb)
print('true label',y_test)
# +
print(confusion_matrix(y_test, predictions_xgb))
print(metrics.precision_score(y_test, predictions_xgb,
average='weighted'))
print(metrics.recall_score(y_test, predictions_xgb, average='weighted'))
print(metrics.f1_score(y_test, predictions_xgb, average='weighted'))
# -
skplt.metrics.plot_confusion_matrix(y_test, predictions_xgb,
normalize=True)
plt.show()
# ### GRU
# +
# Reshape tabular rows to (samples, 9 features, 1) so they feed an RNN.
X1 = X_train.reshape(len(X_train), 9, 1)
input_shape = X1.shape[1:]
y1 = y_train.reshape(len(y_train), 1)
# NOTE(review): this `output_shape` is the number of training samples, not the
# class count, and is never used below — the head is hard-coded to Dense(6).
output_shape = len(y1)
model = tf.keras.Sequential()
model.add(layers.GRU(256, input_shape=input_shape,return_sequences=True))
model.add(layers.Dropout(0.5))
model.add(layers.SimpleRNN(128))
model.add(layers.Dense(6, activation='softmax'))
# NOTE(review): `opt` (RectifiedAdam) is created but unused; RMSprop is what
# the model is actually compiled with.
opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=['sparse_categorical_accuracy'])
model.summary()
# -
history_gru = model.fit(X1, y1, epochs=10, batch_size=600)
x_test = X_test.reshape(len(X_test), 9, 1)
Y_test = y_test.reshape(len(y_test), 1)
result_gru = model.predict(x_test)
predictions_gru = [np.argmax(result_gru[i])for i in range(result_gru.shape[0])]
print('predicted',len(predictions_gru))
print('true label',len(Y_test))
# +
print(confusion_matrix(Y_test, predictions_gru))
print('precision_score:',metrics.precision_score(Y_test, predictions_gru,
average='weighted'))
print('recall_score:',metrics.recall_score(Y_test, predictions_gru, average='weighted'))
print('f1_score:',metrics.f1_score(Y_test, predictions_gru, average='weighted'))
# -
skplt.metrics.plot_confusion_matrix(Y_test, predictions_gru,
normalize=True)
plt.show()
# ### LSTM
# +
# Same reshape-to-sequence trick as the GRU section above.
X = X_train.reshape(len(X_train), 9, 1)
y = y_train.reshape(len(y_train), 1)
# define model
model = tf.keras.Sequential()
model.add(layers.LSTM(30, input_shape=(9, 1)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(6))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(6, activation='softmax'))
# NOTE(review): `opt` is unused — the model is compiled with RMSprop below.
opt = tfa.optimizers.RectifiedAdam(lr=1e-3)
model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
loss=tf.keras.losses.sparse_categorical_crossentropy,
metrics=['sparse_categorical_accuracy'])
model.summary()
# fit model
history = model.fit(X, y, epochs=15, batch_size=1000, verbose=2)
# -
# evaluate model on new data
x_test = X_test.reshape(len(X_test), 9, 1)
Y_test = y_test.reshape(len(y_test), 1)
result_lstm = model.predict(x_test)
predictions_lstm = [np.argmax(result_lstm[i])for i in range(result_lstm.shape[0])]
print('predicted',len(predictions_lstm))
print('true label',len(Y_test))
# +
print(confusion_matrix(Y_test, predictions_lstm))
print('precision_score',metrics.precision_score(Y_test, predictions_lstm, average='weighted'))
print('recall_score:',metrics.recall_score(Y_test, predictions_lstm, average='weighted'))
print('f1_score:',metrics.f1_score(Y_test, predictions_lstm, average='weighted'))
# -
skplt.metrics.plot_confusion_matrix(Y_test, predictions_lstm,
normalize=False)
plt.show()
plot_loss_acc(history)
| Run_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp examples.causal_lm_gpt2
# +
#all_slow
# -
#hide
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# # Causal Language Modeling with GPT-2
#
# > This notebook demonstrates how we can use Blurr to train, or fine-tune, a causal language model against examples defined in individual files (similar to how the raw wiki-103 data comes). We demonstrate how to use `get_text_files` and create a custom `splitter` function to build our train and validation datasets.
# +
#export
from transformers import *
from fastai.text.all import *
from blurr.utils import *
from blurr.data.core import *
from blurr.data.language_modeling import *
from blurr.modeling.core import *
from blurr.modeling.language_modeling import *
# Silence transformers' info/warning chatter for the notebook.
logging.set_verbosity_error()
# +
#hide_input
import pdb
from fastcore.test import *
from nbverbose.showdoc import show_doc
# Avoid tokenizers' fork-related parallelism warnings in notebooks.
os.environ["TOKENIZERS_PARALLELISM"] = "false"
print("Here's what we're running with ...\n")
print_versions('torch fastai transformers')
# -
#cuda
#hide
# Pin work to GPU 1 on this multi-GPU box.
torch.cuda.set_device(1)
print(f'Using GPU #{torch.cuda.current_device()}: {torch.cuda.get_device_name()}')
# ## Get your data
# Raw corpus: one text file per example under train/ and valid/ subfolders.
raw_data_path = Path('./data/task-language-modeling/pt-2/')
raw_data_path.ls()
(raw_data_path/'train').ls()
len((raw_data_path/'train').ls()), len((raw_data_path/'valid').ls())
# ## Get your HF objects
# +
# Load the GPT-2 architecture/config/tokenizer/model via Blurr.
# GPT-2 ships without a pad token, so we add one and resize the embedding
# matrix to match the enlarged vocabulary. (The `if` body's indentation was
# lost in the jupytext export; restored here.)
model_cls = AutoModelForCausalLM
pretrained_model_name = "gpt2"
hf_arch, hf_config, hf_tokenizer, hf_model = BLURR.get_hf_objects(pretrained_model_name, model_cls=model_cls)
if (hf_tokenizer.pad_token is None):
    hf_tokenizer.add_special_tokens({'pad_token': '<pad>'})
    hf_config.pad_token_id = hf_tokenizer.get_vocab()['<pad>']
    hf_model.resize_token_embeddings(len(hf_tokenizer))
# -
# ## Build your DataBlock
# ### Define how to get the raw data
# the folders we want to grab the files from
get_wiki_files = partial(get_text_files, folders=['train', 'valid'])
fnames = get_wiki_files(raw_data_path)
fnames[0]
# ### Define how we want to split our validation and training datasets
# custom splitter to split on parent folder name
splitter = FuncSplitter(lambda fpath: Path(fpath).parent.name == 'valid')
splitter(fnames)
# ### Define our DataBlock using the appropriate Blurr transforms
# +
# our before_batch_tfm and HF_TextBlock updated for causal modeling task
bbtfm = HF_LMBeforeBatchTransform(hf_arch, hf_config, hf_tokenizer, hf_model, lm_strategy_cls=CausalLMStrategy)
blocks = (HF_TextBlock(before_batch_tfm=bbtfm, input_return_type=HF_CausalLMInput), noop)
# our DataBlock
dblock = DataBlock(
blocks=blocks,
get_x=lambda x: x.read_text(), # read each text file
get_items=get_wiki_files, # grab the text files
splitter=splitter # split on parent folder name (validation = 'valid')
)
# +
# dblock.summary(raw_data_path)
# -
# Small batch sizes: GPT-2 sequences are long and memory-hungry.
dls = dblock.dataloaders(raw_data_path, bs=2, val_bs=4)
b = dls.one_batch()
b[0]['input_ids'].shape, b[1].shape
dls.show_batch(dataloaders=dls, trunc_at=500, max_n=2)
# ## Train
# +
# Wrap the HF model for fastai and train with mixed precision; the loss is
# pre-calculated by the HF model itself (HF_PreCalculatedLoss).
model = HF_BaseModelWrapper(hf_model)
fit_cbs = [LM_MetricsCallback()]
learn = Learner(dls,
model,
opt_func=partial(Adam),
loss_func=HF_PreCalculatedLoss(),
cbs=[HF_BaseModelCallback],
metrics=[perplexity],
splitter=hf_splitter).to_fp16()
# learn.freeze()
# -
learn.lr_find(suggest_funcs=[minimum, steep, valley, slide])
learn.fit_one_cycle(1, lr_max=3e-3, cbs=fit_cbs)
learn.show_results(learner=learn, max_n=2, trunc_at=500)
learn.blurr_generate('Itália ( ), oficialmente República Italiana', max_length=100, do_sample=True, top_k=25)
# ## Summary
# This example demonstrates how to train a causal language model where the raw data examples are in individual files (similar to how the standard wikitext-103 is defined). We also defined a custom `splitter` function so as to put all the files under `/valid` as part of the validation set and all the files under `/train` in the training set.
#hide
from nbdev.export import notebook2script
notebook2script()
| nbs/99e_examples-causal-lm-gpt2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Code explanation
#
# At [this blog](http://nnormandin.com/science/2017/07/01/cvae.html) quite a few details of typical Keras models are explained. Note that older Keras versions had different ways to handle merging of layers as for a variational autoencoder (see e.g. [here](https://github.com/keras-team/keras/issues/3921)).
#
# However, I don't get why there is a `sample_z` function. The purpose of an adversarial autoencoder is that it would not need differentiable probability densities in the latent layer, or that's what I thought. The latent representation should be compared to samples from a normal distribution by the discriminator.
#
# Ah, that is actually in the original paper! The authors distinguish three different autoencoders. (1) The deterministic autoencoder (that's if you skip the layer containing random variables altogether). (2) An autoencoder that uses a Gaussian posterior. In this case we can indeed use the same renormalization trick as in Kingma and Welling. (3) A general autoencoder with a "universal approximate posterior" where we add noise to the input of the encoder.
#
# The network has to match q(z) to p(z) by only exploiting the stochasticity in the data distribution in the deterministic case. However, the authors found that for all different types an extensive sweep over hyperparameters did obtain similar test-likelihoods. All their reported results were subsequently with a deterministic autoencoder.
from keras.datasets import mnist
from keras.layers import Input, Dense, Reshape, Flatten, Dropout, multiply, GaussianNoise
from keras.layers import BatchNormalization, Activation, Embedding, ZeroPadding2D
from keras.layers import MaxPooling2D
from keras.layers import Lambda
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.convolutional import UpSampling2D, Conv2D
from keras.models import Sequential, Model
from keras.optimizers import Adam
from keras import losses
from keras.utils import to_categorical
import keras.backend as K
import matplotlib.pyplot as plt
import numpy as np
# In the case of a non-deterministic autoencoder we have a layer with random variables where we sample from using the renormalization trick. See my website on [inference](https://www.annevanrossum.com/blog/2018/01/30/inference-in-deep-learning/) and other [variance reduction methods](https://www.annevanrossum.com/blog/2018/05/26/random-gradients/).
def sample_z(args):
    """Reparameterization trick: z = mu + sigma * eps, with eps ~ N(0, 1).

    `args` is a [mu, log_var] pair of Keras tensors. Relies on the
    module-level `latent_dim` global for the noise shape.
    (Body indentation restored after the jupytext export.)
    """
    mu, log_var = args
    batch = K.shape(mu)[0]
    eps = K.random_normal(shape=(batch, latent_dim), mean=0., stddev=1.)
    return mu + K.exp(log_var / 2) * eps
# The encoder, discriminator, and decoder have layers of size 512 or 256 and are densely connected. I have not experimented much with the number of nodes. Regarding the activation function leaky rectifiers are used. A rectifier is a function of the form: $f(x) = \max(0,x)$, in other words, making sure the values don't go below zero, but not bounding it from above. The leaky rectifiers are defined through $f(x) = x$ for $x > 0$ and $f(x) = \alpha x$ otherwise. This makes it less likely to have them "stuck" when all their inputs become negative.
def build_encoder(latent_dim, img_shape):
    """Build the encoder: image -> latent representation.

    Two 512-unit dense layers with LeakyReLU. With `deterministic` set the
    latent layer is a plain Dense projection; otherwise mu/log_var heads
    feed the `sample_z` reparameterization.
    (Indentation restored after the jupytext export.)
    """
    deterministic = 1
    img = Input(shape=img_shape)
    h = Flatten()(img)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    h = Dense(512)(h)
    h = LeakyReLU(alpha=0.2)(h)
    if deterministic:
        latent_repr = Dense(latent_dim)(h)
    else:
        mu = Dense(latent_dim)(h)
        log_var = Dense(latent_dim)(h)
        latent_repr = Lambda(sample_z)([mu, log_var])
    return Model(img, latent_repr)
def build_discriminator(latent_dim):
    """Build the discriminator: latent vector -> validity probability.

    512/256 dense layers with LeakyReLU and a sigmoid output scoring
    whether a latent code came from the prior ("real") or the encoder
    ("fake"). (Indentation restored after the jupytext export.)
    """
    model = Sequential()
    model.add(Dense(512, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation="sigmoid"))
    encoded_repr = Input(shape=(latent_dim, ))
    validity = model(encoded_repr)
    return Model(encoded_repr, validity)
def build_decoder(latent_dim, img_shape):
    """Build the decoder: latent vector -> reconstructed image.

    Two 512-unit LeakyReLU layers, then a tanh output reshaped to
    `img_shape` (images are rescaled to [-1, 1] elsewhere, matching tanh).
    (Indentation restored after the jupytext export.)
    """
    model = Sequential()
    model.add(Dense(512, input_dim=latent_dim))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))
    z = Input(shape=(latent_dim,))
    img = model(z)
    return Model(z, img)
# The input are 28x28 images. The optimization used is Adam. The loss is binary cross-entropy.
# +
# MNIST image geometry and the size of the latent code.
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
# Results can be found in just_2_rv
#latent_dim = 2
latent_dim = 8
optimizer = Adam(0.0002, 0.5)
# Build and compile the discriminator
discriminator = build_discriminator(latent_dim)
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
# -
# Build the encoder / decoder
encoder = build_encoder(latent_dim, img_shape)
decoder = build_decoder(latent_dim, img_shape)
# +
# The generator takes the image, encodes it and reconstructs it
# from the encoding
img = Input(shape=img_shape)
encoded_repr = encoder(img)
reconstructed_img = decoder(encoded_repr)
# For the adversarial_autoencoder model we will only train the generator
# It will say something like:
# UserWarning: Discrepancy between trainable weights and collected trainable weights,
# did you set `model.trainable` without calling `model.compile` after ?
# We only set trainable to false for the discriminator when it is part of the autoencoder...
discriminator.trainable = False
# The discriminator determines validity of the encoding
validity = discriminator(encoded_repr)
# The adversarial_autoencoder model (stacked generator and discriminator)
# Loss = 0.999 * reconstruction MSE + 0.001 * adversarial BCE.
adversarial_autoencoder = Model(img, [reconstructed_img, validity])
adversarial_autoencoder.compile(loss=['mse', 'binary_crossentropy'], loss_weights=[0.999, 0.001], optimizer=optimizer)
# -
discriminator.summary()
# +
epochs=5000
batch_size=128
sample_interval=100
# Load the dataset
(X_train, _), (_, _) = mnist.load_data()
# Rescale -1 to 1
X_train = (X_train.astype(np.float32) - 127.5) / 127.5
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
# -
def sample_prior(latent_dim, batch_size):
    """Draw `batch_size` latent vectors from the N(0, 1) prior.

    Returns an array of shape (batch_size, latent_dim).
    (Body indentation restored after the jupytext export.)
    """
    return np.random.normal(size=(batch_size, latent_dim))
def sample_images(latent_dim, decoder, epoch):
    """Decode a 5x5 grid of prior samples and save it as images/mnist_<epoch>.png.

    BUG FIX: the original called ``sample_prior(r*c, latent_dim)``, swapping
    the function's ``(latent_dim, batch_size)`` parameters — that yields a
    (latent_dim, 25) matrix instead of 25 latent vectors. Fixed to pass
    ``(latent_dim, r*c)``, matching the call in the training loop.
    (Body indentation also restored after the jupytext export.)
    """
    r, c = 5, 5
    z = sample_prior(latent_dim, r*c)
    gen_imgs = decoder.predict(z)
    # Map tanh output from [-1, 1] back to [0, 1] for display.
    gen_imgs = 0.5 * gen_imgs + 0.5
    fig, axs = plt.subplots(r, c)
    cnt = 0
    for i in range(r):
        for j in range(c):
            axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
            axs[i,j].axis('off')
            cnt += 1
    fig.savefig("images/mnist_%d.png" % epoch)
    plt.close()
# # Training
#
# Each epoch a batch is chosen from the images at random. The typical batch size is 128 items out of 60,000 images. The chance to pick the same image twice is minimal (but not zero).
#
# The "real" latent variables for the encoder will be Normal distributed. They all have the same N(0,1) distribution, mu=0, sigma=1. The variables are returned in a 128x10 matrix if we use 10 latent variables.
#
# The discriminator doesn't know that there is order to the "real" and "fake" samples. We can just first train it on all the real ones and then all the fake ones. I don't know if it matters for the training, but we might try to actually build one data structure where this is randomized...
#
#
# Adversarial training loop (loop-body indentation restored after the
# jupytext export). Each "epoch" trains on one random mini-batch.
for epoch in range(epochs):
    # ---------------------
    #  Train Discriminator
    # ---------------------
    # Select a random batch of images
    idx = np.random.randint(0, X_train.shape[0], batch_size)
    imgs = X_train[idx]
    latent_fake = encoder.predict(imgs)
    # Here we generate the "TRUE" samples
    latent_real = sample_prior(latent_dim, batch_size)
    # Train the discriminator
    d_loss_real = discriminator.train_on_batch(latent_real, valid)
    d_loss_fake = discriminator.train_on_batch(latent_fake, fake)
    d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
    # ---------------------
    #  Train Generator
    # ---------------------
    # Train the generator (reconstruction + fooling the discriminator)
    g_loss = adversarial_autoencoder.train_on_batch(imgs, [imgs, valid])
    # Plot the progress (every 10th epoch)
    if epoch % 10 == 0:
        print ("%d [D loss: %f, acc: %.2f%%] [G loss: %f, mse: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss[0], g_loss[1]))
    # Save generated images (every sample interval, e.g. every 100th epoch)
    if epoch % sample_interval == 0:
        sample_images(latent_dim, decoder, epoch)
| Keras Adversarial Autoencoder MNIST.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GSD: Rpb1 orthologs in PB genomes
#
# This collects Rpb1 gene and protein sequences from a collection of PacBio sequenced yeast genomes from [Yue et al 2017](https://www.ncbi.nlm.nih.gov/pubmed/28416820), and then estimates the heptad repeats. (Added in [SGD yeast nuclear reference genome](https://sgd-archive.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/) as well even though PacBio set includes its own version of S288C.) It builds on the notebook [Searching for coding sequences in genomes using BLAST and Python](../Searching%20for%20coding%20sequences%20in%20genomes%20using%20BLAST%20and%20Python.ipynb) and use of PatMatch along with Python, the basics of which I illustrated [here](https://github.com/fomightez/patmatch-binder). (The same overall process is used to analyze over 1000 genomes [here](GSD%20Rpb1_orthologs_in_1011_genomes.ipynb).)
#
# References for sequence data:
# - [Contrasting evolutionary genome dynamics between domesticated and wild yeasts.
# <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Nat Genet. 2017 Jun;49(6):913-924. doi: 10.1038/ng.3847. Epub 2017 Apr 17. PMID: 28416820](https://www.ncbi.nlm.nih.gov/pubmed/28416820)
#
#
# - [Life with 6000 genes. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>. Science. 1996 Oct 25;274(5287):546, 563-7. PMID: 8849441](https://www.ncbi.nlm.nih.gov/pubmed/8849441)
#
#
# -----
# ## Overview
# 
# ## Preparation
#
# Get scripts and sequence data necessary.
#
# **DO NOT 'RUN ALL'. AN INTERACTION IS NECESSARY AT CELL FIVE. AFTER THAT INTERACTION, THE REST BELOW IT CAN BE RUN.**
#
# (Caveat: right now this is written for genes with no introns. Only a few hundred have in yeast and that is the organism in this example. Intron presence would only become important when trying to translate in late stages of this workflow.)
# Target gene and expected coding-sequence length (bp) for sanity checks.
gene_name = "RPB1"
size_expected = 5202
get_seq_from_link = False
link_to_FASTA_of_gene = "https://gist.githubusercontent.com/fomightez/f46b0624f1d8e3abb6ff908fc447e63b/raw/625eaba76bb54e16032f90c8812350441b753a0c/uz_S288C_YOR270C_VPH1_coding.fsa"
#**Possible future enhancement would be to add getting the FASTA of the gene from Yeastmine with just systematic id**
# Get the genomes data, the `blast_to_df` script, and sequence to search for matches in the genomes by running these commands.
# NOTE(review): the `# !curl`/`# !gunzip` lines below are Jupyter shell magics
# flattened to comments by the jupytext export; the if/for bodies they formed
# lost their indentation, so this cell only runs correctly as a notebook.
import os
file_needed = "blast_to_df.py"
if not os.path.isfile(file_needed):
# !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/blast-utilities/blast_to_df.py
import pandas as pd
# Prepare for getting PacBio (Yue et al 2017 sequences)
#make a list of the strain designations
yue_et_al_strains = ["S288C","DBVPG6044","DBVPG6765","SK1","Y12",
"YPS128","UWOPS034614","CBS432","N44","YPS138",
"UFRJ50816","UWOPS919171"]
# Get & unpack the genome sequences from strains
for s in yue_et_al_strains:
# !curl -LO http://yjx1217.github.io/Yeast_PacBio_2016/data/Nuclear_Genome/{s}.genome.fa.gz
# !gunzip -f {s}.genome.fa.gz
# +
# add identifiers to each `chr` so results for each strain clear later
chromosome_id_prefix = "chr"
def add_strain_id_to_description_line(file,strain_id):
    '''
    Prefix every FASTA description line in `file` with `strain_id`.

    Each line starting with ">" becomes ">" + strain_id + rest-of-line; all
    other lines are copied unchanged. The edited copy is written to
    `temp.txt`; in the notebook the `!mv` shell magic below then replaces the
    original file with it. Writes a brief progress note to stderr.
    (Body indentation restored after the jupytext export.)
    '''
    import sys
    output_file_name = "temp.txt"
    # prepare output file for saving so it will be open and ready
    with open(output_file_name, 'w') as output_file:
        # read in the input file
        with open(file, 'r') as input_handler:
            # prepare to give feedback later or allow skipping to certain start
            lines_processed = 0
            for line in input_handler:
                lines_processed += 1
                if line.startswith(">"):
                    # NOTE(review): splitting on ">" keeps only the text up to a
                    # second ">", if any — fine for these genome description lines.
                    rest_o_line = line.split(">")
                    new_line = ">"+strain_id + rest_o_line[1]
                else:
                    new_line = line
                # Send text to output
                output_file.write(new_line)
    # replace the original file with edited (shell magic; runs in the notebook)
    # !mv temp.txt {file}
    # Feedback
    sys.stderr.write("\n{} chromosome identifiers tagged.".format(file))
# Tag every PacBio genome's chromosome headers with its strain name
# (loop indentation restored after the jupytext export).
for s in yue_et_al_strains:
    add_strain_id_to_description_line(s+".genome.fa",s)
# -
# Cannot simply use `S288C_reference_sequence_R64-2-1_20150113.fsa` at https://sgd-archive.yeastgenome.org/sequence/S288C_reference/genome_releases/S288C_reference_genome_Current_Release.tgz
# because want nuclear only sequences here to match nuclear ones I obtained from
# https://yjx1217.github.io/Yeast_PacBio_2016/data/,
# where they keep nuclear and genome separate
# Download the 16 SGD nuclear chromosomes and tag their headers so they look
# like the PacBio files (loop indentation restored after the jupytext export;
# the `# !curl`/`# !cat`/`# !rm` lines are notebook shell magics).
chr_nums = ["01","02","03","04","05","06","07","08","09","10","11",
            "12","13","14","15","16"]
g_fns = []
for chr_num in chr_nums:
    fn = "chr"+chr_num+".fsa"
    # !curl -OL https://sgd-archive.yeastgenome.org/sequence/S288C_reference/chromosomes/fasta/{fn}
    g_fns.append(fn)
    # edit or replace description lines to work similar to PacBio
    tag = "SGD_REFchr"+chr_num+" "
    add_strain_id_to_description_line(fn,tag)
# zip all nuclear chromosomes together as one file as they are from https://yjx1217.github.io/Yeast_PacBio_2016/data/
# !cat {" ".join(g_fns)} > SGD_REF.genome.fa
# !rm chr*.fsa
# Get SGD gene sequence in FASTA format to search for best matches in the genomes
# (if/else indentation restored after the jupytext export; `pass` added where a
# branch body is only a shell magic, so this is valid Python outside the notebook).
import sys
gene_filen = gene_name + ".fsa"
if get_seq_from_link:
    # !curl -o {gene_filen} {link_to_FASTA_of_gene}
    pass  # the shell magic above performs the download when run as a notebook
else:
    # !touch {gene_filen}
    sys.stderr.write("\nEDIT THE FILE '{}' TO CONTAIN "
    "YOUR GENE OF INTEREST (FASTA-FORMATTED)"
    ".".format(gene_filen))
    sys.exit(0)
# **I PUT CONTENTS OF FILE `S288C_YDL140C_RPO21_coding.fsa` downloaded from [here](https://www.yeastgenome.org/locus/S000002299/sequence) as 'RPB1.fsa'.**
#
# Now you are prepared to run BLAST to search each PacBio-sequenced genomes for the best match to a gene from the Saccharomyces cerevisiae strain S288C reference sequence.
# ## Use BLAST to search the genomes for matches to the gene in the reference genome at SGD
#
# SGD is the [Saccharomyces cerevisiae Genome Database site](http://yeastgenome.org) and the reference genome is from S288C.
#
# This is going to go through each genome and make a database so it is searchable and then search for matches to the gene. The information on the best match will be collected. One use for that information will be collecting the corresponding sequences later.
#
# Import the script that allows sending BLAST output to Python dataframes so that we can use it here.
from blast_to_df import blast_to_df
# Build the list of strain genome FASTA files (names ending `genome.fa`),
# excluding the BLAST database files (`.nhr`, `.nin`, `.nsq`) left over from
# queries run during development; normally only the FASTA files are present.
fn_to_check = "genome.fa"
import os
import fnmatch
db_suffixes = (".nhr", ".nin", ".nsq")
genomes = [f for f in os.listdir('.')
           if fnmatch.fnmatch(f, '*' + fn_to_check)
           and not f.endswith(db_suffixes)]
genomes
SGD_gene = gene_filen
dfs = []
# For each genome: build a BLAST nucleotide database, query it with the SGD
# gene, and keep only the top-scoring hit (first row) per genome.
for genome in genomes:
    # !makeblastdb -in {genome} -dbtype nucl
    # result = !blastn -query {SGD_gene} -db {genome} -outfmt "6 qseqid sseqid stitle pident qcovs length mismatch gapopen qstart qend sstart send qframe sframe frames evalue bitscore qseq sseq" -task blastn
    from blast_to_df import blast_to_df
    # `result` comes from the `!blastn` magic above (IPython SList); `.n` is
    # its newline-joined text -- this cell only works inside the notebook.
    blast_df = blast_to_df(result.n)
    dfs.append(blast_df.head(1))
# merge the dataframes in the list `dfs` into one dataframe
# NOTE(review): `pd` is first used here but pandas is only visibly imported
# much later (summary-table cell); confirm an earlier cell imports pandas.
df = pd.concat(dfs)
#Save the df
filen_prefix = gene_name + "_orthologBLASTdf"
df.to_pickle(filen_prefix+".pkl")
df.to_csv(filen_prefix+'.tsv', sep='\t',index = False)
df
# Computationally check if any genomes missing from the BLAST results list?
# +
# Reduce each BLAST subject id (e.g. `SK1chrIV`) to its strain id by cutting
# at the chromosome prefix, then compare against strain ids derived from the
# genome file names; any difference means a genome produced no BLAST hit.
subjids = [s.split(chromosome_id_prefix)[0] for s in df.sseqid.tolist()]
len_genome_fn_end = len(fn_to_check) + 1  # +1 accounts for the period between
# the strain id and `fn_to_check`, as in `SK1.genome.fa`
genome_ids = [g[:-len_genome_fn_end] for g in genomes]
a = set(genome_ids)
print("initial:", len(a))
r = set(subjids)
print("results:", len(r))
print("missing:", len(a - r))
#a - r
# -
# Sanity check: Report on how expected size compares to max size seen?
# (`size_expected` is set in an earlier notebook cell.)
size_seen = df.length.max(0)  # maximum BLAST match length observed
print ("Expected size of gene:", size_expected)
print ("Most frequent size of matches:", df.length.mode()[0])
# reuse `size_seen` (it was previously computed and then never used,
# with `df.length.max(0)` recomputed here)
print ("Maximum size of matches:", size_seen)
# ## Collect the identified, raw sequences
#
# Get the expected size centered on the best match, plus a little flanking each because they might not exactly cover the entire open reading frame. (Although, the example here all look to be full size.)
# Get the script for extracting based on position (and install dependency pyfaidx)
import os
file_needed = "extract_subsequence_from_FASTA.py"
if not os.path.isfile(file_needed):
    # !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/Extract_from_FASTA/extract_subsequence_from_FASTA.py
    # !pip install pyfaidx
# +
# Settings used by `determine_pos_to_get` below when computing the span of
# genome sequence to extract around each BLAST hit.
size_expected = size_expected # use value from above, or alter at this point.
#size_expected = df.length.max(0) #bp length of SGD coding sequence; should be equivalent and that way not hardcoded?
extra_add_to_start = 51 #to allow for 'fuzziness' at starting end
extra_add_to_end = 51 #to allow for 'fuzziness' at far end
genome_fn_end = "genome.fa"
def midpoint(items):
    '''
    Return the integer midpoint of the first two values in `items`.

    Both values are coerced to int first; the average is truncated via
    int() on the float result (matching the original behavior).
    '''
    first, second = int(items[0]), int(items[1])
    return int((first + second) / 2)
#midpoint((1,100))
def determine_pos_to_get(match_start,match_end):
    '''
    Take the start and end of the matched region and return the final
    (start, end) positions of the span to extract.

    The expected gene size (module-level `size_expected`) is centered on
    the midpoint of the match, then the module-level `extra_add_to_start`
    / `extra_add_to_end` basepairs are added at each end to allow for
    fuzziness/differences of the actual gene ends in orthologs.

    Because of the added flanking sequence, the computed start can fall
    below zero. Since the region is later passed along as a string, a
    leading dash would be a problem, so the start is clamped to 1. The
    clipped amount shifts the center of the match away from the true gene
    center, so twice that amount is added to the end to counterbalance.
    (As noted originally: the analogous adjustment for when the sequence
    fragment is shorter than the requested span remains a TODO, since the
    hit length is not known here.)
    '''
    center_of_match = midpoint((match_start, match_end))
    # ceiling of half the expected size (round up for odd sizes)
    half_size_expected = int(size_expected / 2.0) + (1 if size_expected % 2 != 0 else 0)
    start_pos = center_of_match - half_size_expected - extra_add_to_start
    end_pos = center_of_match + half_size_expected + extra_add_to_end
    if start_pos < 0:
        # record the clipped amount before clamping so it can be counterbalanced
        raw_amount_missing_at_start = -start_pos
        start_pos = 1
        end_pos += 2 * raw_amount_missing_at_start
    return start_pos, end_pos
# go through the dataframe using information on each to come up with sequence file,
# specific identifier within sequence file, and the start and end to extract
# store these values as a list in a dictionary with the strain identifier as the key.
extracted_info = {}
start,end = 0,0
for row in df.itertuples():
    #print (row.length)
    start_to_get, end_to_get = determine_pos_to_get(row.sstart, row.send)
    posns_to_get = "{}-{}".format(start_to_get, end_to_get)
    record_id = row.sseqid
    strain_id = row.sseqid.split(chromosome_id_prefix)[0]
    seq_fn = strain_id + "." + genome_fn_end
    extracted_info[strain_id] = [seq_fn, record_id, posns_to_get]
# Use the dictionary to get the sequences
# (each value is [genome file, record id within it, "start-end" span])
for id_ in extracted_info:
    # #%run extract_subsequence_from_FASTA.py {*extracted_info[id_]} #unpacking doesn't seem to work here in `%run`
    # %run extract_subsequence_from_FASTA.py {extracted_info[id_][0]} {extracted_info[id_][1]} {extracted_info[id_][2]}
#package up the retrieved sequences
archive_file_name = gene_name+"_raw_ortholog_seqs.tar.gz"
# make list of extracted files using fnmatch
# (`extract_subsequence_from_FASTA.py` names its output `seq_extracted...`)
fn_part_to_match = "seq_extracted"
collected_seq_files_list = []
import os
import sys
import fnmatch
for file in os.listdir('.'):
    if fnmatch.fnmatch(file, fn_part_to_match+'*'):
        #print (file)
        collected_seq_files_list.append(file)
# !tar czf {archive_file_name} {" ".join(collected_seq_files_list)} # use the list for archiving command
sys.stderr.write("\n\nCollected RAW sequences gathered and saved as "
    "`{}`.".format(archive_file_name))
# move the collected raw sequences to a folder in preparation for
# extracting encoding sequence from original source below
# !mkdir raw
# !mv seq_extracted*.fa raw
# -
# That archive should contain the "raw" sequence for each gene, even if the ends are a little different for each. At minimum the entire gene sequence needs to be there at this point; extra at each end is preferable at this point.
#
# You should inspect them as soon as possible and adjust the extra sequence to add higher or lower depending on whether the ortholog genes vary more or less, respectively. The reason they don't need to be perfect yet though is because next we are going to extract the longest open reading frame, which presumably demarcates the entire gene. Then we can return to use that information to clean up the collected sequences to just be the coding sequence.
# ## Collect protein translations of the genes and then clean up "raw" sequences to just be coding
#
# We'll assume the longest translatable frame in the collected "raw" sequences encodes the protein sequence for the gene orthologs of interest. We'll base these steps on the [section '20.1.13 Identifying open reading frames'](http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299) in the present version of the [Biopython Tutorial and Cookbook](http://biopython.org/DIST/docs/tutorial/Tutorial.html) (Last Update – 18 December 2018 (Biopython 1.73)).
# (First run the next cell to get a script needed for dealing with the strand during the translation and gathering of the encoding sequence.)
import os
# Fetch (if not already present) the helper script used below to
# reverse-complement gene sequences extracted from the minus strand.
file_needed = "convert_fasta_to_reverse_complement.py"
if not os.path.isfile(file_needed):
    # !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/ConvertSeq/convert_fasta_to_reverse_complement.py
# Now to perform the work described in the header to this section...
# +
# find the featured open reading frame and collect presumed protein sequences
# Collect the corresponding encoding sequence from the original source
def len_ORF(items):
    '''Sort key: length of the translated ORF, the fourth item of the tuple.'''
    start, end, strand, translation = items
    return len(translation)
def find_orfs_with_trans(seq, trans_table, min_protein_length):
    '''
    adapted from the present section '20.1.13 Identifying open reading frames'
    http://biopython.org/DIST/docs/tutorial/Tutorial.html#htoc299 in the
    present version of the [Biopython Tutorial and Cookbook at
    http://biopython.org/DIST/docs/tutorial/Tutorial.html
    (Last Update – 18 December 2018 (Biopython 1.73)
    Same as there except altered to sort on the length of the
    open reading frame.

    Parameters:
    seq -- Biopython Seq object (must support `.reverse_complement()`,
        slicing, and `.translate()`).
    trans_table -- NCBI translation table number passed to `translate`.
    min_protein_length -- minimum length in amino acids for an ORF to be kept.

    Returns a list of (start, end, strand, protein_string) tuples in the
    coordinates of `seq`, sorted longest ORF first. Note the minus-strand
    start can come out negative when an ORF is truncated at the sequence
    end; the caller clamps that to zero.
    '''
    answer = []
    seq_len = len(seq)
    # scan both strands in all three frames
    for strand, nuc in [(+1, seq), (-1, seq.reverse_complement())]:
        for frame in range(3):
            trans = str(nuc[frame:].translate(trans_table))
            trans_len = len(trans)
            aa_start = 0
            aa_end = 0
            # walk stop-to-stop segments of the translation
            while aa_start < trans_len:
                aa_end = trans.find("*", aa_start)
                if aa_end == -1:
                    aa_end = trans_len
                if aa_end-aa_start >= min_protein_length:
                    # map amino-acid positions back to nucleotide
                    # coordinates on the forward sequence
                    if strand == 1:
                        start = frame+aa_start*3
                        end = min(seq_len,frame+aa_end*3+3)
                    else:
                        start = seq_len-frame-aa_end*3-3
                        end = seq_len-frame-aa_start*3
                    answer.append((start, end, strand,
                        trans[aa_start:aa_end]))
                aa_start = aa_end+1
    answer.sort(key=len_ORF, reverse = True)
    return answer
def generate_rcoutput_file_name(file_name,suffix_for_saving = "_rc"):
    '''
    from https://github.com/fomightez/sequencework/blob/master/ConvertSeq/convert_fasta_to_reverse_complement.py
    Takes a file name as an argument and returns string for the name of the
    output file. The generated name is based on the original file
    name: the suffix is inserted before the extension, or, when the
    name has no extension, appended along with ".fa".
    Specific example
    =================
    Calling function with
        ("sequence.fa", "_rc")
    returns
        "sequence_rc.fa"
    '''
    main_part_of_name, file_extension = os.path.splitext(
        file_name) #from
    #http://stackoverflow.com/questions/541390/extracting-extension-from-filename-in-python
    # Branch on the extension itself rather than on `'.' in file_name`:
    # the old check sent extension-less names containing a dot elsewhere
    # (e.g. "dir.v2/seq") down the wrong branch, dropping the ".fa".
    if file_extension:
        return main_part_of_name + suffix_for_saving + file_extension
    return file_name + suffix_for_saving + ".fa"
def add_strand_to_description_line(file,strand="-1"):
    '''
    Append strand information to FASTA description lines in `file`.

    Takes a file and edits each description line (those starting '>') to
    add "; <strand> strand" at the end. The edited text is written to a
    temporary file that then replaces the original via the notebook `mv`
    magic below; completion is reported on stderr.
    '''
    import sys
    output_file_name = "temp.txt"
    # prepare output file for saving so it will be open and ready
    with open(output_file_name, 'w') as output_file:
        # read in the input file
        with open(file, 'r') as input_handler:
            # prepare to give feedback later or allow skipping to certain start
            lines_processed = 0
            for line in input_handler:
                lines_processed += 1
                if line.startswith(">"):
                    new_line = line.strip() + "; {} strand\n".format(strand)
                else:
                    new_line = line
                # Send text to output
                output_file.write(new_line)
    # replace the original file with edited
    # !mv temp.txt {file}
    # Feedback
    sys.stderr.write("\nIn {}, strand noted.".format(file))
# Translation settings and collectors for the per-strain ORF extraction below.
table = 1 #sets translation table to standard nuclear, see
# https://www.ncbi.nlm.nih.gov/Taxonomy/Utils/wprintgc.cgi
min_pro_len = 80 #cookbook had the standard `100`. Feel free to adjust.
prot_seqs_info = {} #collect as dictionary with strain_id as key. Values to
# be list with source id as first item and protein length as second and
# strand in source seq as third item, and start and end in source sequence as fourth and fifth,
# and file name of protein and gene as sixth and seventh.
# Example key and value pair: 'YPS138':['<source id>','<protein length>',-1,52,2626,'<gene file name>','<protein file name>']
gene_seqs_fn_list = []
prot_seqs_fn_list = []
from Bio import SeqIO
# For each "raw" extracted sequence: find the longest ORF, trim it to the
# first Met, map the ORF back to coordinates in the source genome,
# re-extract exactly the gene region, and save gene + protein FASTA files.
for raw_seq_filen in collected_seq_files_list:
    #strain_id = raw_seq_filen[:-len_genome_fn_end] #if was dealing with source seq
    # file names look like `seq_extracted<strain_id>chr...`; pull out the strain id
    strain_id = raw_seq_filen.split(chromosome_id_prefix)[0].split("seq_extracted")[1]
    record = SeqIO.read("raw/"+raw_seq_filen,"fasta")
    raw_seq_source_fn = strain_id + "." + genome_fn_end
    raw_seq_source_id = record.description.split(":")[0]
    orf_list = find_orfs_with_trans(record.seq, table, min_pro_len)
    orf_start, orf_end, strand, prot_seq = orf_list[0] #longest ORF seq for protein coding
    location_raw_seq = record.description.rsplit(":",1)[1] #get to use in calculating
    # the start and end position in original genome sequence.
    raw_loc_parts = location_raw_seq.split("-")
    start_from_raw_seq = int(raw_loc_parts[0])
    end_from_raw_seq = int(raw_loc_parts[1])
    length_extracted = len(record) #also to use in calculating relative original
    #Fix negative value. (Somehow Biopython can report negative value when hitting
    # end of sequence without encountering stop codon and negatives messes up
    # indexing later it seems.)
    if orf_start < 0:
        orf_start = 0
    # Trim back to the first Methionine, assumed to be the initiating MET.
    # (THIS MIGHT BE A SOURCE OF EXTRA 'LEADING' RESIDUES IN SOME CASES & ARGUES
    # FOR LIMITING THE AMOUNT OF FLANKING SEQUENCE ADDED TO ALLOW FOR FUZINESS.)
    try:
        amt_resi_to_trim = prot_seq.index("M")
    except ValueError:
        sys.stderr.write("**ERROR**When searching for initiating methionine,\n"
            "no Methionine found in the traslated protein sequence.**ERROR**")
        sys.exit(1)
    prot_seq = prot_seq[amt_resi_to_trim:]
    len_seq_trimmed = amt_resi_to_trim * 3
    # Calculate the adjusted start and end values for the untrimmed ORF
    adj_start = start_from_raw_seq + orf_start
    adj_end = end_from_raw_seq - (length_extracted - orf_end)
    # Adjust for trimming for appropriate strand.
    if strand == 1:
        adj_start += len_seq_trimmed
        #adj_end += 3 # turns out stop codon is part of numbering biopython returns
    elif strand == -1:
        adj_end -= len_seq_trimmed
        #adj_start -= 3 # turns out stop codon is part of numbering biopython returns
    else:
        sys.stderr.write("**ERROR**No strand match option detected!**ERROR**")
        sys.exit(1)
    # Collect the sequence for the actual gene encoding region from
    # the original sequence. This way the original numbers will
    # be put in the file.
    start_n_end_str = "{}-{}".format(adj_start,adj_end)
    # %run extract_subsequence_from_FASTA.py {raw_seq_source_fn} {raw_seq_source_id} {start_n_end_str}
    # rename the extracted subsequence a more distinguishing name and notify
    g_output_file_name = strain_id +"_" + gene_name + "_ortholog_gene.fa"
    # !mv {raw_seq_filen} {g_output_file_name} # because the sequence saved happens to
    # be same as raw sequence file saved previously, that name can be used to
    # rename new file.
    gene_seqs_fn_list.append(g_output_file_name)
    sys.stderr.write("\n\nRenamed gene file to "
        "`{}`.".format(g_output_file_name))
    # Convert extracted sequence to reverse complement if translation was on negative strand.
    if strand == -1:
        # %run convert_fasta_to_reverse_complement.py {g_output_file_name}
        # replace original sequence file with the produced file
        produced_fn = generate_rcoutput_file_name(g_output_file_name)
        # !mv {produced_fn} {g_output_file_name}
        # add (after saved) onto the end of the description line for that `-1 strand`
        # No way to do this in my current version of convert sequence. So editing descr line.
        add_strand_to_description_line(g_output_file_name)
    #When settled on actual protein encoding sequence, fill out
    # description to use for saving the protein sequence.
    prot_descr = (record.description.rsplit(":",1)[0]+ " "+ gene_name
        + "_ortholog"+ "| " +str(len(prot_seq)) + " aas | from "
        + raw_seq_source_id + " "
        + str(adj_start) + "-"+str(adj_end))
    if strand == -1:
        prot_descr += "; {} strand".format(strand)
    # save the protein sequence as FASTA
    chunk_size = 70 #<---amino acids per line to have in FASTA
    prot_seq_chunks = [prot_seq[i:i+chunk_size] for i in range(
        0, len(prot_seq),chunk_size)]
    prot_seq_fa = ">" + prot_descr + "\n"+ "\n".join(prot_seq_chunks)
    p_output_file_name = strain_id +"_" + gene_name + "_protein_ortholog.fa"
    with open(p_output_file_name, 'w') as output:
        output.write(prot_seq_fa)
    prot_seqs_fn_list.append(p_output_file_name)
    sys.stderr.write("\n\nProtein sequence saved as "
        "`{}`.".format(p_output_file_name))
    # at end store information in `prot_seqs_info` for later making a dataframe
    # and then text table for saving summary
    #'YPS138':['<source id>',<protein length>,-1,52,2626,'<gene file name>','<protein file name>']
    prot_seqs_info[strain_id] = [raw_seq_source_id,len(prot_seq),strand,adj_start,adj_end,
        g_output_file_name,p_output_file_name]
    sys.stderr.write("\n******END OF A SET OF PROTEIN ORTHOLOG "
        "AND ENCODING GENE********")
# -
# use `prot_seqs_info` for saving a summary text table (first convert to dataframe?)
table_fn_prefix = gene_name + "_orthologs_table"
table_fn = table_fn_prefix + ".tsv"
pkl_table_fn = table_fn_prefix + ".pkl"
import pandas as pd
# one row per strain, indexed by strain id
info_df = pd.DataFrame.from_dict(prot_seqs_info, orient='index',
    columns=['descr_id', 'length', 'strand', 'start','end','gene_file','prot_file']) # based on
# https://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.from_dict.html and
# note from Python 3.6 that `pd.DataFrame.from_items` is deprecated;
#"Please use DataFrame.from_dict"
info_df.to_pickle(pkl_table_fn)
info_df.to_csv(table_fn, sep='\t') # keep index is default
sys.stderr.write("Text file of associated details saved as '{}'.".format(table_fn))
# pack up archive of gene and protein sequences plus the table
seqs_list = gene_seqs_fn_list + prot_seqs_fn_list + [table_fn,pkl_table_fn]
archive_file_name = gene_name+"_ortholog_seqs.tar.gz"
# !tar czf {archive_file_name} {" ".join(seqs_list)} # use the list for archiving command
sys.stderr.write("\nCollected gene and protein sequences"
    " (plus table of details) gathered and saved as "
    "`{}`.".format(archive_file_name))
# Save the tarballed archive to your local machine.
# -----
# ## Estimate the count of the heptad repeats
#
# Make a table of the estimated heptad repeats, based on a match to a pattern that I now know to be imperfect, for each orthologous protein sequence.
# get the 'patmatch results to dataframe' script
# !curl -O https://raw.githubusercontent.com/fomightez/sequencework/master/patmatch-utilities/patmatch_results_to_df.py
# Using the trick of putting `%%capture` on first line from [here](https://stackoverflow.com/a/23692951/8508004) to suppress the output from `patmatch_results_to_df` function from filling up cell.
# +
# %%time
# %%capture
# Go through each protein sequence file and look for matches to heptad pattern
# LATER POSSIBLE IMPROVEMENT. Translate pasted gene sequence and add SGD REF S228C as first in list `prot_seqs_fn_list`. Because
# although this set of orthologs includes essentially S228C, other lists won't and best to have reference for comparing.
heptad_pattern = "[YF]SP[TG]SP[STAGN]" # will catch repeats#2 through #26 of S288C according to Corden, 2013 PMID: 24040939
from patmatch_results_to_df import patmatch_results_to_df
sum_dfs = []
raw_dfs = []
# Run PatMatch on each protein FASTA; keep the per-protein top row (which,
# after the descending sort, carries the total hit count) plus the raw hits.
for prot_seq_fn in prot_seqs_fn_list:
    # !perl ../../patmatch_1.2/unjustify_fasta.pl {prot_seq_fn}
    # output = !perl ../../patmatch_1.2/patmatch.pl -p {heptad_pattern} {prot_seq_fn}.prepared
    os.remove(os.path.join(prot_seq_fn+".prepared")) #delete file made for PatMatch
    # `output` comes from the `!patmatch.pl` magic above; `.n` is its text
    raw_pm_df = patmatch_results_to_df(output.n, pattern=heptad_pattern, name="CTD_heptad")
    raw_pm_df.sort_values('hit_number', ascending=False, inplace=True)
    sum_dfs.append(raw_pm_df.groupby('FASTA_id').head(1))
    raw_dfs.append(raw_pm_df)
sum_pm_df = pd.concat(sum_dfs, ignore_index=True)
sum_pm_df.sort_values('hit_number', ascending=False, inplace=True)
sum_pm_df = sum_pm_df[['FASTA_id','hit_number']]
#make protein length into dictionary with ids as keys to map to FASTA_ids in
# order to add protein length as a column in summary table
length_info_by_id= dict(zip(info_df.descr_id,info_df.length))
sum_pm_df['prot_length'] = sum_pm_df['FASTA_id'].map(length_info_by_id)
sum_pm_df = sum_pm_df.reset_index(drop=True)
raw_pm_df = pd.concat(raw_dfs, ignore_index=True)
# -
# Because of use of `%%capture` to suppress output, need a separate cell to see results summary.
sum_pm_df
# I assume that '+ 2' should be added to the hit_number for each based on S288C according to [Corden, 2013](https://www.ncbi.nlm.nih.gov/pubmed/24040939) (or `+1` like [Hsin and Manley, 2012](https://www.ncbi.nlm.nih.gov/pubmed/23028141)); however, that is something that could be explored further.
#
# Plot distribution
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeat_distribution'
#sns.distplot(sum_pm_df["hit_number"], kde=False, bins = max(sum_pm_df["hit_number"]));
# Bar chart: how many strains show each heptad-repeat count.
p= sns.countplot(sum_pm_df["hit_number"],
    order = list(range(sum_pm_df.hit_number.min(),sum_pm_df.hit_number.max()+1)),
    palette="Blues"); # `order` to get those categories with zero
# counts to show up from https://stackoverflow.com/a/45359713/8508004
p.set_xlabel("heptad repeats");
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png')
    fig.savefig(saveplot_fn_prefix + '.svg');
# -
# Any patterns obvious relative species?
# +
# %matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
# This is loosely based on my past use of seaborn when making `plot_sites_position_across_chromosome.py` and related scripts.
# For example, see `GC-clusters relative mito chromosome and feature` where I ran
# `%run plot_sites_position_across_chromosome.py GC_df_for_merging.pkl -o strand_ofGCacross_mito_chrom`
# add the strain info for listing that without chr info & add species information for coloring on that
# Maps strain id -> species; used to color the strip plot below.
# (This dict is duplicated in the later protein-length plot cell.)
species_dict = {
    "SGD_REF":"cerevisiae",
    "S288C":"cerevisiae",
    "DBVPG6765":"cerevisiae",
    "Y12":"cerevisiae",
    "YPS128":"cerevisiae",
    "UWOPS034614":"cerevisiae",
    "CBS432":"paradoxus",
    "N44":"paradoxus",
    "YPS138":"paradoxus",
    "UWOPS919171":"paradoxus",
    "SK1":"cerevisiae",
    "DBVPG6044":"cerevisiae",
    "UFRJ50816":"paradoxus",
    } # originally had keys ordered like https://www.nature.com/articles/ng.3847/figures/6 but adjusted
# when I wanted to control order of y-axis labels to have strains with most repeats at top; see
# note below in `Adjusted look` mentioning not fond of essentially hardcoding; SGD_REF added
# later after style settled and so guarantee it would work with `Adjusted look`.
def FASTA_id_to_strain(FAid):
    '''
    Convert a FASTA_id column value to its strain_id: everything before
    the module-level `chromosome_id_prefix` (e.g. "SK1chrIV" -> "SK1").
    '''
    strain_part, _, _ = FAid.partition(chromosome_id_prefix)
    return strain_part
# derive strain and species columns for the plot below
sum_pm_df['strain'] = sum_pm_df['FASTA_id'].apply(FASTA_id_to_strain)
# sum_pm_df['species'] = sum_pm_df['FASTA_id'].apply(strain_to_species) # since need species for label plot strips
# it is easier to add species column first and then use map instead of doing both at same with one `apply`
# of a function or both separately, both with `apply` of two different function.
# sum_pm_df['species'] = sum_pm_df['strain'].apply(strain_to_species)
sum_pm_df['species'] = sum_pm_df['strain'].map(species_dict)
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeats_by_strain'
import seaborn as sns
# Simple look - Comment out everything below to the next two lines to see it again.
p = sns.stripplot(x="hit_number", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98)
p.set_xlabel("heptad repeats")
p.set_xticklabels([" ","23"," ","24", " ", "25"]) # This was much easier than all the stuff I tried for `Adjusted` look below
# and the only complaint I have with the results is that what I assume are the `minor` tick lines show up; still ended up
# needing this when added `xticks = p.xaxis.get_major_ticks()` in order to not show decimals for ones I kept
#p.set(xticks=[]) # this works to remove the ticks entirely; however, I want to keep major ticks
xticks = p.xaxis.get_major_ticks() #based on https://stackoverflow.com/q/50820043/8508004
for i in range(len(xticks)):
    #print (i) # WAS FOR DEBUGGING
    keep_ticks = [1,3,5] #harcoding essentially again, but at least it works
    if i not in keep_ticks:
        xticks[i].set_visible(False)
'''
# Highly Adjusted look - Comment out default look parts above. Ended up going with simple above because still couldn't get
# those with highest number of repeats with combination I could come up with.
sum_pm_df["repeats"] = sum_pm_df["hit_number"].astype(str) # when not here (use `x="hit_number"` in plot) or
# tried `.astype('category')` get plotting of the 0.5 values too
sum_pm_df.sort_values('hit_number', ascending=True, inplace=True) #resorting again was necessary when
# added `sum_pm_df["hit_number"].astype(str)` to get 'lower' to 'higher' as left to right for x-axis; otherwise
# it was putting the first rows on the left, which happened to be the 'higher' repeat values
#p = sns.catplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98) #marker size ignored in catplot?
p = sns.stripplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98)
#p = sns.stripplot(x="repeats", y="strain", hue="species", order = list(species_dict.keys()), data=sum_pm_df, marker="D",
#                  size=10, alpha=.98) # not fond of essentially harcoding to strain order but makes more logical sense to have
# strains with most repeats at the top of the y-axis; adding `order` makes `sort` order be ignored
p.set_xlabel("heptad repeats")
sum_pm_df.sort_values('hit_number', ascending=False, inplace=True) #revert to descending sort for storing df;
'''
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png', bbox_inches='tight')
    fig.savefig(saveplot_fn_prefix + '.svg');
# -
# No obvious patterns except *cerevisiae* only one to reach the maximum seen among these cases. (Keep in mind that S288C is represented twice in all this.)
#
# What about the length of the deduced protein sequences relative the repeat number?
# +
# %matplotlib inline
# above line works for JupyterLab which I was developing in. Try `%matplotlib notebook` for when in classic.
# Visualization
# This is loosely based on my past use of seaborn when making `plot_sites_position_across_chromosome.py` and related scripts.
# For example, see `GC-clusters relative mito chromosome and feature` where I ran
# `%run plot_sites_position_across_chromosome.py GC_df_for_merging.pkl -o strand_ofGCacross_mito_chrom`
# add the strain info for listing that without chr info & add species information for coloring on that
# Maps strain id -> species (repeats the dict from the earlier by-strain
# plot cell so this cell can run independently).
species_dict = {
    "SGD_REF":"cerevisiae",
    "S288C":"cerevisiae",
    "DBVPG6765":"cerevisiae",
    "Y12":"cerevisiae",
    "YPS128":"cerevisiae",
    "UWOPS034614":"cerevisiae",
    "CBS432":"paradoxus",
    "N44":"paradoxus",
    "YPS138":"paradoxus",
    "UWOPS919171":"paradoxus",
    "SK1":"cerevisiae",
    "DBVPG6044":"cerevisiae",
    "UFRJ50816":"paradoxus",
    } # originally had keys ordered like https://www.nature.com/articles/ng.3847/figures/6 but adjusted
# when I wanted to control order of y-axis labels to have strains with most repeats at top; see
# note below in `Adjusted look` mentioning not fond of essentially hardcoding; SGD_REF added
# later after style settled and so guarantee it would work with `Adjusted look`.
def FASTA_id_to_strain(FAid):
    '''
    Convert a FASTA_id column value to its strain_id: everything before
    the module-level `chromosome_id_prefix` (e.g. "SK1chrIV" -> "SK1").
    '''
    strain_part, _, _ = FAid.partition(chromosome_id_prefix)
    return strain_part
# derive strain and species columns for the plot below
sum_pm_df['strain'] = sum_pm_df['FASTA_id'].apply(FASTA_id_to_strain)
# sum_pm_df['species'] = sum_pm_df['FASTA_id'].apply(strain_to_species) # since need species for label plot strips
# it is easier to add species column first and then use map instead of doing both at same with one `apply`
# of a function or both separately, both with `apply` of two different function.
# sum_pm_df['species'] = sum_pm_df['strain'].apply(strain_to_species)
sum_pm_df['species'] = sum_pm_df['strain'].map(species_dict)
#Want an image file of the figure saved?
saveplot = True
saveplot_fn_prefix = 'heptad_repeats_by_proteinlen'
import seaborn as sns
# Simple look - Comment out everything below to the next two lines to see it again.
# Same plot as the by-strain cell, but colored by protein length instead of species.
p = sns.stripplot(x="hit_number", y="strain", hue="prot_length", data=sum_pm_df, marker="D", size=10, alpha=.98)
p.set_xlabel("heptad repeats")
p.set_xticklabels([" ","23"," ","24", " ", "25"]) # This was much easier than all the stuff I tried for `Adjusted` look below
# and the only complaint I have with the results is that what I assume are the `minor` tick lines show up; still ended up
# needing this when added `xticks = p.xaxis.get_major_ticks()` in order to not show decimals for ones I kept
#p.set(xticks=[]) # this works to remove the ticks entirely; however, I want to keep major ticks
xticks = p.xaxis.get_major_ticks() #based on https://stackoverflow.com/q/50820043/8508004
for i in range(len(xticks)):
    #print (i) # WAS FOR DEBUGGING
    keep_ticks = [1,3,5] #harcoding essentially again, but at least it works
    if i not in keep_ticks:
        xticks[i].set_visible(False)
'''
# Highly Adjusted look - Comment out default look parts above. Ended up going with simple above because still couldn't get
# those with highest number of repeats with combination I could come up with.
sum_pm_df["repeats"] = sum_pm_df["hit_number"].astype(str) # when not here (use `x="hit_number"` in plot) or
# tried `.astype('category')` get plotting of the 0.5 values too
sum_pm_df.sort_values('hit_number', ascending=True, inplace=True) #resorting again was necessary when
# added `sum_pm_df["hit_number"].astype(str)` to get 'lower' to 'higher' as left to right for x-axis; otherwise
# it was putting the first rows on the left, which happened to be the 'higher' repeat values
#p = sns.catplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98) #marker size ignored in catplot?
p = sns.stripplot(x="repeats", y="strain", hue="species", data=sum_pm_df, marker="D", size=10, alpha=.98)
#p = sns.stripplot(x="repeats", y="strain", hue="species", order = list(species_dict.keys()), data=sum_pm_df, marker="D",
#                  size=10, alpha=.98) # not fond of essentially harcoding to strain order but makes more logical sense to have
# strains with most repeats at the top of the y-axis; adding `order` makes `sort` order be ignored
p.set_xlabel("heptad repeats")
sum_pm_df.sort_values('hit_number', ascending=False, inplace=True) #revert to descending sort for storing df;
'''
if saveplot:
    fig = p.get_figure() #based on https://stackoverflow.com/a/39482402/8508004
    fig.savefig(saveplot_fn_prefix + '.png', bbox_inches='tight')
    fig.savefig(saveplot_fn_prefix + '.svg');
# -
# Increase by seven as expected. However, it is interesting that some strains whose deduced protein sequence is the same size fall into bins with a differing number of repeats.
#
# Examining this indicates that perhaps my heptad pattern for estimating may not be perfected?
# Here is the alignment of the end of the deduced CTD of *cerevisiae* reference with the *paradoxus* strain UWOPS919171.
#
# ```
# SGD_REFchr04 NDAMAGGFTAYGGADYGEATSPFGAYGEAPTSPGFGVSSPGFSPTSPTYS
# UWOPS919171chrIV NDAMAGGFTAYGGADYGEATSPFGAYGEAPTSPGFGVSSPGFSPTSPTYS
# **************************************************
#
# SGD_REFchr04 PTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSP
# UWOPS919171chrIV PTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSP
# **************************************************
#
# SGD_REFchr04 TSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPT
# UWOPS919171chrIV TSPSYSPTSPSYSPTSPSYSPTSPSYSSTSPSYSPTSPSYSPTSPSYSPT
# ***************************.**********************
#
# SGD_REFchr04 SPSYSPTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPNYSPTS
# UWOPS919171chrIV SPSYSPTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPNYSPTS
# **************************************************
#
# SGD_REFchr04 PSYSPTSPGYSPGSPAYSPKQDEQKHNENENSR
# UWOPS919171chrIV PSYSPTSPGYSPGSPAYSPKQDEQKHNENENSK
# ********************************:
# ```
#
# Although the same length, a `YSPTSPS` in the reference sequence is `YSSTSPS` in UWOPS919171. This is something else that could be explored further.
#
# Likewise, for *cerevisiae* N44 vs. *cerevisiae* SK1, my pattern doesn't allow for the `YSPTSPS` to `YSPMSPS` change. (That repeat is shown as lost in *paradoxus* UFRJ50816 in this alignment just because it happened to be included along with N44 and SK1 at the time of the alignment, and the TCoffee program chose to place the gap in UFRJ50816 at this point because it was the least conserved on the other alignment lines. TCoffee could have placed the gap anywhere among the same repeats if it was considering UFRJ50816 relative to N44 or the others alone.)
#
# ```
# N44chrIV NDAMAGGFTAYGGADYGEATSPFGAYGEAPTSPGFGVSSPGFSPTSPTYS
# SK1chrIV NDAMAGGFTAYGGADYGEATSPFGAYGEAPTSPGFGVSSPGFSPTSPTYS
# UFRJ50816chrXI NDAMAGGFTAYGGADYGEATSPFGAYGEAPTSPGFGVSSPGFSPTSPTYS
# **************************************************
#
# N44chrIV PTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSP
# SK1chrIV PTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSP
# UFRJ50816chrXI PTSPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSY--
# ************************************************
#
# N44chrIV TSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPT
# SK1chrIV MSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPT
# UFRJ50816chrXI -----SPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPT
# *********************************************
#
# N44chrIV SPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPNYSPTSPSYSPTS
# SK1chrIV SPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPNYSPTSPSYSPTS
# UFRJ50816chrXI SPAYSPTSPSYSPTSPSYSPTSPSYSPTSPSYSPTSPNYSPTSPSYSPTS
# **************************************************
#
# N44chrIV PGYSPGSPAYSPKQDEQKHNENENSK
# SK1chrIV PGYSPGSPAYSPKQDEQKHNENENSR
# UFRJ50816chrXI PGYSPGSPAYSPKQDEQKHNENENSK
# *************************:
# ```
# All the raw data is there for each strain in `raw_pm_df`. For example, the next cell shows how to view the data associated with the summary table for strain UFRJ50816:
# Per-hit rows for one strain, ordered by repeat number for easy inspection.
UFRJ50816 = raw_pm_df[raw_pm_df['FASTA_id'] == 'UFRJ50816chrXI'].sort_values('hit_number', ascending=True).reset_index(drop=True)
UFRJ50816
# The summary and raw data will be packaged up into one file in the cell below. One of the forms will be a tabular text data ('.tsv') files that can be opened in any spreadsheet software.
# save summary and raw results for use elsewhere (or use `.pkl` files for reloading the pickled dataframe into Python/pandas)
patmatch_fn_prefix = gene_name + "_orthologs_patmatch_results"
patmatchsum_fn_prefix = gene_name + "_orthologs_patmatch_results_summary"
patmatch_fn = patmatch_fn_prefix + ".tsv"
pkl_patmatch_fn = patmatch_fn_prefix + ".pkl"
patmatchsum_fn = patmatchsum_fn_prefix + ".tsv"
pklsum_patmatch_fn = patmatchsum_fn_prefix + ".pkl"
import pandas as pd
# Summary dataframe: pickle for round-tripping plus tab-separated text for spreadsheets.
sum_pm_df.to_pickle(pklsum_patmatch_fn)
sum_pm_df.to_csv(patmatchsum_fn, sep='\t') # keep index is default
sys.stderr.write("Text file of summary details saved as '{}'.".format(patmatchsum_fn))
# Raw dataframe saved in the same two forms.
raw_pm_df.to_pickle(pkl_patmatch_fn)
raw_pm_df.to_csv(patmatch_fn, sep='\t') # keep index is default
# Bug fix: report the raw-details file name here (it previously echoed patmatchsum_fn).
sys.stderr.write("\nText file of raw details saved as '{}'.".format(patmatch_fn))
# pack up archive dataframes
pm_dfs_list = [patmatch_fn,pkl_patmatch_fn,patmatchsum_fn,pklsum_patmatch_fn]
archive_file_name = patmatch_fn_prefix+".tar.gz"
# !tar czf {archive_file_name} {" ".join(pm_dfs_list)} # use the list for archiving command
sys.stderr.write("\nCollected pattern matching"
                 " results gathered and saved as "
                 "`{}`.".format(archive_file_name))
# Download the tarballed archive of the files to your computer.
#
# For now that archive doesn't include the figures generated from the plots. Download those if you want them. (Look for `saveplot_fn_prefix` settings in the code to help identify file names.)
# ----
| notebooks/GSD/GSD Rpb1_orthologs_in_PB_genomes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# We have real observational data for the stars Polaris and 47 Cas.
# First we calculate their angular distance using our algorithm on the catalogue coordinates.
# Then we calculate the same distance again, this time using the pixel centroids from the image.
# The pixel coordinates are known thanks to astrometry.
import math
import numpy as np
# +
def convertRADEC(ra, dec):
    """Convert (ra, dec) in degrees to a unit Cartesian vector [x, y, z].

    Note: `dec` is treated as a polar angle here (callers shift declination
    by +90 degrees before calling), so z = cos(dec).
    """
    ra_rad = np.deg2rad(ra)
    dec_rad = np.deg2rad(dec)
    sin_dec = math.sin(dec_rad)
    return [sin_dec * math.cos(ra_rad),
            sin_dec * math.sin(ra_rad),
            math.cos(dec_rad)]
def getDistance(a, b):
    """Return the angular separation in degrees between two stars given as
    [ra, dec] pairs (degrees), via the dot product of their unit vectors."""
    def to_cart(ra, dec):
        # Shift dec by 90 deg so it acts as a polar angle in the conversion.
        theta = np.deg2rad(dec + 90)
        phi = np.deg2rad(ra)
        return (math.sin(theta) * math.cos(phi),
                math.sin(theta) * math.sin(phi),
                math.cos(theta))
    ax, ay, az = to_cart(a[0], a[1])
    bx, by, bz = to_cart(b[0], b[1])
    dot = ax * bx + ay * by + az * bz
    return math.degrees(math.acos(dot))
# -
# Catalogue (RA, Dec) in degrees for the two reference stars.
star_polaris_ra_dec = [37.95456067, 89.26410897]
star_47cas_ra_dec = [31.28093428, 77.28134006]
dpolaris_47cas = getDistance(star_polaris_ra_dec, star_47cas_ra_dec)
print(f'{dpolaris_47cas=:.9f}')
# +
# centroid pixel coordinates in image polar_05secs:
# polaris: [631, 507]
# 47cas: [1150, 290]
# +
# Now using the centroids we will get their unitary vectors using the equations from Manuel
# And we should see the same distance
# +
# f is the focal length (in pixels) of the camera/lens used for the capture.
f = 2657.33
def computeUnitaryVector(centroid, f):
    """Convert a pixel centroid [cx, cy] into a unit direction vector.

    The optical axis pierces the sensor at (CENTER_X, CENTER_Y); the focal
    length ``f`` (pixels) sets the angular scale of the projection.
    Returns [x_u, y_u, z_u], a unit vector in the camera frame.
    """
    CENTER_X = 666
    CENTER_Y = 440
    x = centroid[0] - CENTER_X
    y = centroid[1] - CENTER_Y
    # hypot(x/f, y/f) is the tangent of the off-axis angle; the elevation is
    # its complement.  Computed once instead of three times as before.
    elevation = math.pi / 2 - math.atan(math.hypot(x / f, y / f))
    azimuth = math.atan2(y, x)
    x_u = math.cos(azimuth) * math.cos(elevation)
    y_u = math.sin(azimuth) * math.cos(elevation)
    z_u = math.sin(elevation)
    return [x_u, y_u, z_u]
def getDistanceUnitaries(a, b):
    """Angular distance in degrees between two unit vectors a and b."""
    dot_product = a[0] * b[0] + a[1] * b[1] + a[2] * b[2]
    return math.degrees(math.acos(dot_product))
# -
# Same separation, now from the measured pixel centroids; it should match the
# catalogue-based distance computed above to within calibration error.
star_polaris_unitary_v = computeUnitaryVector([631, 507], f)
star_47cas_unitary_v = computeUnitaryVector([1150, 290], f)
getDistanceUnitaries(star_polaris_unitary_v, star_47cas_unitary_v)
| startrackerpy/server/startracker/distances_maths.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.1
# language: julia
# name: julia-1.7
# ---
using CSV, DataFrames, Dates, Plots
# Johns Hopkins CSSE cumulative confirmed-case counts (one column per day).
data = CSV.read("--path--/COVID-19/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", DataFrame);
# Date axis: the dataset starts on 2020-01-22 and runs through yesterday.
days = Dates.Date(2020,1,22):Dates.Day(1):(Dates.today()-Dates.Day(1));
# Cumulative counts for Italy (case-count columns start at index 5).
casesItaly = Array(filter(:"Country/Region" => == ("Italy"), data)[:,5:end]);
# Keep the first 7 cumulative values, then append each day's difference versus
# the value 7 days earlier (a rolling weekly change).
# NOTE(review): if daily new cases were intended, the offset should be 1, not 7 — confirm.
ma = casesItaly[1:7]
count = 1
for i in casesItaly[8:end]
    push!(ma, i-casesItaly[count])
    count += 1
end
plot(days,ma,label="Italy")
| Covid.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + active=""
# 给你两个字符串数组 word1 和 word2 。
# 如果两个数组表示的字符串相同,返回 true ;否则,返回 false 。
#
# 数组表示的字符串 是由数组中的所有元素 按顺序 连接形成的字符串。
#
# 示例 1:
# 输入:word1 = ["ab", "c"], word2 = ["a", "bc"]
# 输出:true
# 解释:
# word1 表示的字符串为 "ab" + "c" -> "abc"
# word2 表示的字符串为 "a" + "bc" -> "abc"
# 两个字符串相同,返回 true
#
# 示例 2:
# 输入:word1 = ["a", "cb"], word2 = ["ab", "c"]
# 输出:false
#
# 示例 3:
# 输入:word1 = ["abc", "d", "defg"], word2 = ["abcddefg"]
# 输出:true
#
# 提示:
# 1 <= word1.length, word2.length <= 10^3
# 1 <= word1[i].length, word2[i].length <= 10^3
# 1 <= sum(word1[i].length), sum(word2[i].length) <= 10^3
# word1[i] 和 word2[i] 由小写字母组成
# -
class Solution:
    def arrayStringsAreEqual(self, word1: List[str], word2: List[str]) -> bool:
        """Return True iff the concatenations of word1 and word2 are equal."""
        return ''.join(word1) == ''.join(word2)
| String/1123/1662. Check If Two String Arrays are Equivalent.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# A quick tour of common list methods.
l = [1, 2, 3, 4, 5]
l.append(6)
l
l.count(3)
help(l.count)
# pop(2) removes and returns the element at index 2 (the value 3).
l.pop(2)
l
l.count(3)
my_list = [1, 2, 3]
# insert(i, x) places x before index i...
my_list.insert(1, 100)
my_list
my_list.insert(3, 900)
my_list
my_list.insert(5, 500)
my_list
# ...and an out-of-range index simply appends at the end.
my_list.insert(20, 2000)
my_list
| Methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import math
import random
# Reading of file
def ReadFile(file):
    """Parse a comma-separated text file of numbers.

    Each line becomes one item: a list of floats, one per column.
    Returns the list of items.
    """
    # `with` guarantees the handle is closed even if parsing raises
    # (the original opened/closed manually, leaking on error).
    with open(file, "r") as f:
        lines = f.read().splitlines()
    return [[float(value) for value in line.split(',')] for line in lines]
def Figure(items):
    """Scatter-plot the raw 2-D points (blue) before clustering."""
    plt.figure(figsize=(20,10))
    for item in items:
        plt.scatter(item[0],item[1],c='b',s=70)
    plt.title('Before applying K Means')
    plt.show()
def FinalFigure(items,clusters):
    """Scatter-plot the points colored by their 1-based cluster label."""
    plt.figure(figsize=(20,10))
    for i in range(len(items)):
        # One color per cluster; clusters[i] is 1-based, hence the -1.
        colors=['k','b','y','g','r']
        plt.scatter(items[i][0],items[i][1],c=colors[clusters[i]-1],s=70)
    plt.title('After applying K Means')
    plt.show()
def EuclideanDistance(obj1,obj2):
    """Euclidean distance between two equal-length numeric sequences."""
    total = 0
    for idx in range(len(obj1)):
        diff = obj1[idx] - obj2[idx]
        total += diff * diff
    return math.sqrt(total)
def Distances(items,means):
    """Distance matrix: one row per mean, each row holding the distance
    from that mean to every item, in item order."""
    return [[EuclideanDistance(item, mean) for item in items]
            for mean in means]
def InitializeMeans(items, k):
    """Pick k distinct starting means by sampling random items.

    Re-draws until k value-distinct means are collected, so `items` must
    contain at least k unique rows or this loop will never terminate.
    (Removed the unused `no_f` local from the original.)
    """
    means = []
    while(len(means)!=k):
        mean = list(random.choice(items))
        # Skip value-duplicates so two clusters never start at the same spot.
        if mean not in means:
            means.append(mean)
    return means
def CalculateMean(items,clusters,i,j):
    """Mean of feature i over items assigned to cluster j+1.

    Returns -1 as a sentinel when cluster j+1 has no members.
    """
    members = [items[idx][i] for idx in range(len(items))
               if clusters[idx] == j + 1]
    if not members:
        return -1
    return sum(members) / float(len(members))
def UpdateMean(items,clusters,means):
    """Recompute every mean coordinate from its cluster members, in place.

    Returns the (mutated) `means` list for convenience.
    """
    for cluster_idx in range(len(means)):
        for feature_idx in range(len(means[0])):
            updated = CalculateMean(items, clusters, feature_idx, cluster_idx)
            # An empty cluster (-1 sentinel) keeps its previous mean.
            if updated != -1:
                means[cluster_idx][feature_idx] = updated
    return means
def ClusterAssigning(clusters,distances):
    """Assign every item to its nearest mean (labels are 1-based).

    `distances[m][i]` is the distance from mean m to item i; ties keep the
    lowest mean index, matching a strict less-than comparison.
    """
    for item_idx in range(len(clusters)):
        best = math.inf
        for mean_idx, row in enumerate(distances):
            d = row[item_idx]
            if d < best:
                best = d
                clusters[item_idx] = mean_idx + 1
    return clusters
def KMeans(file,no_clus,num_of_iterations):
    """Run k-means on 2-D points read from `file`.

    file              : comma-separated text file of points (one per line)
    no_clus           : number of clusters k
    num_of_iterations : fixed number of assign/update passes (no convergence test)
    Plots the points before and after clustering and prints each point's label.
    """
    items=ReadFile(file)
    Figure(items)
    # Cluster labels are 1-based; 0 means "not yet assigned".
    clusters=[0 for i in range(len(items))]
    means=InitializeMeans(items,no_clus)
    while(num_of_iterations):
        distances=Distances(items,means)
        clusters=ClusterAssigning(clusters,distances)
        #print("---------------Means---------------------------")
        #print(means)
        means=UpdateMean(items,clusters,means)
        #print("---------------Clusters---------------------------")
        #print(clusters)
        #print()
        num_of_iterations-=1
    FinalFigure(items,clusters)
    print('Points\t\tCluster')
    print('----------------------')
    for i in range(len(items)):
        print(items[i],'\t',clusters[i])
# Parameters : ( File_Name, No_of_Clusters, No_of_Iterations )
KMeans('sample.txt',4,5)
| K_Means/K_Means_Using_EuclideanDistance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # First Attempt
# batch size 256 lr 1e-3
# ### Import modules
# %matplotlib inline
from __future__ import division
import sys
import os
os.environ['MKL_THREADING_LAYER']='GNU'
sys.path.append('../')
from Modules.Basics import *
from Modules.Class_Basics import *
# ## Options
# +
# Feature set and model-architecture selection for this run (Python 2 notebook).
classTrainFeatures = basic_features
classModel = 'modelSwish'
varSet = "basic_features"
# Cross-validation folds and ensemble configuration.
nSplits = 10
ensembleSize = 10
ensembleMode = 'loss'
# Training hyper-parameters; epochs per fit is 1 because the driver loops externally.
maxEpochs = 10000
compileArgs = {'loss':'binary_crossentropy', 'optimizer':'adam'}
trainParams = {'epochs' : 1, 'batch_size' : 256, 'verbose' : 0}
modelParams = {'version':classModel, 'nIn':len(classTrainFeatures), 'compileArgs':compileArgs}
print "\nTraining on", len(classTrainFeatures), "features:", [var for var in classTrainFeatures]
# -
# ## Import data
# Open the pre-split HDF5 datasets read/write ("r+" lets predictions be written back).
trainData = h5py.File(dirLoc + 'train.hdf5', "r+")
valData = h5py.File(dirLoc + 'val.hdf5', "r+")
# ## Determine LR
# LR range test over [1e-5, 1e-2]; the value below comes from inspecting its plot.
lrFinder = batchLRFindClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, lrBounds=[1e-5,1e-2], trainOnWeights=False, verbose=0)
compileArgs['lr'] = 1e-3
# ## Train classifier
results, histories = batchTrainClassifier(trainData, nSplits, getClassifier, modelParams, trainParams, patience=20, cosAnnealMult=2, trainOnWeights=False, maxEpochs=maxEpochs, verbose=1)
# ## Construct ensemble
# Reload the per-fold results written during training (Python 2 text-mode pickle).
with open('train_weights/resultsFile.pkl', 'r') as fin:
    results = pickle.load(fin)
ensemble, weights = assembleEnsemble(results, ensembleSize, ensembleMode, compileArgs)
# ## Response on development data
batchEnsemblePredict(ensemble, weights, trainData, ensembleSize=10, verbose=1)
print 'Training ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData)),
                                                            roc_auc_score(getFeature('targets', trainData), getFeature('pred', trainData), sample_weight=getFeature('weights', trainData)))
# ## Response on val data
batchEnsemblePredict(ensemble, weights, valData, ensembleSize=10, verbose=1)
print 'Testing ROC AUC: unweighted {}, weighted {}'.format(roc_auc_score(getFeature('targets', valData), getFeature('pred', valData)),
                                                           roc_auc_score(getFeature('targets', valData), getFeature('pred', valData), sample_weight=getFeature('weights', valData)))
# ## Evaluation
# ### Import in dataframe
def convertToDF(datafile, columns={'gen_target', 'gen_weight', 'pred_class'}, nLoad=-1):
    """Build a pandas DataFrame of targets, weights, and predictions from an HDF5 file.

    NOTE(review): `columns` is an unused (and mutable) default argument; the
    loaded columns are hard-coded below. nLoad=-1 loads every candidate.
    """
    data = pandas.DataFrame()
    data['gen_target'] = getFeature('targets', datafile, nLoad)
    data['gen_weight'] = getFeature('weights', datafile, nLoad)
    data['pred_class'] = getFeature('pred', datafile, nLoad)
    print len(data), "candidates loaded"
    return data
# Convert both samples to DataFrames and build signal/background selection masks.
devData = convertToDF(trainData)
valData = convertToDF(valData)
sigVal = (valData.gen_target == 1)
bkgVal = (valData.gen_target == 0)
# ### MVA distributions
getClassPredPlot([valData[bkgVal], valData[sigVal]], weightName='gen_weight')
# +
def AMS(s, b):
    """ Approximate Median Significance:
            AMS = sqrt( 2 { (s + b + b_r) log[1 + (s/(b+b_r))] - s} )
        with a regularisation term b_r = 10 added to the background;
        s = signal yield, b = background yield, log is the natural logarithm """
    br = 10.0
    radicand = 2 * ((s + b + br) * math.log(1.0 + s / (b + br)) - s)
    if radicand < 0:
        # Degenerate input: bail out rather than take sqrt of a negative.
        print('radicand is negative. Exiting')
        exit()
    return math.sqrt(radicand)
def amsScan(inData, res=0.0001):
    """Scan MVA-cut thresholds in [0, 1] and print the cut with the best AMS.

    NOTE(review): relies on the notebook-global masks sigVal/bkgVal instead of
    deriving them from inData, and passes a float count (1./res) to
    np.linspace, which newer numpy versions reject — confirm before reuse.
    """
    best = [0,-1]
    for i in np.linspace(0.,1.,1./res):
        ams = AMS(np.sum(inData.loc[(inData['pred_class'] >= i) & sigVal, 'gen_weight']),
                  np.sum(inData.loc[(inData['pred_class'] >= i) & bkgVal, 'gen_weight']))
        if ams > best[1]:
            best = [i, ams]
    print best
# -
amsScan(valData)
# ## Save classified data
# Persist per-candidate predictions for downstream comparison.
name = dirLoc + signal + "_" + channel + "_" + varSet + '_' + classModel + '_classifiedData.csv'
print "Saving data to", name
valData.to_csv(name, columns=['gen_target', 'gen_weight', 'gen_sample', 'pred_class'])
# ## Save/load
# The ensemble itself is stored under a name encoding the full configuration.
name = "weights/DNN_" + signal + "_" + channel + "_" + varSet + '_' + classModel
print name
# ### Save
saveEnsemble(name, ensemble, weights, compileArgs, overwrite=1)
# ### Load
ensemble, weights, compileArgs, inputPipe, outputPipe = loadEnsemble(name)
| Classifiers/Day1/Model_0_Basic_Features_256_1e-3_swish_mult2_patience20.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torchvision.models as models
from torch.nn.utils.rnn import pack_padded_sequence
class EncoderCNN(nn.Module):
    def __init__(self, embed_size):
        """Load the pretrained ResNet-152 and replace top fc layer."""
        super(EncoderCNN, self).__init__()
        resnet = models.resnet152(pretrained=True)
        modules = list(resnet.children())[:-1]      # delete the last fc layer.
        self.resnet = nn.Sequential(*modules)
        # Project the ResNet feature vector down to the caption embedding size.
        self.linear = nn.Linear(resnet.fc.in_features, embed_size)
        # BatchNorm stabilises the embedding distribution during training.
        self.bn = nn.BatchNorm1d(embed_size, momentum=0.01)

    def forward(self, images):
        """Extract feature vectors from input images."""
        # The CNN backbone is frozen here: no gradients flow into ResNet.
        with torch.no_grad():
            features = self.resnet(images)
        features = features.reshape(features.size(0), -1)
        features = self.bn(self.linear(features))
        return features
class DecoderRNN(nn.Module):
    def __init__(self, embed_size, hidden_size, vocab_size, num_layers, max_seq_length=20):
        """Set the hyper-parameters and build the layers."""
        super(DecoderRNN, self).__init__()
        # Token-id -> embedding lookup for caption words.
        self.embed = nn.Embedding(vocab_size, embed_size)
        self.lstm = nn.LSTM(embed_size, hidden_size, num_layers, batch_first=True)
        # Projects LSTM hidden states onto vocabulary logits.
        self.linear = nn.Linear(hidden_size, vocab_size)
        self.max_seg_length = max_seq_length

    def forward(self, features, captions, lengths):
        """Decode image feature vectors and generates captions."""
        embeddings = self.embed(captions)
        # Prepend the image feature as the first "token" of each sequence.
        embeddings = torch.cat((features.unsqueeze(1), embeddings), 1)
        # Packing skips the padded positions during the LSTM pass.
        packed = pack_padded_sequence(embeddings, lengths, batch_first=True)
        hiddens, _ = self.lstm(packed)
        # hiddens[0] is the packed data tensor; project it to vocabulary logits.
        outputs = self.linear(hiddens[0])
        return outputs

    def sample(self, features, states=None):
        """Generate captions for given image features using greedy search."""
        sampled_ids = []
        inputs = features.unsqueeze(1)
        for i in range(self.max_seg_length):
            hiddens, states = self.lstm(inputs, states)  # hiddens: (batch_size, 1, hidden_size)
            outputs = self.linear(hiddens.squeeze(1))    # outputs: (batch_size, vocab_size)
            # Greedy decoding: take the arg-max token at every step.
            _, predicted = outputs.max(1)                # predicted: (batch_size)
            sampled_ids.append(predicted)
            inputs = self.embed(predicted)               # inputs: (batch_size, embed_size)
            inputs = inputs.unsqueeze(1)                 # inputs: (batch_size, 1, embed_size)
        sampled_ids = torch.stack(sampled_ids, 1)        # sampled_ids: (batch_size, max_seq_length)
        return sampled_ids
| 03-advanced/image_captioning/model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
# > Notebook setup
# ### Notebook functions and setup
# The following are functions that are used within the notebook to reduce and DRY.
# +
# %load_ext autoreload
# %autoreload 2
#eventually mode all but essential local to function blocks.
#import datetime
import logging
#import math
import os
import sys
#from datetime import datetime, timedelta
#import matplotlib.dates as mdates
#import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
#import seaborn as sns
#import ta
#from sklearn import svm
#from sklearn.metrics import (explained_variance_score, mean_squared_error,
# r2_score)
#from sklearn.model_selection import GridSearchCV
#from sklearn.svm import SVR
#from ta import add_all_ta_features
from src.features.build_features import StockTechnicals
from src.models.backtest_strategy import TradeHoldStrategy
from src.visualization.visualize import DisplayTicker
# Make the project root importable when running from the notebooks directory.
nb_dir = os.path.split(os.getcwd())[0]
if nb_dir not in sys.path:
    sys.path.append(nb_dir)
# Route log records to stdout so they appear inline in the notebook.
logging.basicConfig(
    stream=sys.stdout,
    level=logging.INFO) #DEBUG to see all.
logger = logging.getLogger('NOTEBOOK_LOGGER')
# functions...
## pulling data.
def pull_data_files(data_path, symbols, start, end):
    """Download daily OHLCV history for each ticker from Yahoo Finance.

    data_path : directory to write one CSV per symbol (created if missing)
    symbols   : iterable of ticker strings ('^' is mapped to '_' in filenames)
    start/end : epoch-second bounds for the requested history
    """
    import os
    import requests
    os.makedirs(data_path, exist_ok=True)
    parms = { 'period1': start, 'period2':end, 'interval': '1d', 'events': 'history' }
    base_url = 'https://query1.finance.yahoo.com/v7/finance/download/{}'
    for s in symbols:
        r = requests.get(base_url.format(s), params=parms)
        logger.debug('calling: {}'.format(r.url))
        filename = '{}/{}.csv'.format(data_path, s).replace('^', '_')
        # Stream the response body to disk in small chunks.
        with open(filename, 'wb') as fd:
            for chunk in r.iter_content(chunk_size=128):
                fd.write(chunk)
    logger.info('files downloaded to {}'.format(data_path))
def get_epoch_date(y,m,d):
    """Return the Unix timestamp (whole seconds, truncated toward zero)
    for local midnight on year y, month m, day d."""
    from datetime import datetime as _dt
    return int(_dt(y, m, d).timestamp())
def get_color_palette():
    """Return the six-color 'flatui' palette (from the seaborn docs)."""
    flatui = ["#9b59b6", "#3498db", "#95a5a6", "#e74c3c", "#34495e", "#2ecc71"]
    return flatui
def convert_data_add_features(stock_ticker_csv, start_date, end_date, close_feature='Close'):
    """
    stock_ticker_csv : csv file
    start_date, end_date : 'YYYY-MM-DD' strings bounding the slice kept
    close_feature : column used as the price series for the indicators
    Takes in a CSV File and converts the date to numbers
    Returns DF with Dates converted to numerical format and the original Dates for plotting
    Adds some features to the DataFrame (SMA-20, Bollinger bands, MACD)
    """
    from datetime import datetime, timedelta
    import matplotlib.dates as mdates
    import ta
    start_date = datetime.strptime(start_date, '%Y-%m-%d')
    end_date = datetime.strptime(end_date, '%Y-%m-%d')
    # Load the CSV File
    stock_ticker = pd.read_csv(stock_ticker_csv, index_col=['Date'], parse_dates=['Date'])
    # NOTE(review): sort_values without inplace/assignment has no effect here — confirm intent.
    stock_ticker.sort_values('Date')
    stock_ticker = stock_ticker[start_date: end_date]
    #print(stock_ticker.info())
    stock_ticker.reset_index(inplace=True)
    data_df = stock_ticker.copy()
    data_df = data_df.reset_index()
    org_dates = data_df['Date']
    # Keep a human-readable copy of the dates for plotting.
    data_df['Pretty Date'] = data_df['Date']
    # Converting Dates to Numbers - SVR doesn't work with dates
    data_df['Date'] = data_df['Date'].map(mdates.date2num)
    indicator_bb = ta.volatility.BollingerBands(close=data_df[close_feature], n=20, ndev=2)
    indicator_SMA20 = ta.trend.SMAIndicator(close=data_df[close_feature],n=20, fillna=True)
    indicator_MACD = ta.trend.MACD(close=data_df[close_feature],n_fast=5, n_slow=30, fillna=True)
    # Features added to original date
    data_df['SMA_20'] = indicator_SMA20.sma_indicator()
    data_df['bb_bbm'] = indicator_bb.bollinger_mavg()
    data_df['bb_bbh'] = indicator_bb.bollinger_hband()
    data_df['bb_bbl'] = indicator_bb.bollinger_lband()
    data_df['MACD'] = indicator_MACD.macd()
    data_df['MACD signal'] = indicator_MACD.macd_signal()
    # Return DF with Dates converted to numerical format and the original Dates for plotting
    return data_df, org_dates
# support vector grid search parms
def svc_param_selection(X, y, nfolds, model):
    """Grid-search C and gamma for an SVR with the given kernel.

    X, y   : training features and target
    nfolds : number of cross-validation folds
    model  : kernel name passed to sklearn.svm.SVR (e.g. 'rbf')
    Returns the best parameter dict found by cross-validated grid search.
    """
    from sklearn.model_selection import GridSearchCV
    from sklearn import svm
    Cs = [10, 100, 1000, 10000, 100000]
    gammas = [0.00001, 0.0001, 0.001, 0.01, 0.1, 1]
    param_grid = {'C': Cs, 'gamma' : gammas}
    grid_search = GridSearchCV(svm.SVR(kernel=model), param_grid, cv=nfolds)
    grid_search.fit(X, y)
    # Removed a no-op bare `grid_search.best_params_` expression and an unused
    # `from sklearn.svm import SVR` import that were here.
    return grid_search.best_params_
def predict_multiple_variables(ticker_df, dates_org, num_forecast_days, C_user=None, G_user=None, model = 'rbf', close_feature='Close'):
    """Train an SVR on the enriched ticker frame and forecast the last
    `num_forecast_days` closing prices, printing metrics and plotting
    train-fit and forecast-vs-actual curves.

    C_user/G_user : SVR hyper-parameters; when either is None, both are
                    taken from a 3-fold grid search (which runs regardless).
    """
    from sklearn.svm import SVR
    from sklearn.metrics import (explained_variance_score, mean_squared_error,
                                 r2_score)
    import matplotlib.pyplot as plt
    import matplotlib.dates as mdates
    features=['Date','SMA_20','Open','Volume', 'bb_bbl', 'MACD']
    num_training_days = len(ticker_df)
    days_to_predict = dates_org[-num_forecast_days:].dt.date
    print("=" * 100)
    print("Predicting values for the following dates: ")
    for d in days_to_predict:
        print(f'{d}')
    print("=" * 100)
    #print(f'{type(dates_org[-num_forecast_days:])}')
    #print(f'{type(dates_org[-num_forecast_days:].values)}')
    #print(dates_org[-num_forecast_days:])
    # Training window is everything except the final forecast days.
    train_x = ticker_df[-(num_training_days+num_forecast_days):-num_forecast_days][features]
    # Seperate Predicted variable
    train_y = ticker_df[-(num_training_days+num_forecast_days):-num_forecast_days][[close_feature]]
    # Split off test days for forecasting
    test_x = ticker_df[-num_forecast_days:][features]
    test_y = ticker_df[-num_forecast_days:][[close_feature]]
    best_params = svc_param_selection(train_x, train_y.values.reshape(-1,), 3, model=model)
    if C_user is None or G_user is None:
        C_user = best_params["C"]
        G_user = best_params["gamma"]
        print(f'Using Grid Search Values C {C_user} and gamma {G_user} values')
    else:
        print(f'Using user provided C {C_user} and gamma {G_user} values')
    # Fit training data using SVR/RBF
    svr_rbf = SVR(kernel= model, C=C_user, gamma= G_user)
    svr_rbf.fit(train_x, train_y.values.reshape(-1,))
    y_pred = svr_rbf.predict(test_x)
    print("=" * 100)
    print("Predicted Values: {}".format(y_pred))
    print("-" * 100)
    print(f"Actual Values: {np.squeeze(test_y.values)}")
    print("-" * 100)
    print(f"Model Score: {svr_rbf.score(train_x, train_y)}")
    print("-" * 100)
    print(f"Mean Squared Error (MSE): {mean_squared_error(test_y, y_pred)}")
    print("-" * 100)
    print(f"RMSE: {np.sqrt(mean_squared_error(test_y, y_pred))}")
    print("=" * 100)
    # Plot 1: model fit over the training window vs the actual series.
    plt.figure(figsize = (12,6))
    plt.plot(ticker_df['Pretty Date'][-(num_training_days+num_forecast_days):-num_forecast_days],
             svr_rbf.predict(train_x),
             color= 'red', label= 'Train Data')
    plt.plot(ticker_df['Pretty Date'][-(num_training_days+num_forecast_days):-num_forecast_days],
             train_y ,
             color= 'black',
             label= 'Actual Data')
    plt.legend()
    plt.show()
    # Plot 2: forecast days only, with daily date ticks.
    plt.figure(figsize = (12,6))
    ax = plt.gca()
    formatter = mdates.DateFormatter("%Y-%m-%d")
    ax.xaxis.set_major_formatter(formatter)
    locator = mdates.DayLocator()
    ax.xaxis.set_major_locator(locator)
    #print("xmin: {}".format(num_forecast_days:num_forecast_days+1 ))
    # if the forecast is one day, it will not plot a line
    if num_forecast_days == 1:
        plt.xlim(xmin=ticker_df['Pretty Date'][-num_forecast_days:], xmax=ticker_df['Pretty Date'][-1:])
        plt.scatter(ticker_df['Pretty Date'][-num_forecast_days:], svr_rbf.predict(test_x),
                    color= 'green',
                    label= 'Predicted Data')
        plt.scatter(ticker_df['Pretty Date'][-num_forecast_days:], test_y,
                    color= 'black',
                    label= 'Actual')
    # Otherwise, plot a line for x > 1
    else:
        plt.xlim(xmin=ticker_df['Pretty Date'][-num_forecast_days:-num_forecast_days+1], xmax=ticker_df['Pretty Date'][-1:])
        plt.plot(ticker_df['Pretty Date'][-num_forecast_days:], svr_rbf.predict(test_x),
                 color= 'green',
                 label= 'Predicted Data')
        plt.plot(ticker_df['Pretty Date'][-num_forecast_days:], test_y,
                 color= 'black',
                 label= 'Actual')
    #print("X max: {}, X Min: {}".format(xmax, xmin))
    plt.legend()
    plt.show()
def corr_plot(df):
    """Heatmap of pairwise correlations between the numeric features of df.

    The bookkeeping columns ('index', 'Date', 'Pretty Date') are dropped
    first, since their correlations carry no analytical meaning.
    Fix: seaborn was imported twice under two aliases (sns and sn); a
    single import is used now.
    """
    import seaborn as sns
    import matplotlib.pyplot as plt
    corrMatrix = df.drop(columns=['index','Date','Pretty Date']).corr()
    #print(corrMatrix.shape)
    plt.figure(figsize = (18,18))
    sns.heatmap(corrMatrix,
                annot=True,
                vmin = -1,
                vmax=1,
                center=0,
                fmt='.2g',
                linewidths=10,
                annot_kws={"size": 8},
                cbar_kws={"shrink": 1.0},
                cmap=get_color_palette(), # sns.diverging_palette(20, 220, n=200),
                square=True)
    plt.show()
# -
# ## Download the data from the Market Data Provider
#
# > Note: this uses a local data path setting
# +
## setup for file download to local path -- this is used for other
local_data_path = './.data'
## dates in epoch time
start_epoch = get_epoch_date(2008,3,30)
end_epoch = get_epoch_date(2020,3,1)
# our symbols as needed by the provider. NOTE ^GSPC becomes _GSPC
symbols = ['MSFT', 'AAPL', 'GOOG', '^GSPC', 'AMZN' ]
# the actual pull: one CSV per symbol is written under local_data_path
pull_data_files(local_data_path, symbols, start_epoch, end_epoch)
# -
# ## Feature enrichment
#
# This step changes dates to serial numbers and adds several features
#
# - SMA 20
# - bollinger band for 20 days:
# - Moving Average
# - High and Low band
# - MACD
#
# +
## setup some data frames for modeling
# NOTE: this first date range is immediately overwritten by the shorter one below.
start_date = '2019-01-31'
end_date = '2020-03-31'
#file_target = '{}/{}.csv'
#MSFT, MSFT_dates = convert_data_add_features(file_target.format(local_data_path, 'MSFT'),start_date, end_date)
#AAPL, AAPL_dates = convert_data_add_features(file_target.format(local_data_path, 'AAPL'),start_date, end_date)
#AMZN, AMZN_dates = convert_data_add_features(file_target.format(local_data_path, 'AMZN'),start_date, end_date)
#GOOG, GOOG_dates = convert_data_add_features(file_target.format(local_data_path, 'GOOG'),start_date, end_date)
#_GSPC, _GSPC_dates = convert_data_add_features(file_target.format(local_data_path, '_GSPC'),start_date, end_date)
start_date = '2019-10-31'
end_date = '2019-12-31'
# Enriched (indicator-augmented) frames plus the original date series per ticker.
MSFT, MSFT_dates = convert_data_add_features('./.data/MSFT.csv',start_date, end_date)
AAPL, AAPL_dates = convert_data_add_features('./.data/AAPL.csv',start_date, end_date)
AMZN, AMZN_dates = convert_data_add_features('./.data/AMZN.csv',start_date, end_date)
GOOG, GOOG_dates = convert_data_add_features('./.data/GOOG.csv',start_date, end_date)
_GSPC, _GSPC_dates = convert_data_add_features('./.data/_GSPC.csv',start_date, end_date)
# -
# # Basic Exploratory Analysis
#
# >TODO: what kind of diagrams, tables, etc. to put here?
corr_plot(MSFT)
# # Feature Wrangling and Setup
# ## Predictions
#
# ### MSFT Predictions
predict_multiple_variables(MSFT,MSFT_dates,num_forecast_days=5, C_user= 100, G_user=.009, model = 'rbf')
# ### AAPL Predictions
corr_plot(AAPL)
# No C/gamma given: grid-search values are used for AAPL.
predict_multiple_variables(AAPL,AAPL_dates,num_forecast_days=5, model='rbf')
corr_plot(AMZN)
predict_multiple_variables(AMZN,AMZN_dates,num_forecast_days=5, C_user=100, G_user=.009)
# +
corr_plot(GOOG)
# -
predict_multiple_variables(GOOG, GOOG_dates, num_forecast_days=5, C_user=120, G_user=.001,model = 'rbf')
# # Model Creation and Tuning
# # Results and Comparison
# # Summary
| notebooks/a-final-notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
# 
# 
# 
def squares(n):
    """Lazily yield the squares 0**2, 1**2, ..., (n-1)**2."""
    for value in range(n):
        yield value ** 2
list(squares(5))
min(squares(5))
max(squares(5))
sum(squares(5))
# A generator is single-use: after min() consumes it, next() raises StopIteration.
sq = squares(5)
min(sq)
next(sq)
# Truthiness demos: non-zero numbers are truthy, zero (of any type) is falsy...
bool(10)
bool(0+0j)
# ...and a non-empty container is truthy even when its only element is falsy.
bool([]),bool([0]),bool([None]),bool({})
# Generators are always truthy, even once exhausted.
sq = squares(5)
min(sq)
bool(sq)
# With neither __bool__ nor __len__ defined, instances default to truthy.
class Person:
    pass
p = Person()
bool(p)
# Defining __bool__ overrides the default: every Person is now falsy.
class Person:
    def __bool__(self):
        return False
p = Person()
bool(p)
class MySeq:
    """Minimal sequence-like class: truthiness falls back to __len__."""
    def __init__(self,n):
        self.n = n
    def __len__(self):
        # bool(my_seq) uses this when __bool__ is absent: length 0 -> False.
        return self.n
    def __getitem__(self, index):
        # Fixed: __getitem__ must accept an index argument (it was missing,
        # so any my_seq[i] access raised TypeError).  Still returns None.
        pass
my_seq = MySeq(0)
# len() == 0 makes the instance falsy.
bool(my_seq)
my_seq = MySeq(10)
bool(my_seq)
# any(): True if at least one element is truthy; all(): True only if every element is.
any([0,'',None])
any([0,1,None])
all([1,2,3,4,5,0])
all([10,'Hello'])
# #### Example1
from numbers import Number # <--use this library if u need to know if it's numerical or not
isinstance(10,Number)
isinstance(10.5, Number)
from decimal import Decimal
isinstance(Decimal('10.5'),Number)
l = [10,20,30,40]
# Manual check: loop with an early break once a non-number is found.
is_all_numbers = True
for item in l:
    if not isinstance(item,Number):
        is_all_numbers = False
        break
is_all_numbers
# all() on the raw values tests truthiness, not numeric-ness (a 0 would fail).
all(l)
all(i > 35 for i in l)
def is_numeric(v):
    """Return True when *v* is numeric (int, float, complex, Decimal, ...)
    according to the numbers.Number ABC, else False."""
    verdict = isinstance(v, Number)
    return verdict
# Two equivalent ways to build a list of "is this item numeric?" flags.
pred_l1 = list(map(lambda x: isinstance(x,Number),l))
pred_l2 = [is_numeric(i) for i in l]
pred_l1, pred_l2
all(pred_l1), all(pred_l2)
# With a non-numeric item, all(...) over the predicate map is False.
l = [10,20,30,0, 'hello']
all(map(lambda x: isinstance(x,Number),l))
# ### Example2
# Lazily feed file lines straight into all()/any() — no list is materialized,
# and evaluation short-circuits as soon as the answer is known.
with open('car-brands.txt','r', encoding='utf-8',errors='ignore') as f:
    for row in f:
        # Each row keeps its trailing newline, which counts toward len(row).
        print(len(row), row, end = '')
with open('car-brands.txt','r', encoding='utf-8',errors='ignore') as f:
    result = all(map(lambda row: len(row) >= 4, f))
    print(result)
with open('car-brands.txt','r', encoding='utf-8',errors='ignore') as f:
    result = any(map(lambda row: len(row) > 10, f))
    print(result)
with open('car-brands.txt','r', encoding='utf-8',errors='ignore') as f:
    result = all((len(row) >= 4 for row in f))
    print(result)
| 9. Aggregators- All, Any, and others.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import category_encoders as ce
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import classification_report,confusion_matrix
# ### preprocessing
# [click](https://scikit-learn.org/stable/modules/preprocessing.html)
# +
# Silence library warnings for cleaner notebook output.
import warnings
warnings.filterwarnings('ignore')
# Local paths to the crash-severity train/test CSVs.
train_dir='E:/crash_prediction/train.csv'
test_dir='E:/crash_prediction/test.csv'
# Column names assigned to both frames below; 'Severity' is the target and
# the test file has no Severity column, hence col[1:] for it.
col=['Severity','Safety_Score','days','complaints','Control_Metric','turbulence','temp','code','Max_Elevation','Violations','weather','id']
df = pd.read_csv(train_dir)
df_test= pd.read_csv(test_dir)
# +
import seaborn as sns
import matplotlib.pyplot as plt
import seaborn as sns
# Correlation heatmap of the raw training columns.
corr=df.corr()
sns.heatmap(corr, xticklabels=corr.columns.values, yticklabels=corr.columns.values)
# +
df.columns=col
df_test.columns=col[1:]
df_test.head(5)
# Drop the identifier column — it carries no predictive signal.
train = df.drop( ['id'] ,axis=1)
test = df_test.drop( ['id'] ,axis=1)
# -
train.head(5)
# +
from sklearn.model_selection import train_test_split
# Hold out 20% for validation (seed 10 split, used only for the
# chi-squared feature-selection step below).
X = train.drop(['Severity'], axis = 1)
Y = train['Severity']
X_Train1, X_Test1, Y_Train1, Y_Test1 = train_test_split(X, Y, test_size = 0.20, random_state = 10)
# -
# ## Selecting K best feature
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# Rank features by the chi-squared statistic against the target.
# NOTE(review): chi2 requires non-negative features; this runs on the
# unstandardized data — presumably all non-negative, verify.
bestfeatures = SelectKBest(score_func=chi2, k=8)
fit = bestfeatures.fit(X_Train1, Y_Train1)
dfscores = pd.DataFrame(fit.scores_)
dfcolumns = pd.DataFrame(X.columns)
featureScores = pd.concat([dfcolumns,dfscores],axis=1)
featureScores.columns = ['features','Score'] #naming the dataframe columns
featureScores
print(featureScores.nlargest(10,'Score')) #print 10 best features
# +
# STANDARDIZATION OF DATA
# Z-score each numeric column (subtract its mean, divide by its std).
# The loop applies exactly the per-column transformation that was previously
# written out line-by-line, to both the train and the test frames — each
# frame is standardized with its own statistics, as before.
numeric_cols = ['Safety_Score', 'Control_Metric', 'Max_Elevation', 'weather',
                'days', 'complaints', 'code', 'Violations', 'turbulence', 'temp']
for frame in (train, test):
    for column in numeric_cols:
        frame[column] = (frame[column] - frame[column].mean()) / frame[column].std()
# -
# ### PCA
# [click](https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.PCA.html)
# +
from sklearn.model_selection import train_test_split
# Re-split after standardization (seed 0 this time).  The commented-out
# PCA experiment was abandoned and kept for reference.
X = train.drop(['Severity'], axis = 1)
Y = train['Severity']
#from sklearn.decomposition import PCA
#pca = PCA(svd_solver='auto',n_components =5)
#X_pca = pca.fit_transform(X)
#test_final=test.drop(['temp'], axis = 1)
#Test_pca=pca.fit_transform(test_final)
X_Train, X_Test, Y_Train, Y_Test = train_test_split(X, Y, test_size = 0.20, random_state = 0)
# -
# Ridge classifier with a fixed alpha, evaluated on the hold-out split.
from sklearn.linear_model import RidgeClassifier
Ridge_classifier = RidgeClassifier(alpha=10,solver='svd').fit(X_Train, Y_Train)
predictions =Ridge_classifier.predict(X_Test)
print(confusion_matrix(Y_Test,predictions))
print(classification_report(Y_Test,predictions))
# Ridge with built-in cross-validated alpha selection.
from sklearn.linear_model import RidgeClassifierCV
Ridge_cv = RidgeClassifierCV(alphas=[1e-3, 1e-2, 1e-1, 1,10]).fit(X_Train, Y_Train)
predictions1 =Ridge_cv.predict(X_Test)
print(confusion_matrix(Y_Test,predictions1))
print(classification_report(Y_Test,predictions1))
from sklearn.linear_model import LogisticRegression
trainedmodel = LogisticRegression(penalty='l2',C=1.0,solver='sag').fit(X_Train,Y_Train)
predictions6 =trainedmodel.predict(X_Test)
print(confusion_matrix(Y_Test,predictions6))
print(classification_report(Y_Test,predictions6))
from sklearn.linear_model import LogisticRegressionCV
# NOTE(review): despite the variable name, this fits a plain
# LogisticRegression, not LogisticRegressionCV — the import is unused here.
logisticRegressionCV = LogisticRegression().fit(X_Train,Y_Train)
predictions7 =logisticRegressionCV.predict(X_Test)
print(confusion_matrix(Y_Test,predictions7))
print(classification_report(Y_Test,predictions7))
from sklearn import linear_model
# Linear model trained with stochastic gradient descent.
sgd_classifier = linear_model.SGDClassifier(max_iter=1000, tol=1e-3)
sgd_classifier.fit(X_Train,Y_Train)
predictions8 =sgd_classifier.predict(X_Test)
print(confusion_matrix(Y_Test,predictions8))
print(classification_report(Y_Test,predictions8))
from sklearn.linear_model import Perceptron
# Perceptron baseline on the same split.
percep= Perceptron(tol=1e-2, random_state=0)
percep.fit(X_Train,Y_Train)
# Bug fix: the original predicted with `sgd_classifier`, so the Perceptron
# was trained but its results were never reported.
predictions9 =percep.predict(X_Test)
print(confusion_matrix(Y_Test,predictions9))
print(classification_report(Y_Test,predictions9))
from sklearn.svm import SVC
# RBF-kernel SVM with a one-vs-one multiclass decision function.
svc_clf = SVC(gamma='auto',C=10,kernel='rbf',decision_function_shape='ovo').fit(X_Train,Y_Train)
predictions10 =svc_clf.predict(X_Test)
print(confusion_matrix(Y_Test,predictions10))
print(classification_report(Y_Test,predictions10))
# Abandoned SVC grid-search experiment, kept for reference.
#param_grid = [
# {'C': [1, 10, 100, 1000], 'kernel': ['linear']},
#{'C': [1, 10, 100, 1000], 'gamma': [0.001, 0.0001], 'kernel': ['rbf']},
#]
#grid_search0=GridSearchCV(svc_clf,param_grid=param_grid)
#grid_search0.fit(X_Train, Y_Train)
# +
#grid_search0.best_estimator_
# -
#svc_clf = SVC(C=1000, cache_size=200, class_weight=None, coef0=0.0,
# decision_function_shape='ovr', degree=3, gamma=0.4, kernel='sigmoid',
# max_iter=-1, probability=False, random_state=None, shrinking=True,
# tol=0.001, verbose=False).fit(X_Train,Y_Train)
# +
#prediction_svc = svc_clf.predict(X_Test)
#print(confusion_matrix(Y_Test,prediction_svc))
#print(classification_report(Y_Test,prediction_svc))
# -
# Linear SVM (liblinear-based), usually much faster than kernel SVC.
from sklearn import svm
trainedsvm = svm.LinearSVC(C=1,penalty='l2').fit(X_Train, Y_Train)
predictionsvm = trainedsvm.predict(X_Test)
print(confusion_matrix(Y_Test,predictionsvm))
print(classification_report(Y_Test,predictionsvm))
from sklearn.neighbors import KNeighborsClassifier
# k-nearest-neighbours baseline.
knn = KNeighborsClassifier(n_neighbors = 4,algorithm='auto',leaf_size=70)
knn.fit(X_Train, Y_Train)
Y_pred = knn.predict(X_Test)
print(confusion_matrix(Y_Test,Y_pred))
print(classification_report(Y_Test,Y_pred))
from sklearn.naive_bayes import GaussianNB
# Gaussian naive-Bayes baseline.
trainednb = GaussianNB().fit(X_Train, Y_Train)
predictionnb = trainednb.predict(X_Test)
print(confusion_matrix(Y_Test,predictionnb))
print(classification_report(Y_Test,predictionnb))
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier
# Shallow decision-tree baseline.
DT_clf = DecisionTreeClassifier(random_state=10,max_depth=2)
DT_clf.fit(X_Train, Y_Train)
# Bug fix: the original evaluated `trainednb` (the GaussianNB model) here,
# so the decision tree's predictions were never actually reported.
predictionDT = DT_clf.predict(X_Test)
print(confusion_matrix(Y_Test,predictionDT))
print(classification_report(Y_Test,predictionDT))
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import classification_report, confusion_matrix, roc_curve, auc
# Sweep the learning rate to compare train vs. validation accuracy.
learning_rates = [0.05, 0.1, 0.25, 0.5, 0.75, 1]
for learning_rate in learning_rates:
    gb = GradientBoostingClassifier(n_estimators=1000, learning_rate = learning_rate, max_depth = 9, random_state = 0)
    gb.fit(X_Train, Y_Train)
    print("Learning rate: ", learning_rate)
    print("Accuracy score (training): {0:.3f}".format(gb.score(X_Train, Y_Train)))
    print("Accuracy score (validation): {0:.3f}".format(gb.score(X_Test, Y_Test)))
    print()
# +
# Final gradient-boosting model with the chosen hyper-parameters.
gb = GradientBoostingClassifier(n_estimators=1100, learning_rate = 0.25, max_depth =7 , random_state = 0)
gb.fit(X_Train, Y_Train)
predictions = gb.predict(X_Test)
print("Confusion Matrix:")
print(confusion_matrix(Y_Test, predictions))
print()
print("Classification Report")
print(classification_report(Y_Test, predictions))
# -
# Predict on the real test set and write a Kaggle-style submission file.
prediction_gb_final=gb.predict(test)
submission = pd.DataFrame({
        "Accident_ID": df_test["id"],
        "Severity": prediction_gb_final
    })
submission.to_csv('E:/crash_prediction/s11.csv', index=False)
from sklearn.ensemble import BaggingClassifier
from sklearn.neighbors import KNeighborsClassifier
# Bagging ensemble around the tuned gradient-boosting model.
bagging = BaggingClassifier(gb,max_samples=0.7,random_state=0).fit(X_Train, Y_Train)
prediction_bag = bagging.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_bag))
print(classification_report(Y_Test,prediction_bag))
from sklearn.ensemble import RandomForestClassifier
# Random-forest baseline with default hyper-parameters.
trainedforest = RandomForestClassifier().fit(X_Train,Y_Train)
predictionforest = trainedforest.predict(X_Test)
print(confusion_matrix(Y_Test,predictionforest))
print(classification_report(Y_Test,predictionforest))
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
# Random search over forest hyper-parameters (5 draws, 5-fold CV).
params1={
 "max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
 "n_estimators" : [100,300,500,700,800,1000],
 "max_features" : [4,6,8,10]
    }
random_search0=RandomizedSearchCV(trainedforest,param_distributions=params1,n_iter=5,n_jobs=-1,cv=5,verbose=3)
random_search0.fit(X_Train, Y_Train)
random_search0.best_estimator_
# Refit a forest with the parameters found by the search above.
trainedforest = RandomForestClassifier(bootstrap=True, ccp_alpha=0.0, class_weight=None,
                       criterion='gini', max_depth=15, max_features=6,
                       max_leaf_nodes=None, max_samples=None,
                       min_impurity_decrease=0.0, min_impurity_split=None,
                       min_samples_leaf=1, min_samples_split=2,
                       min_weight_fraction_leaf=0.0, n_estimators=500,
                       n_jobs=None, oob_score=False, random_state=None,
                       verbose=0, warm_start=False).fit(X_Train,Y_Train)
predictionforest = trainedforest.predict(X_Test)
print(confusion_matrix(Y_Test,predictionforest))
print(classification_report(Y_Test,predictionforest))
from sklearn.ensemble import ExtraTreesClassifier
# Extremely-randomized-trees ensemble.
clf_extra_tree = ExtraTreesClassifier(n_estimators=1000,random_state=0,max_features=10,max_depth=10)
clf_extra_tree.fit(X_Train,Y_Train)
# Bug fix: the original predicted with `trainedforest` (the RandomForest),
# so the ExtraTrees model was never actually evaluated.
predictionforest_extra = clf_extra_tree.predict(X_Test)
print(confusion_matrix(Y_Test,predictionforest_extra))
print(classification_report(Y_Test,predictionforest_extra))
from sklearn.ensemble import AdaBoostClassifier
# AdaBoost ensemble.
clf_ada = AdaBoostClassifier(n_estimators=1000, random_state=0)
clf_ada.fit(X_Train,Y_Train)
# Bug fix: the original predicted with `trainedforest` (the RandomForest),
# so the AdaBoost model was never actually evaluated.
prediction_ada = clf_ada.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_ada))
print(classification_report(Y_Test,prediction_ada))
from sklearn.multiclass import OneVsRestClassifier
# One-vs-rest wrapper around the tuned gradient-boosting model.
clf_ovr = OneVsRestClassifier(gb).fit(X_Train,Y_Train)
prediction_ovr = clf_ovr.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_ovr))
print(classification_report(Y_Test,prediction_ovr))
# Abandoned submission snippet kept for reference.
#prediction_final=clf_ovr.predict(test_final)
#submission = pd.DataFrame({
#        "Accident_ID": df_test["id"],
#        "Severity": prediction_final
#    })
#submission.to_csv('E:/crash_prediction/s7.csv', index=False)
from sklearn.multiclass import OneVsOneClassifier
# One-vs-one wrapper around the same base model.
clf_ovo = OneVsOneClassifier(gb).fit(X_Train,Y_Train)
prediction_ovo = clf_ovo.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_ovo))
print(classification_report(Y_Test,prediction_ovo))
from sklearn.multiclass import OutputCodeClassifier
# Error-correcting output-code multiclass strategy.
clf_occ = OutputCodeClassifier(gb).fit(X_Train,Y_Train)
prediction_occ = clf_occ.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_occ))
print(classification_report(Y_Test,prediction_occ))
from sklearn.neural_network import MLPClassifier
# Multi-layer perceptron baseline (SGD solver, fixed seed).
clf_mlp = MLPClassifier(solver='sgd', alpha=1e-3,hidden_layer_sizes=(512,256,128,32,4), random_state=10)
clf_mlp.fit(X_Train,Y_Train)
prediction_mlp = clf_mlp.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_mlp))
print(classification_report(Y_Test,prediction_mlp))
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
trainedlda = LinearDiscriminantAnalysis(solver='lsqr',shrinkage='auto').fit(X_Train, Y_Train)
predictionlda = trainedlda.predict(X_Test)
print(confusion_matrix(Y_Test,predictionlda))
print(classification_report(Y_Test,predictionlda))
from sklearn.model_selection import RandomizedSearchCV, GridSearchCV
import xgboost
# Default XGBoost baseline before hyper-parameter search.
classifier=xgboost.XGBClassifier()
classifier.fit(X_Train, Y_Train)
prediction_xg0 = classifier.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_xg0))
print(classification_report(Y_Test,prediction_xg0))
# +
# XGBoost hyper-parameter search space.
params={
 "learning_rate" : [0.05, 0.10, 0.20, 0.25, 0.30,0.50] ,
 "max_depth" : [ 3, 4, 5, 6, 8, 10, 12, 15],
 "min_child_weight" : [ 1, 3, 5, 7 ],
 "gamma" : [ 0.0, 0.1, 0.2 , 0.3, 0.4 ],
 "colsample_bytree" : [ 0.3, 0.4, 0.5 , 0.7 ],
 "n_estimators" : [100,300,500,700,800,1000]
    }
# -
# Random search over the XGBoost space (5 draws, 5-fold CV).
random_search=RandomizedSearchCV(classifier,param_distributions=params,n_iter=5,n_jobs=-1,cv=5,verbose=3)
random_search.fit(X_Train, Y_Train)
prediction_xg = random_search.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_xg))
print(classification_report(Y_Test,prediction_xg))
random_search.best_estimator_
# XGBClassifier refit with the parameters found by the search.
classifier=xgboost.XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
              colsample_bynode=1, colsample_bytree=0.7, gamma=0.2,
              learning_rate=0.05, max_delta_step=0, max_depth=12,
              min_child_weight=5, missing=None, n_estimators=800, n_jobs=1,
              nthread=None, objective='multi:softprob', random_state=0,
              reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,
              silent=None, subsample=1, verbosity=1)
classifier.fit(X_Train, Y_Train)
prediction_xg1 = classifier.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_xg1))
print(classification_report(Y_Test,prediction_xg1))
# XGBoost random-forest variant, searched over the same space.
classifier_rf=xgboost.XGBRFClassifier()
random_search1=RandomizedSearchCV(classifier_rf,param_distributions=params,n_iter=5,n_jobs=-1,cv=5,verbose=3)
random_search1.fit(X_Train, Y_Train)
random_search1.best_estimator_
classifier_rf=xgboost.XGBRFClassifier(base_score=0.5, colsample_bylevel=1, colsample_bynode=0.8,
                colsample_bytree=0.7, gamma=0.4, learning_rate=0.25,
                max_delta_step=0, max_depth=12,missing=None,
                n_estimators=1000, n_jobs=-1, nthread=None,
                objective='multi:softprob', random_state=0, reg_alpha=0,
                reg_lambda=1, scale_pos_weight=1, seed=None, silent=None,
                subsample=1, verbosity=1)
classifier_rf.fit(X_Train, Y_Train)
prediction_xg2 = classifier_rf.predict(X_Test)
print(confusion_matrix(Y_Test,prediction_xg2))
print(classification_report(Y_Test,prediction_xg2))
# Abandoned submission snippet kept for reference.
#prediction_final=classifier_rf.predict(test_final)
#submission = pd.DataFrame({
#        "Accident_ID": df_test["id"],
#        "Severity": prediction_final
#    })
#submission.to_csv('E:/crash_prediction/s8.csv', index=False)
# ## Staking classifier
# [click](https://scikit-learn.org/stable/modules/generated/sklearn.ensemble.StackingClassifier.html)
from sklearn.ensemble import StackingClassifier
# Stack the strongest base models with a logistic-regression meta-learner.
estimators = [('c1',svc_clf),('c2',trainedforest),('c3',gb),('c4',clf_occ),('c5',classifier),('c6',clf_extra_tree),('c7',clf_ovr)]
clf_stack = StackingClassifier(
    estimators=estimators, final_estimator=LogisticRegression(),n_jobs=-1
)
clf_stack.fit(X_Train, Y_Train)
stack_pred= clf_stack.predict(X_Test)
print(confusion_matrix(Y_Test,stack_pred))
print(classification_report(Y_Test,stack_pred))
#prediction_final=clf_stack.predict(test_final)
#submission = pd.DataFrame({
#        "Accident_ID": df_test["id"],
#        "Severity": prediction_final
#    })
#submission.to_csv('E:/crash_prediction/s10.csv', index=False)
from sklearn.ensemble import StackingClassifier
# Second stack with a different mix of base models.
estimators = [('c2',trainedforest),('c3',gb),('c4',clf_occ),('c5',classifier),('c7',clf_ovr),('c8',bagging),('c10',clf_ada)]
clf_stack = StackingClassifier(
    estimators=estimators, final_estimator=LogisticRegression(),n_jobs=-1
)
clf_stack.fit(X_Train, Y_Train)
stack_pred= clf_stack.predict(X_Test)
print(confusion_matrix(Y_Test,stack_pred))
print(classification_report(Y_Test,stack_pred))
| Notebooks(models)/Notebook1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import json
import gc
# +
# Load the merged training frame and the feature-type map produced earlier
# in the pipeline.
X = pd.read_csv('../treated_data/df_merged.csv', index_col=0)
with open('../treated_data/type_of_feature2.json', 'r') as fp:
    type_of_feature = json.load(fp)
# -
# # Static DF train
# Create static df
df_static = X[type_of_feature['static']]
df_static.dtypes.unique()
# # feature engineering
# Positive days means feature, makes no sense
#
# 365243 is a sentinel for "unknown employment length" — replace with NaN.
df_static['DAYS_EMPLOYED'].replace(365243,np.nan, inplace=True)
# Ratio features relating credit, annuity, income, and employment length.
df_static['CREDIT_INCOME_PERCENT'] = df_static['AMT_CREDIT'] / df_static['AMT_INCOME_TOTAL']
df_static['ANNUITY_INCOME_PERCENT'] = df_static['AMT_ANNUITY_x'] / df_static['AMT_INCOME_TOTAL']
df_static['CREDIT_TERM'] = df_static['AMT_ANNUITY_x'] / df_static['AMT_CREDIT']
df_static['DAYS_EMPLOYED_PERCENT'] = df_static['DAYS_EMPLOYED'] / df_static['DAYS_BIRTH']
# The definition that was given to static features is the same thought all the months. that's why drop_duplicates
#
# get_dummies droping first to minimize redundant info
#
# dropped columns with too much absent information
df_static.drop_duplicates('SK_ID_CURR',inplace=True)
df_static = pd.get_dummies(df_static, drop_first=True)
# Keep only columns with < 30% missing values, then index by applicant id.
df_static = df_static[df_static.columns[df_static.isnull().mean() < 0.3]].set_index('SK_ID_CURR')
train_columns = df_static.columns
# # Saving static df
df_static.to_csv('../treated_data/df_static.csv')
del df_static
gc.collect()
# # TEST
X_test = pd.read_csv('../treated_data/df_merged_test.csv', index_col=0)
# Create static df
df_static_test = X_test[type_of_feature['static']]
df_static_test.dtypes.unique()
# Positive days means feature, makes no sense
df_static_test['DAYS_EMPLOYED'].replace(365243,np.nan, inplace=True)
df_static_test['CREDIT_INCOME_PERCENT'] = df_static_test['AMT_CREDIT'] / df_static_test['AMT_INCOME_TOTAL']
df_static_test['ANNUITY_INCOME_PERCENT'] = df_static_test['AMT_ANNUITY_x'] / df_static_test['AMT_INCOME_TOTAL']
df_static_test['CREDIT_TERM'] = df_static_test['AMT_ANNUITY_x'] / df_static_test['AMT_CREDIT']
df_static_test['DAYS_EMPLOYED_PERCENT'] = df_static_test['DAYS_EMPLOYED'] / df_static_test['DAYS_BIRTH']
df_static_test.drop_duplicates('SK_ID_CURR',inplace=True)
df_static_test = pd.get_dummies(df_static_test, drop_first=True)
# NOTE(review): selecting train_columns here raises KeyError if the test set
# lacks any dummy column present in train (e.g. a rare category or the
# target) — verify the column sets actually match.
df_static_test = df_static_test[train_columns].set_index('SK_ID_CURR')
df_static_test.to_csv('../treated_data/df_static_test.csv')
| time_series/.ipynb_checkpoints/03.5_create_and_analyze_static_df-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import sklearn
from bs4 import BeautifulSoup
import re
from nltk.corpus import stopwords
# Load the labeled IMDB training reviews (tab-separated).
train = pd.read_csv("./data/labeledTrainData.tsv", delimiter="\t")
train.head()
train.shape
# Peek at the first 600 characters of the first raw review.
train["review"][0][:600]
# Define the review-preprocessing function
def review_to_words(review, remove_stopwords=True):
    """Clean one raw IMDB review into a space-joined string of lowercase words.

    Steps: strip HTML with BeautifulSoup, drop non-letter characters,
    lowercase and split on whitespace, then optionally remove English
    stopwords.

    Parameters
    ----------
    review : str
        Raw review text (may contain HTML markup).
    remove_stopwords : bool, default True
        Whether to filter out NLTK English stopwords.  Bug fix: the original
        declared this flag but removed stopwords unconditionally; the default
        is now True so existing calls keep their previous behavior while the
        flag actually takes effect when passed explicitly.

    Returns
    -------
    str
        The cleaned review as a single space-separated string.
    """
    # Strip HTML tags
    review_text = BeautifulSoup(review, "html5lib").get_text()
    # Keep letters only
    letters_only = re.sub("[^a-zA-Z]", " ", review_text)
    # Lowercase and split on whitespace
    words = letters_only.lower().split()
    if remove_stopwords:
        # set() gives O(1) membership tests
        stops = set(stopwords.words("english"))
        words = [w for w in words if not w in stops]
    return " ".join(words)
# +
print("开始清洗并解析影评......")
# Clean every training review; progress is printed every 5000 reviews.
num_reviews = train["review"].size
clean_train_reviews = []
for i in range(num_reviews):
    if((i + 1) % 5000 == 0):
        print("影评 {} of {}".format(i, num_reviews))
    clean_train_reviews.append(review_to_words(train["review"][i]))
# -
clean_train_reviews[0]
# +
# Bag of words
from sklearn.feature_extraction.text import CountVectorizer
# Vocabulary capped at the 5000 most frequent words; reviews were already
# tokenized and stop-worded above, so the vectorizer's own options are off.
vectorizer = CountVectorizer(
    analyzer = "word",
    tokenizer = None,
    preprocessor = None,
    stop_words = None,
    max_features = 5000
)
train_data_features = vectorizer.fit_transform(clean_train_reviews)
# Densify the sparse count matrix for the classifier below.
train_data_features = train_data_features.toarray()
# -
train_data_features.shape
from sklearn.ensemble import RandomForestClassifier
# Random forest over the bag-of-words counts.
forest = RandomForestClassifier(n_estimators = 100)
forest = forest.fit( train_data_features, train["sentiment"])
# +
# Read the test data
test = pd.read_csv("./data/testData.tsv", header=0, delimiter="\t", \
                   quoting=3 )
# Verify that there are 25,000 rows and 2 columns
print(test.shape)
# Create an empty list and append the clean reviews one by one
num_reviews = len(test["review"])
clean_test_reviews = []
print("Cleaning and parsing the test set movie reviews...\n")
for i in range(num_reviews):
    if( (i+1) % 5000 == 0 ):
        print("Review {} of {}\n".format(i+1, num_reviews))
    clean_review = review_to_words( test["review"][i] )
    clean_test_reviews.append( clean_review )
# Get a bag of words for the test set, and convert to a numpy array
# (transform only — the vocabulary was fitted on the training reviews).
test_data_features = vectorizer.transform(clean_test_reviews)
test_data_features = test_data_features.toarray()
# Use the random forest to make sentiment label predictions
result = forest.predict(test_data_features)
# Copy the results to a pandas dataframe with an "id" column and
# a "sentiment" column
output = pd.DataFrame( data={"id":test["id"], "sentiment":result} )
# Use pandas to write the comma-separated output file
output.to_csv( "Bag_of_Words_model.csv", index=False, quoting=3 )
# -
| rnn/word2vec/old_sentiment_imdb.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <img style="float: left; padding-right: 10px; width: 45px" src="https://raw.githubusercontent.com/Harvard-IACS/2018-CS109A/master/content/styles/iacs.png"> CS109B Data Science 2: Advanced Topics in Data Science
#
# ## Lab 4 - Bayesian Analysis
#
# **Harvard University**<br>
# **Spring 2020**<br>
# **Instructors:** <NAME>, <NAME>, and <NAME><br>
# **Lab Instructors:** <NAME> and <NAME><br>
# **Content:** <NAME>
#
# ---
## RUN THIS CELL TO PROPERLY HIGHLIGHT THE EXERCISES
# Fetch the course stylesheet over the network and apply it to the notebook.
import requests
from IPython.core.display import HTML
styles = requests.get("https://raw.githubusercontent.com/Harvard-IACS/2019-CS109B/master/content/styles/cs109.css").text
HTML(styles)
import pymc3 as pm
from pymc3 import summary
# +
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas as pd
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# -
print('Running on PyMC3 v{}'.format(pm.__version__))
# + language="javascript"
# IPython.OutputArea.auto_scroll_threshold = 20000;
# -
# <a id=top></a>
#
# ## Learning Objectives
#
# By the end of this lab, you should be able to:
# * Understand how probability distributions work.
# * Apply Bayes Rule in calculating probabilities.
# * Understand how to apply Bayesian analysis using PyMC3
# * Avoid getting fired when talking to your Bayesian employer.
#
# **This lab corresponds to Lectures 6, 7, and 8, and maps to Homework 3.**
# ## Table of Contents
#
# 1. The Bayesian Way of Thinking or Is this a Fair Coin?
# 2. [Intro to `pyMC3`](#pymc3).
# 3. [Bayesian Linear Regression](#blr).
# 4. [Try this at Home: Example on Mining Disasters](#no4).
# ## 1. The Bayesian way of Thinking
#
# ```
# Here is my state of knowledge about the situation. Here is some data, I am now going to revise my state of knowledge.
# ```
# <div class="exercise" style="background-color:#b3e6ff"><b>Table Exercise</b>: Discuss the statement above with your table mates and make sure everyone understands what it means and what constitutes Bayesian way of thinking. Finally, count the Bayesians among you. </div>
# ### A. Bayes Rule
#
# \begin{equation}
# \label{eq:bayes}
# P(A|\textbf{B}) = \frac{P(\textbf{B} |A) P(A) }{P(\textbf{B})}
# \end{equation}
#
# $P(A|\textbf{B})$ is the **posterior** distribution, prob(hypothesis | data)
#
# $P(\textbf{B} |A)$ is the **likelihood** function, how probable is my data **B** for different values of the parameters
#
# $P(A)$ is the **prior** — this captures our belief about the hypothesis (the parameters) before observing the data.
#
# $P(\textbf{B})$ is the marginal probability of observing the data (sometimes called the marginal likelihood, or evidence)
# <BR>
# <div class="exercise" style="background-color:#b3e6ff"><b>Table Exercise</b>: Solve the Monty Hall Paradox using Bayes Rule.</div>
#
# 
#
# You are invited to play a game. There are 3 doors behind **one** of which are the keys to a brand new red Tesla. There is a goat behind each of the other two.
#
# You are asked to pick one door, and let's say you pick **Door1**. The host knows where the keys are. Of the two remaining closed doors, he will always open the door that has a goat behind it. He'll say "I will do you a favor and open **Door2**". So he opens Door2 inside which there is, of course, a goat. He now asks you, do you want to open the initial Door you chose or change to **Door3**? Generally, in this game, when you are presented with this choice should you swap the doors?
#
# **Initial Steps:**
# - Start by defining the `events` of this probabilities game. One definition is:
#
# - $A_i$: car is behind door $i$
#
# - $B_i$ host opens door $i$
#
# $i\in[1,2,3]$
#
# - In more math terms, the question is: is the probability that the prize is behind **Door 1** higher than the probability that the prize is behind **Door2**, given that an event **has occurred**?
# ### B. Bayes Rule written with Probability Distributions
#
# We have data that we believe come from an underlying distribution of unknown parameters. If we find those parameters, we know everything about the process that generated this data and we can make inferences (create new data).
#
# \begin{equation}
# \label{eq:bayes}
# P(\theta|\textbf{D}) = \frac{P(\textbf{D} |\theta) P(\theta) }{P(\textbf{D})}
# \end{equation}
# #### But what is $\theta \;$?
#
# $\theta$ is an unknown yet fixed set of parameters. In Bayesian inference we express our belief about what $\theta$ might be and instead of trying to guess $\theta$ exactly, we look for its **probability distribution**. What that means is that we are looking for the **parameters** of that distribution. For example, for a Poisson distribution our $\theta$ is only $\lambda$. In a normal distribution, our $\theta$ is often just $\mu$ and $\sigma$.
# ### C. A review of Common Probability Distributions
#
# #### Discrete Distributions
#
# The random variable has a **probability mass function (pmf)** which measures the probability that our random variable will take a specific value $y$, denoted $P(Y=y)$.
#
# - **Bernoulli** (binary outcome, success has probability $\theta$, $one$ trial):
# $
# P(Y=k) = \theta^k(1-\theta)^{1-k}
# $
# <HR>
# - **Binomial** (binary outcome, success has probability $\theta$, $n$ trials):
# \begin{equation}
# P(Y=k) = {{n}\choose{k}} \cdot \theta^k(1-\theta)^{n-k}
# \end{equation}
#
# *Note*: Binomial(1,$p$) = Bernoulli($p$)
# <HR>
# - **Negative Binomial**
# <HR>
# - **Poisson** (counts independent events occurring at a rate)
# \begin{equation}
# P\left( Y=y|\lambda \right) = \frac{{e^{ - \lambda } \lambda ^y }}{{y!}}
# \end{equation}
# y = 0,1,2,...
# <HR>
# - **Discrete Uniform**
# <HR>
# - **Categorical, or Multinoulli** (random variables can take any of K possible categories, each having its own probability; this is a generalization of the Bernoulli distribution for a discrete variable with more than two possible outcomes, such as the roll of a die)
# <HR>
# - **Dirichlet-multinomial** (a generalization of the beta distribution for many variables)
# #### Continuous Distributions
#
# The random variable has a **probability density function (pdf)**.
# - **Uniform** (variable equally likely to be near each value in interval $(a,b)$)
# \begin{equation}
# P(X = x) = \frac{1}{b - a}
# \end{equation}
# anywhere within the interval $(a, b)$, and zero elsewhere.
# <HR>
# - **Normal** (a.k.a. Gaussian)
# \begin{equation}
# X \sim \mathcal{N}(\mu,\,\sigma^{2})
# \end{equation}
#
# A Normal distribution can be parameterized either in terms of the precision $\tau$ or the variance $\sigma^{2}$. The link between the two is given by
# \begin{equation}
# \tau = \frac{1}{\sigma^{2}}
# \end{equation}
# - Mean $\mu$
# - Variance $\frac{1}{\tau}$ or $\sigma^{2}$
# - Parameters: `mu: float`, `sigma: float` or `tau: float`
# <HR>
# - **Beta** (variable ($\theta$) taking on values in the interval $[0,1]$, and parametrized by two positive parameters, $\alpha$ and $\beta$ that control the shape of the distribution.
#
# *Note:*Beta is a good distribution to use for priors (beliefs) because its range is $[0,1]$ which is the natural range for a probability and because we can model a wide range of functions by changing the $\alpha$ and $\beta$ parameters.
#
# \begin{equation}
# \label{eq:beta}
# P(\theta) = \frac{1}{B(\alpha, \beta)} {\theta}^{\alpha - 1} (1 - \theta)^{\beta - 1} \propto {\theta}^{\alpha - 1} (1 - \theta)^{\beta - 1}
# \end{equation}
#
#
# where the normalisation constant, $B$, is a beta function of $\alpha$ and $\beta$,
#
#
# \begin{equation}
# B(\alpha, \beta) = \int_{t=0}^1 t^{\alpha - 1} (1 - t)^{\beta - 1} dt.
# \end{equation}
# <HR>
# - **Exponential**
# <HR>
# - **Gamma**
#
#
# #### Code Resources:
# - Statistical Distributions in numpy/scipy: [scipy.stats](https://docs.scipy.org/doc/scipy/reference/stats.html)
# - Statistical Distributions in pyMC3: [distributions in PyMC3](https://docs.pymc.io/api/distributions.html) (we will see those below).
# <div class="discussion"><b>Exercise: Plot a Discrete variable</b></div>
#
# Change the value of $\mu$ in the Poisson PMF and see how the plot changes. Remember that the y-axis in a discrete probability distribution shows the probability of the random variable having a specific value in the x-axis.
#
# \begin{equation}
# P\left( X=k \right) = \frac{{e^{ - \mu } \mu ^k }}{{k!}}
# \end{equation}
#
# **stats.poisson.pmf(x, mu)** $\mu$(mu) is our $\theta$ in this case.
plt.style.use('seaborn-darkgrid')
x = np.arange(0, 30)
# Poisson PMF for a few rates: each marker is P(X = x) for that mu.
for m in [0.5, 3, 8]:
    pmf = stats.poisson.pmf(x, m)
    plt.plot(x, pmf, 'o', alpha=0.5, label='$\mu$ = {}'.format(m))
plt.xlabel('random variable', fontsize=12)
plt.ylabel('probability', fontsize=12)
plt.legend(loc=1)
# Bug fix: `plt.ylim=(-0.1)` assigned a float over the pyplot ylim function
# (silently breaking later calls) instead of setting the axis limit.
plt.ylim(bottom=-0.1)
plt.show()
# same for binomial
plt.style.use('seaborn-darkgrid')
x = np.arange(0, 22)
# Two (n, p) parameter pairs plotted side by side.
ns = [10, 17]
ps = [0.5, 0.7]
for n, p in zip(ns, ps):
    pmf = stats.binom.pmf(x, n, p)
    plt.plot(x, pmf, 'o', alpha=0.5, label='n = {}, p = {}'.format(n, p))
plt.xlabel('x', fontsize=14)
plt.ylabel('f(x)', fontsize=14)
plt.legend(loc=1)
plt.show()
# discrete uniform
plt.style.use('seaborn-darkgrid')
ls = [0]
us = [3] # watch out, this number can only be integer!
for l, u in zip(ls, us):
    x = np.arange(l, u+1)
    # Every integer in [l, u] has the same probability 1/(u-l+1).
    pmf = [1.0 / (u - l + 1)] * len(x)
    plt.plot(x, pmf, '-o', label='lower = {}, upper = {}'.format(l, u))
plt.xlabel('x', fontsize=12)
plt.ylabel('probability P(x)', fontsize=12)
plt.legend(loc=1)
plt.show()
# <div class="discussion"><b>Exercise: Plot a continuous variable</b><br></div>
#
# Change the value of $\mu$ in the Uniform PDF and see how the plot changes.
#
# Remember that the y-axis in a continuous probability distribution does not show the actual probability of the random variable having a specific value in the x-axis, because that probability is zero! Instead, to see the probability that the variable falls within a small interval, we look at the integral below the curve of the PDF.
#
# The uniform is often used as a noninformative prior.
# ```
# Uniform - numpy.random.uniform(a=0.0, b=1.0, size)
# ```
#
# $\alpha$ and $\beta$ are our parameters. `size` is how many tries to perform.
# Our $\theta$ is basically the combination of the parameters a,b. We can also call it
# \begin{equation}
# \mu = (a+b)/2
# \end{equation}
# +
from scipy.stats import uniform
# Draw 1000 samples from U(0, 1) and overlay the (flat) density on a
# normalized histogram of the draws.
r = uniform.rvs(size=1000)
density_at_samples = uniform.pdf(r)
plt.plot(r, density_at_samples, 'r-', lw=5, alpha=0.6, label='uniform pdf')
plt.hist(r, density=True, histtype='stepfilled', alpha=0.2)
plt.ylabel(r'probability density')
plt.xlabel(f'random variable')
plt.legend(loc='best', frameon=False)
plt.show()
# +
from scipy.stats import beta
# Overlay Beta(a, b) densities for a few parameter choices.
alphas = [0.5, 1.5, 3.0]
betas = [0.5, 1.5, 3.0]
x = np.linspace(0, 1, 1000)
colors = ['red', 'green', 'blue']
fig, ax = plt.subplots(figsize=(8, 5))
# FIX: the loop variable was named `colors`, shadowing the palette list
# above; renamed to `color` so the list survives the loop.
for a, b, color in zip(alphas, betas, colors):
    dist = beta(a, b)
    plt.plot(x, dist.pdf(x), c=color,
             label=f'a={a}, b={b}')
ax.set_ylim(0, 3)
ax.set_xlabel(r'$\theta$')
ax.set_ylabel(r'$p(\theta|\alpha,\beta)$')
ax.set_title('Beta Distribution')
ax.legend(loc='best')
fig.show();
# -
# Normal densities for several (mean, std) pairs on a shared grid.
plt.style.use('seaborn-darkgrid')
x = np.linspace(-5, 5, 1000)
param_pairs = zip([0., 0., 0., -2.], [0.4, 1., 2., 0.4])
for mu, sigma in param_pairs:
    plt.plot(x, stats.norm.pdf(x, mu, sigma),
             label=r'$\mu$ = '+ f'{mu},' + r'$\sigma$ = ' + f'{sigma}')
plt.xlabel('random variable', fontsize=12)
plt.ylabel('probability density', fontsize=12)
plt.legend(loc=1)
plt.show()
# NOTE(review): scipy's uniform.pdf(x, loc, scale) is the density of
# U(loc, loc + scale), so the lists below act as (loc, scale) pairs --
# the mu/sigma names and labels are kept only to mirror the normal cell
# above; they are not a mean and a standard deviation here.
plt.style.use('seaborn-darkgrid')
x = np.linspace(-5, 5, 1000)
mus = [0., 0., 0., -2.] # used as `loc` (left edge of the support)
sigmas = [0.4, 1., 2., 0.4] # used as `scale` (width of the support)
for mu, sigma in zip(mus, sigmas):
    plt.plot(x, uniform.pdf(x, mu, sigma), lw=5, alpha=0.4, \
             label=r'$\mu$ = '+ f'{mu},' + r'$\sigma$ = ' + f'{sigma}')
    plt.xlabel('random variable', fontsize=12)
    plt.ylabel('probability density', fontsize=12)
plt.legend(loc=1)
plt.show()
# ### D. Is this a Fair Coin?
#
# We do not want to promote gambling but let's say you visit the casino in **Monte Carlo**. You want to test your theory that casinos are dubious places where coins have been manipulated to have a larger probability for tails. So you will try to estimate how fair a coin is based on 100 flips. <BR>
# You begin by flipping the coin. You get either Heads ($H$) or Tails ($T$) as our observed data and want to see if your posterior probabilities change as you obtain more data, that is, more coin flips. A nice way to visualize this is to plot the posterior probabilities as we observe more flips (data).
# We will be using Bayes rule. $\textbf{D}$ is our data.
#
# \begin{equation}
# \label{eq:bayes}
# P(\theta|\textbf{D}) = \frac{P(\textbf{D} |\theta) P(\theta) }{P(\textbf{D})}
# \end{equation}
# In the case of a coin toss when we observe $k$ heads in $n$ tosses:
# \begin{equation}
# \label{eq:bayes}
# P(\theta|\textbf{k}) = Beta(\alpha + \textbf{k}, \beta + n - \textbf{k})
# \end{equation}
#
# we can say that $\alpha$ and $\beta$ play the roles of a "prior number of heads" and "prior number of tails".
# +
# play with the priors - here we manually set them but we could be sampling from a separate Beta
trials = np.array([0, 1, 3, 5, 10, 15, 20, 100, 200, 300])
heads = np.array([0, 1, 2, 4, 8, 10, 10, 50, 180, 150])
x = np.linspace(0, 1, 100)
# for simplicity we set a,b=1
plt.figure(figsize=(10,8))
for k, N in enumerate(trials):
    # BUG FIX: len(trials)/2 is a float under Python 3 and plt.subplot
    # requires integer grid dimensions; use floor division instead.
    sx = plt.subplot(len(trials)//2, 2, k+1)
    # Beta(1 + heads, 1 + tails): conjugate posterior for a Beta(1, 1) prior
    posterior = stats.beta.pdf(x, 1 + heads[k], 1 + trials[k] - heads[k])
    plt.plot(x, posterior, alpha = 0.5, label=f'{trials[k]} tosses\n {heads[k]} heads');
    plt.fill_between(x, 0, posterior, color="#348ABD", alpha=0.4)
    plt.legend(loc='upper left', fontsize=10)
plt.legend()
plt.autoscale(tight=True)
plt.suptitle("Posterior probabilities for coin flips", fontsize=15);
plt.tight_layout()
plt.subplots_adjust(top=0.88)
# -
# <a id=pymc3></a> [Top](#top)
#
# ## 2. Introduction to `pyMC3`
#
# PyMC3 is a Python library for programming Bayesian analysis, and more specifically, data creation, model definition, model fitting, and posterior analysis. It uses the concept of a `model` which contains assigned parametric statistical distributions to unknown quantities in the model. Within models we define random variables and their distributions. A distribution requires at least a `name` argument, and other `parameters` that define it. You may also use the `logp()` method in the model to build the model log-likelihood function. We define and fit the model.
#
# PyMC3 includes a comprehensive set of pre-defined statistical distributions that can be used as model building blocks. Although they are not meant to be used outside of a `model`, you can invoke them by using the prefix `pm`, as in `pm.Normal`.
#
# #### Markov Chain Monte Carlo (MCMC) Simulations
#
# PyMC3 uses the **No-U-Turn Sampler (NUTS)** and the **Random Walk Metropolis**, two Markov chain Monte Carlo (MCMC) algorithms for sampling in posterior space. Monte Carlo gets into the name because when we sample in posterior space, we choose our next move via a pseudo-random process. NUTS is a sophisticated algorithm that can handle a large number of unknown (albeit continuous) variables.
# Minimal PyMC3 model: a latent z ~ N(0, 5) and an observed x ~ N(z, 1)
# clamped to the value 5.
with pm.Model() as model:
    z = pm.Normal('z', mu=0., sigma=5.)
    x = pm.Normal('x', mu=z, sigma=1., observed=5.)
    # log-probability of the observed node evaluated at z = 2.5
    print(x.logp({'z': 2.5}))
    # draws from z's prior; only the first 10 are shown.
    # NOTE(review): confirm the meaning of the two positional args to
    # z.random() against the PyMC3 API (point vs. size) before reuse.
    print(z.random(10, 100)[:10])
# **References**:
#
# - *<NAME>, <NAME>, <NAME>. 2016. Probabilistic programming in Python using PyMC3. PeerJ Computer Science 2:e55* [(https://doi.org/10.7717/peerj-cs.55)](https://doi.org/10.7717/peerj-cs.55)
# - [Distributions in PyMC3](https://docs.pymc.io/api/distributions.html)
# - [More Details on Distributions](https://docs.pymc.io/developer_guide.html)
#
# Information about PyMC3 functions including descriptions of distributions, sampling methods, and other functions, is available via the `help` command.
# +
#help(pm.Poisson)
# -
# <a id=blr></a> [Top](#top)
#
# ## 3. Bayesian Linear Regression
# Let's say we want to predict outcomes Y as normally distributed observations with an expected value $\mu$ that is a linear function of two predictor variables, $\bf{x}_1$ and $\bf{x}_2$.
#
# \begin{equation}
# \mu = \alpha + \beta_1 \bf{x}_1 + \beta_2 x_2
# \end{equation}
#
# \begin{equation}
# Y \sim \mathcal{N}(\mu,\,\sigma^{2})
# \end{equation}
#
# where $\sigma^2$ represents the measurement error.
#
# In this example, we will use $\sigma^2 = 10$
#
# We also choose the parameters as normal distributions:
#
# \begin{eqnarray}
# \alpha \sim \mathcal{N}(0,\,10) \\
# \beta_i \sim \mathcal{N}(0,\,10) \\
# \sigma^2 \sim |\mathcal{N}(0,\,10)|
# \end{eqnarray}
#
# We will artificially create the data to predict on. We will then see if our model predicts them correctly.
# +
# Simulate Y = alpha + beta[0]*X1 + beta[1]*X2 + N(0, sigma) noise so the
# Bayesian regression below has known ground-truth parameters to recover.
# Initialize random number generator
np.random.seed(123)
# True parameter values
alpha, sigma = 1, 1
beta = [1, 2.5]
# Size of dataset
size = 100
# Predictor variable
X1 = np.linspace(0, 1, size)
X2 = np.linspace(0,.2, size)
# Simulate outcome variable
Y = alpha + beta[0]*X1 + beta[1]*X2 + np.random.randn(size)*sigma
# Scatter the outcome against each predictor, side by side
fig, ax = plt.subplots(1,2, figsize=(10,6), sharex=True)
ax[0].scatter(X1,Y)
ax[1].scatter(X2,Y)
ax[0].set_xlabel(r'$x_1$', fontsize=14)
ax[0].set_ylabel(r'$Y$', fontsize=14)
ax[1].set_xlabel(r'$x_2$', fontsize=14)
ax[1].set_ylabel(r'$Y$', fontsize=14)
# -
# Inspect the simulated predictor values (bare expression = notebook output)
X1
# +
from pymc3 import Model, Normal, HalfNormal
# Bayesian linear regression: priors on alpha, beta, sigma and a Normal
# likelihood tying the linear predictor to the simulated observations Y.
basic_model = Model()
with basic_model:
    # Priors for unknown model parameters, specifically create stochastic random variables
    # with Normal prior distributions for the regression coefficients,
    # and a half-normal distribution for the standard deviation of the observations, σ.
    alpha = Normal('alpha', mu=0, sd=10)
    beta = Normal('beta', mu=0, sd=10, shape=2)  # shape=2: one coefficient per predictor
    sigma = HalfNormal('sigma', sd=1)
    # Expected value of outcome: the deterministic linear predictor
    mu = alpha + beta[0]*X1 + beta[1]*X2
    # Likelihood (sampling distribution) of observations
    Y_obs = Normal('Y_obs', mu=mu, sd=sigma, observed=Y)
# +
# model fitting with sampling
from pymc3 import NUTS, sample, find_MAP
from scipy import optimize
with basic_model:
    # obtain starting values via MAP (maximum a posteriori) optimisation.
    # NOTE(review): current PyMC3 docs discourage initialising chains at
    # the MAP for larger models -- kept here as in the original paper.
    start = find_MAP(fmin=optimize.fmin_powell)
    # instantiate the NUTS sampler, scaled using the MAP point
    step = NUTS(scaling=start)
    # draw 2000 posterior samples
    trace = sample(2000, step, start=start)
# +
from pymc3 import traceplot
# Marginal posterior densities (left) and per-draw sample traces (right)
traceplot(trace);
# -
# Posterior summary table (means, credible intervals, diagnostics)
results = pm.summary(trace,
                    var_names=['alpha', 'beta', 'sigma'])
results
# This linear regression example is from the original paper on PyMC3: *<NAME>, <NAME>, <NAME>. 2016. Probabilistic programming in Python using PyMC3. PeerJ Computer Science 2:e55 https://doi.org/10.7717/peerj-cs.55*
# <a id=no4></a> [Top](#top)
#
# ## 4. Try this at Home: Example on Mining Disasters
# We will go over the classical `mining disasters from 1851 to 1962` dataset.
#
# This example is from the [pyMC3 Docs](https://docs.pymc.io/notebooks/getting_started.html).
import pandas as pd
# Annual counts of coal-mining disasters, 1851-1961 (111 values).
# np.nan marks the two years whose records are missing.
_disaster_counts = [
    4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4,
    5, 3, 1, 4, 4, 1, 5, 5, 3, 4, 2, 5, 2, 2, 3, 4, 2, 1, 3, np.nan,
    2, 1, 1, 1, 1, 3, 0, 0, 1, 0, 1, 1, 0, 0, 3, 1, 0, 3, 2, 2,
    0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 2, 1, 0, 0, 0, 1, 1, 0, 2,
    3, 3, 1, np.nan, 2, 1, 1, 1, 1, 2, 4, 2, 0, 0, 1, 4, 0, 0, 0, 1,
    0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1,
]
disaster_data = pd.Series(_disaster_counts)
# Bar chart of yearly disaster counts; NaN years simply render as gaps.
fontsize = 12
years = np.arange(1851, 1962)
plt.figure(figsize=(10,5))
plt.bar(years, disaster_data)
plt.ylabel('Disaster count', size=fontsize)
plt.xlabel('Year', size=fontsize);
plt.title('Was there a Turning Point in Mining disasters from 1851 to 1962?', size=15);
# #### Building the model
#
# **Step1:** We choose the probability model for our experiment. Occurrences of disasters in the time series is thought to follow a **Poisson** process with a large **rate** parameter in the early part of the time series, and from one with a smaller **rate** in the later part. We are interested in locating the change point in the series, which perhaps is related to changes in mining safety regulations.
#
# ```
# disasters = pm.Poisson('disasters', rate, observed=disaster_data)
# ```
#
# We have two rates, `early_rate` if $t<=s$, and `late_rate` if $t>s$, where $s$ is the year the switch was made (a.k.a. the `switchpoint`).
#
# **Step2:** Choose a prior distributions of the two rates, what we believe the rates were before we observed the data, and the switchpoint. We choose Exponential.
# ```
# early_rate = pm.Exponential('early_rate', 1)
# ```
#
# The parameters of this model are:
#
#
# **Note:** Watch for missing values. Missing values are handled transparently by passing a MaskedArray or a pandas.DataFrame. Behind the scenes, another random variable, disasters.missing_values is created to model the missing values. If you pass a np.array with missing values you will get an error.
# Change-point model: one Poisson rate before the switch year, another after.
with pm.Model() as disaster_model:
    # discrete uniform prior over the switch year (bounds inclusive),
    # with the chain initialised at 1900 via testval
    switchpoint = pm.DiscreteUniform('switchpoint', lower=years.min(), upper=years.max(), testval=1900)
    # Priors for pre- and post-switch rates number of disasters
    early_rate = pm.Exponential('early_rate', 1)
    late_rate = pm.Exponential('late_rate', 1)
    # our theta - allocate appropriate Poisson rates to years before and after current
    # switch is an `if` statement in pyMC3
    rate = pm.math.switch(switchpoint >= years, early_rate, late_rate)
    # our observed data as a likelihood function of the `rate` parameters
    # shows how we think our data is distributed
    disasters = pm.Poisson('disasters', rate, observed=disaster_data)
# #### Model Fitting
# there are defaults but we can also more explicitly set the sampling algorithms
with disaster_model:
    # NUTS for the continuous variables (the two rates)
    step1 = pm.NUTS([early_rate, late_rate])
    # Metropolis for the discrete variables: the switch year and the
    # automatically created variable that imputes the missing observations
    step2 = pm.Metropolis([switchpoint, disasters.missing_values[0]] )
    trace = pm.sample(10000, step=[step1, step2])
# try different number of samples
#trace = pm.sample(5000, step=[step1, step2])
# #### Posterior Analysis
# On the left side plots we notice that our early rate is between 2.5 and 3.5 disasters a year. In the late period it seems to be between 0.6 and 1.2 so definitely lower.
#
# The right side plots show the samples we drew to come to our conclusion.
# Marginal posteriors (left) and sample traces (right) for all three parameters
pm.traceplot(trace, ['early_rate', 'late_rate', 'switchpoint'], figsize=(20,10));
# Posterior summary statistics table
results = pm.summary(trace,
                     var_names=['early_rate', 'late_rate', 'switchpoint'])
results
| content/labs/lab04/notebook/cs109b_lab04_bayes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to products and measurements <img width="200" align="right" src="../images/easi_logo_full_text_wide.png">
#
# * **Products used:**
# [usgs_espa_ls8c1_sr](https://explorer.stage.earth.dataobservatory.net/usgs_espa_ls8c1_sr)
#
# * **Prerequisites:** Users of this notebook should have a basic understanding of:
# * How to run a [Jupyter notebook](01_Jupyter_notebooks.ipynb)
#
# ## Background
# A "datacube" is a digital information architecture that specialises in hosting and cataloguing spatial information.
# [Digital Earth Africa (DE Africa)](https://www.digitalearthafrica.org/) is based on the [Open Data Cube](https://www.opendatacube.org/) infrastructure, and specialises in storing remotely sensed data, particularly from Earth Observation satellites such as [Landsat](https://landsat.gsfc.nasa.gov/) and [Sentinel](http://www.esa.int/Applications/Observing_the_Earth/Copernicus/Overview4).
#
# The Digital Earth Africa datacube contains both raw satellite data and derivative data "products".
# These data products are often composed of a range of "measurements" such as the suite of remote sensing band values or statistical product summaries. Before running a query to load data from the datacube, it is useful to know what it contains.
# This notebook demonstrates several straightforward ways to inspect the product and measurement contents of a datacube.
# ## Description
# This notebook demonstrates how to connect to the Digital Earth Africa datacube and interrogate the available products and measurements stored within.
# Topics covered include:
#
# * How to connect to a datacube
# * How to list all the products
# * How to list a selected product's measurements
# * How to interactively visualise data in the datacube
#
# ***
# ## Getting started
# To run this introduction to products and measurements, run all the cells in the notebook starting with the "Load packages" cell. For help with running notebook cells, refer back to the [Jupyter Notebooks notebook](01_Jupyter_notebooks.ipynb).
# ### Load packages
# The `datacube` package is required to access and work with available data.
# The `pandas` package is required to format tables.
# The `DcViewer` utility will allow us to interactively explore the products available in the datacube.
# +
import datacube
import pandas as pd
from odc.ui import DcViewer
# Set some configurations for displaying tables nicely
pd.set_option('display.max_colwidth', 200)  # show long product descriptions untruncated
pd.set_option('display.max_rows', None)  # never truncate the product table rows
# -
# ### Connect to the datacube
#
# After importing the `datacube` package, users need to specify a name for their session, known as the app name.
#
# This name is generated by the user and is used to track down issues with database queries.
# It does not have any effect on the analysis.
# Use a short name that is consistent with the purpose of your notebook such as the way `02_Products_and_measurements` has been used as the app name in this notebook.
#
# The resulting `dc` object is what we use to access all the data contained within the Digital Earth Africa datacube.
# Connect to the datacube; `app` only labels this session for query debugging
dc = datacube.Datacube(app="02_Products_and_measurements")
# ## List products
# Once a datacube instance has been created, users can explore the products and measurements stored within.
#
# The following cell lists all product attributes currently available in the Digital Earth Africa datacube by using the `dc.list_products().columns` function.
# Attributes available for each indexed product (usable below to pick columns)
dc.list_products().columns
# Any of these can be used to customise the product information returned by the `dc.list_products()` function, as shown in the next cell.
#
# Additionally, the next cell lists all products that are currently available in the Digital Earth Africa datacube by using the `dc.list_products()` function.
#
# Products listed under **name** in the following table represent the product options available when querying the datacube.
# The table below provides some useful information about each product, including a brief product **description**, the **instrument** and **platform** the data originated from (e.g. Landsat 8 OLI), and the product's default **crs** (coordinate reference system) and **resolution** if applicable.
# +
# Tabulate every indexed product, keeping only the most informative columns.
products = dc.list_products()
display_columns = [
    "name", "description", "platform", "instrument", "crs", "resolution",
]
products[display_columns].sort_index()
# -
# ## List measurements
#
# Most products are associated with a range of available measurements.
# These can be individual satellite bands (e.g. Landsat's near-infrared band) or statistical product summaries.
#
# Using the **name** column of products listed above, let's interrogate the measurements associated with the `usgs_espa_ls8c1_sr` product using the `dc.list_measurements()` function.
# This product name refers to the US Geological Survey's Landsat 8 Analysis-ready data product.
#
# The table below includes a range of technical information about each band in the dataset, including any **aliases** which can be used to load the data, the data type or **dtype**, any **flags_definition** that are associated with the measurement (this information is used for tasks like cloud masking), and the measurement's **nodata** value.
#
# Change the `product` name below and re-run the following cell to explore available measurements associated with other products.
# +
product = "usgs_espa_ls8c1_sr"
measurements = dc.list_measurements()
measurements.loc[product]
# -
# ## Visualising available data
# For a more visual way of exploring the data that is available within the Digital Earth Africa datacube, we can use the interactive `DcViewer` utility or the online [DE Africa Explorer](https://explorer.digitalearth.africa/ls8_usgs_sr_scene) website.
# We will use the `DcViewer` utility in this exercise.
# Select a product from the drop-down menu on the top-right of the map to show the areas data is available for in blue.
# You can also use the back and forward buttons above the map to toggle through time.
#
#
#
# The utility is only able to visualise a limited number of datasets at one time.
# If the available data footprints do not appear, either press the "show" button on the top right, or zoom further in on the map.
# Interactive map of dataset footprints for the selected product/year.
# NOTE(review): center=(-33.45, -70.66) is Santiago, Chile -- not Africa as
# the surrounding text suggests; confirm the intended starting view.
DcViewer(dc=dc,
         time='2013',
         center=(-33.45, -70.66),
         zoom=6)
# ## Recommended next steps
#
# For more advanced information about working with Jupyter Notebooks or JupyterLab, you can explore [JupyterLab documentation page](https://jupyterlab.readthedocs.io/en/stable/user/notebook.html).
#
# To continue working through the notebooks in this beginner's guide, the following notebooks are designed to be worked through in the following order:
#
# 1. [Jupyter Notebooks](01_Jupyter_notebooks.ipynb)
# 2. **Products and measurements (this notebook)**
# 3. [Loading data](03_Loading_data.ipynb)
# 4. [Plotting](04_Plotting.ipynb)
# 5. [Performing a basic analysis](05_Basic_analysis.ipynb)
# 6. [Introduction to numpy](06_Intro_to_numpy.ipynb)
# 7. [Introduction to xarray](07_Intro_to_xarray.ipynb)
# 8. [Parallel processing with Dask](08_Parallel_processing_with_dask.ipynb)
#
# Once you have completed the above tutorials, join advanced users in exploring:
#
# * The "Datasets" directory in the repository, where you can explore DE Africa products in depth.
# * The "Frequently used code" directory, which contains a recipe book of common techniques and methods for analysing DE Africa data.
# * The "Real-world examples" directory, which provides more complex workflows and analysis case studies.
# ***
# ## Additional information
#
# **License:** The code in this notebook is licensed under the [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0).
#
# **Contact:** If you need assistance, please post a question on the [Open Data Cube Slack channel](http://slack.opendatacube.org/) or on the [GIS Stack Exchange](https://gis.stackexchange.com/questions/ask?tags=open-data-cube) using the `open-data-cube` tag (you can view previously asked questions [here](https://gis.stackexchange.com/questions/tagged/open-data-cube)).
# If you would like to report an issue with this notebook, you can file one on [Github](https://github.com/GeoscienceAustralia/dea-notebooks).
#
# **Last modified:** May 2020
# ## Tags
# Browse all available tags on the DE Africa User Guide's [Tags Index](https://) (placeholder as this does not exist yet)
# + raw_mimetype="text/restructuredtext" active=""
# **Tags**: :index:`dc.list_products`, :index:`dc.list_measurements`, :index:`products`, :index:`measurements`, :index:`landsat 5`, :index:`datacube explorer`, :index:`DcViewer`
| Beginners_guide/02_Products_and_measurements.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Question2
def isPrime(n):
    """Return True if n is a prime number, False otherwise.

    Trial division only needs to test divisors up to sqrt(n): any factor
    larger than sqrt(n) pairs with one smaller than it, so the original
    n//2 bound did redundant work.
    """
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True
a = int(input("Enter lower range :"))
b = int(input("Enter higher range :"))
for x in range(a,b):
if isPrime(x):
print(x, end = ' ')
# -
# Build 0..2499 and collect its primes two ways: a comprehension and filter().
lst = list(range(2500))
print(lst)
# IDIOM: comprehension instead of the original manual append loop
lst_Prime = [item for item in lst if isPrime(item)]
print(lst_Prime)
lst_Prime_one = filter(isPrime, lst)
# FIX: list(list(...)) was redundant -- one list() materialises the filter
list(lst_Prime_one)
# # Question 3
# +
# Make a Lambda function for capitalizing the whole sentence passed using arguments.
# and map all the sentences in the List, with the lambda functions
# -
strings =["hey this is sai", "i am in mumbai"]
cap = map (lambda x: str.capitalize(x),strings)
list(cap)
# # Question 1
# +
# Write a program to identify sub list [1,1,5] is there in the given listin the same order
# -
listy = [1, 5, 6, 4, 1, 2, 3, 5]
sublist = [1, 1, 5]


def contains_in_order(sequence, sub):
    """Return True if every element of `sub` appears in `sequence` in order.

    A single shared iterator is consumed left to right: each `in` search
    resumes where the previous one stopped, enforcing the ordering.
    """
    iterator = iter(sequence)
    return all(element in iterator for element in sub)


# FIX: the original `listy[0:4:6]` was just a slice (evaluating to [1])
# and never checked whether [1, 1, 5] occurs in order at all.
print(contains_in_order(listy, sublist))
| Assignment Day 5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: visualization-curriculum-gF8wUgMm
# language: python
# name: visualization-curriculum-gf8wugmm
# ---
# + [markdown] papermill={"duration": 0.016382, "end_time": "2020-03-26T01:14:44.815997", "exception": false, "start_time": "2020-03-26T01:14:44.799615", "status": "completed"} tags=[]
# # Estimating The Infected Population From Deaths
# > Estimating the number of infected people by country based on the number of deaths and case fatality rate.
#
# - comments: true
# - author: <NAME>
# - categories: [growth, compare, interactive, estimation]
# - hide: false
# - image: images/covid-estimate-infections.png
# - permalink: /covid-infected/
# - toc: true
# + papermill={"duration": 0.708509, "end_time": "2020-03-26T01:14:45.537370", "exception": false, "start_time": "2020-03-26T01:14:44.828861", "status": "completed"} tags=[]
#hide
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import altair as alt
from datetime import timedelta, datetime, date
# %config InlineBackend.figure_format = 'retina'
# Shared dimensions for every Altair chart in this notebook
chart_width = 550
chart_height= 400
# + papermill={"duration": 0.042724, "end_time": "2020-03-26T01:14:45.591803", "exception": false, "start_time": "2020-03-26T01:14:45.549079", "status": "completed"} tags=[]
#hide
def plot(data, type1, levels):
    """Build an interactive Altair chart from per-country COVID time series.

    Parameters
    ----------
    data : list of DataFrames, one per entry of the *global* `countries`
        list, in the same order.  NOTE(review): the function loops over
        ``len(countries)``, not ``len(data)`` -- callers must keep the
        two aligned.
    type1 : "line" plots estimated infected vs. days since outbreak;
        "scatter" plots infected vs. confirmed cases with a 45-degree
        reference line.  Any other value falls through and returns None.
    levels : True for absolute-total axis labels, False for per-million.

    Returns an alt.Chart (or None, see above).
    """
    data_countries_pc2 = data.copy()
    for i in range(0,len(countries)):
        data_countries_pc2[i] = data_countries_pc2[i].reset_index()
        # n_days = row position = days since each country's series started
        data_countries_pc2[i]['n_days'] = data_countries_pc2[i].index
        if type1 == "scatter":
            data_countries_pc2[i]['cases'] = data_countries_pc2[i]["total_cases"]
        data_countries_pc2[i]['infected'] = data_countries_pc2[i]["total_infected"]
    # Stack all countries into one long frame, as Altair expects
    data_plot = data_countries_pc2[0]
    for i in range(1, len(countries)):
        data_plot = pd.concat([data_plot, data_countries_pc2[i]], axis=0)
    if type1 == "scatter":
        # 45-degree reference line: infected == cases
        data_plot["45_line"] = data_plot["cases"]
    # Plot it using Altair
    source = data_plot
    if levels == True:
        ylabel = "Total"
    else :
        ylabel = "Per Million"
    # interval selection -> zoom/pan; legend clicks -> highlight countries
    scales = alt.selection_interval(bind='scales')
    selection = alt.selection_multi(fields=['location'], bind='legend')
    if type1 == "line":
        base = alt.Chart(source, title = "Estimated Infected Population By Country").encode(
            x = alt.X('n_days:Q', title = "Days since outbreak"),
            y = alt.Y("infected:Q",title = ylabel),
            color = alt.Color('location:N', legend=alt.Legend(title="Country", labelFontSize=15, titleFontSize=17),
                              scale=alt.Scale(scheme='tableau20')),
            opacity = alt.condition(selection, alt.value(1), alt.value(0.1))
        )
        lines = base.mark_line().add_selection(
            scales
        ).add_selection(
            selection
        ).properties(
            width=chart_width,
            height=chart_height
        )
        return(
            ( lines)
            .configure_title(fontSize=20)
            .configure_axis(labelFontSize=15,titleFontSize=18)
        )
    # Scatter branch: the labels set above are overwritten here
    if levels == True:
        ylabel = "Infected"
        xlabel = "Cases"
    else :
        ylabel = "Per Million Infected"
        xlabel = "Per Million Cases"
    if type1 == "scatter":
        base = alt.Chart(source, title = "COVID-19 Cases VS Infected").encode(
            x = alt.X('cases:Q', title = xlabel),
            y = alt.Y("infected:Q",title = ylabel),
            color = alt.Color('location:N', legend=alt.Legend(title="Country", labelFontSize=15, titleFontSize=17),
                              scale=alt.Scale(scheme='tableau20')),
            opacity = alt.condition(selection, alt.value(1), alt.value(0.1))
        )
        scatter = base.mark_point().add_selection(
            scales
        ).add_selection(
            selection
        ).properties(
            width=chart_width,
            height=chart_height
        )
        line_45 = alt.Chart(source).encode(
            x = "cases:Q",
            y = alt.Y("45_line:Q", scale=alt.Scale(domain=(0, max(data_plot["infected"])))),
        ).mark_line(color="grey", strokeDash=[3,3])
        return(
            (scatter + line_45)
            .configure_title(fontSize=20)
            .configure_axis(labelFontSize=15,titleFontSize=18)
        )
# + papermill={"duration": 6.783474, "end_time": "2020-03-26T01:14:52.387134", "exception": false, "start_time": "2020-03-26T01:14:45.603660", "status": "completed"} tags=[]
#hide
# Get data on deaths D_t
# Download the Johns Hopkins global deaths time series (wide format: one
# column per date) and reshape it into long (location, date, total_deaths).
# NOTE(review): error_bad_lines is deprecated in recent pandas -- confirm
# the installed version still accepts it.
data = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv",
                   error_bad_lines=False)
data = data.drop(columns=["Lat", "Long"])
# wide -> long: one row per (province, country, date-column)
data = data.melt(id_vars= ["Province/State", "Country/Region"])
# sum provinces up to the country level
data = pd.DataFrame(data.groupby(['Country/Region', "variable"]).sum())
data.reset_index(inplace=True)
data = data.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_deaths"})
data['date'] =pd.to_datetime(data.date)
data = data.sort_values(by = "date")
# normalise country names to match the `countries` list used below
data.loc[data.location == "US","location"] = "United States"
data.loc[data.location == "Korea, South","location"] = "South Korea"
#hide
# Get data and clean it
# test
# Same pipeline as the deaths frame above, but for confirmed cases:
# download, drop coordinates, melt wide -> long, aggregate to country level,
# rename columns, parse dates, and normalise country names.
data_cases = pd.read_csv("https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv", error_bad_lines=False)
data_cases = data_cases.drop(columns=["Lat", "Long"])
data_cases = data_cases.melt(id_vars= ["Province/State", "Country/Region"])
data_cases = pd.DataFrame(data_cases.groupby(['Country/Region', "variable"]).sum())
data_cases.reset_index(inplace=True)
data_cases = data_cases.rename(columns={"Country/Region": "location", "variable": "date", "value": "total_cases"})
data_cases['date'] =pd.to_datetime(data_cases.date)
data_cases = data_cases.sort_values(by = "date")
data_cases.loc[data_cases.location == "US","location"] = "United States"
data_cases.loc[data_cases.location == "Korea, South","location"] = "South Korea"
# Add countries
countries = ["China", "Italy", "Spain", "France", "United Kingdom", "Germany",
             "Portugal", "United States", "Singapore","South Korea", "Japan",
             "Brazil","Iran", "India", "Switzerland", "Canada", "Australia"]
# Merge deaths and cases on (location, date)
data_final = pd.merge(data,
                 data_cases
                 )
# naive case fatality rate: deaths / confirmed cases on the same day
data_final["CFR"] = data_final["total_deaths"]/data_final["total_cases"]
data_final["total_infected"] = np.NaN
data_final = data_final.sort_values(by = ['location', 'date'])
data_final = data_final.reset_index(drop = True)
# For each date (except the last 8): estimate infections on day t as the
# deaths observed 8 days later divided by the CFR 8 days later -- the
# 8-day shift presumably reflects the infection-to-death lag.
for j in countries:
    for i in data_final["date"].unique()[0:-8]:
        data_final.loc[(data_final.date == i) & (data_final.location == j), "total_infected"] = data_final.loc[(data_final.date == i + np.timedelta64(8, 'D')) & (data_final.location == j), "total_deaths"].iloc[0]/data_final.loc[(data_final.date == i + np.timedelta64(8, 'D')) & (data_final.location == j), "CFR"].iloc[0]
# Estimate growth rate of infected, g  (day-over-day log difference)
data_final['infected_g'] = np.log(data_final['total_infected'])
data_final['infected_g'] = data_final['infected_g'].diff()
# Estimate number of infected given g: the last 8 days have no 8-day-ahead
# deaths, so extrapolate each day from the prior one using the mean growth rate
today = data_final.date.iloc[-1]
for j in countries:
    for i in range(7,-1,-1):
        data_final.loc[(data_final.location == j) & (data_final.date == today - timedelta(i)), "total_infected"] = data_final.loc[data_final.location == j, "total_infected"].iloc[-i-2]*(1+data_final.loc[data_final.location == j, "infected_g"].aggregate(func = "mean"))
# Slice the merged frame down to the columns the line chart needs and split
# it into one DataFrame per country (rows with > 1 estimated infections).
data_pc = data_final[['location', 'date', 'total_infected']].copy()
countries = ["China", "Italy", "Spain", "France", "United Kingdom", "Germany",
             "Portugal", "United States", "Singapore","South Korea", "Japan",
             "Brazil","Iran"]
data_countries = []
data_countries_pc = []
# NOTE(review): this loop assigns the column back to itself -- it has no
# effect (likely a leftover from a per-capita scaling step).
for i in countries:
    data_pc.loc[data_pc.location == i,"total_infected"] = data_pc.loc[data_pc.location == i,"total_infected"]
# Get each country time series
filter1 = data_pc["total_infected"] > 1
for i in countries:
    filter_country = data_pc["location"]== i
    data_countries_pc.append(data_pc[filter_country & filter1])
# + [markdown] papermill={"duration": 0.011774, "end_time": "2020-03-26T01:14:52.411812", "exception": false, "start_time": "2020-03-26T01:14:52.400038", "status": "completed"} tags=[]
# ## Estimated Infected Population By Country
#
# by days since outbreak
#
# > Tip: Click (Shift+ for multiple) on countries in the legend to filter the visualization.
# + papermill={"duration": 1.819528, "end_time": "2020-03-26T01:14:54.243268", "exception": false, "start_time": "2020-03-26T01:14:52.423740", "status": "completed"} tags=[]
#hide_input
# Plot estimated absolute number of infected
plot1 = plot(data_countries_pc, "line", True)
# save a static preview image for the blog front matter
plot1.save("../images/covid-estimate-infections.png")
plot1
# + [markdown] papermill={"duration": 0.013248, "end_time": "2020-03-26T01:14:54.270211", "exception": false, "start_time": "2020-03-26T01:14:54.256963", "status": "completed"} tags=[]
# Latest Country Estimates
# + papermill={"duration": 0.049371, "end_time": "2020-03-26T01:14:54.332994", "exception": false, "start_time": "2020-03-26T01:14:54.283623", "status": "completed"} tags=[]
#hide_input
# Latest available estimate of infected people for each country,
# restricted to dates from March 2020 onward.
label = 'Estimate of Infected'
frames = [country_frame.copy() for country_frame in data_countries_pc]
temp = pd.concat(frames).loc[lambda frame: frame.date >= '3/1/2020']
metric_name = f'{label}'
temp.columns = ['Country', 'Date', metric_name]
rounded = temp.loc[:, "Estimate of Infected"].round(0)
temp.loc[:, "Estimate of Infected"] = rounded.map('{:,.0f}'.format)
temp.groupby('Country').last()
# + [markdown] papermill={"duration": 0.014175, "end_time": "2020-03-26T01:14:54.361531", "exception": false, "start_time": "2020-03-26T01:14:54.347356", "status": "completed"} tags=[]
# ## Infected vs. number of confirmed cases
# > Allows you to compare how countries have been tracking the true number of infected people. The smaller deviation from the dashed line (45 degree line) the better job at tracking the true number of infected people.
# + [markdown] papermill={"duration": 0.016112, "end_time": "2020-03-26T01:14:54.391956", "exception": false, "start_time": "2020-03-26T01:14:54.375844", "status": "completed"} tags=[]
# > Tip: Click (Shift+ for multiple) on countries in the legend to filter the visualization.
# + papermill={"duration": 0.286386, "end_time": "2020-03-26T01:14:54.692691", "exception": false, "start_time": "2020-03-26T01:14:54.406305", "status": "completed"} tags=[]
#hide_input
# Plot it using Altair
data_pc = data_final[['location', 'date', 'total_cases', 'total_infected']].copy()
countries = ["Italy", "Spain", "France", "United Kingdom", "Germany",
             "Portugal", "United States", "Singapore", "South Korea", "Japan",
             "Brazil", "Iran"]
# NOTE(review): `data_countries` is populated nowhere in this cell and unused.
data_countries = []
data_countries_pc = []
# NOTE(review): this loop assigns each slice back to itself -- it is a no-op,
# probably left over from a per-capita scaling that was removed.
for i in countries:
    data_pc.loc[data_pc.location == i, "total_infected"] = data_pc.loc[data_pc.location == i, "total_infected"]
    data_pc.loc[data_pc.location == i, "total_cases"] = data_pc.loc[data_pc.location == i, "total_cases"]
# get each country time series (only rows with more than one estimated infected)
filter1 = data_pc["total_infected"] > 1
for i in countries:
    filter_country = data_pc["location"] == i
    data_countries_pc.append(data_pc[filter_country & filter1])
plot(data_countries_pc, "scatter", True)
# + [markdown] papermill={"duration": 0.01582, "end_time": "2020-03-26T01:14:54.724319", "exception": false, "start_time": "2020-03-26T01:14:54.708499", "status": "completed"} tags=[]
# Latest Observed vs. Estimate of Infected Cases
# + papermill={"duration": 0.049176, "end_time": "2020-03-26T01:14:54.788641", "exception": false, "start_time": "2020-03-26T01:14:54.739465", "status": "completed"} tags=[]
#hide_input
# Per-country table comparing the latest observed case count with the
# estimated number of infected (rows from March 2020 onward).
label1 = 'Observed Cases'
label2 = 'Estimate of Infected'
temp = pd.concat([x.copy() for x in data_countries_pc]).loc[lambda x: x.date >= '3/1/2020']
metric_name1 = f'{label1}'
metric_name2 = f'{label2}'
temp.columns = ['Country', 'Date', metric_name1, metric_name2]
# temp.loc[:, 'month'] = temp.date.dt.strftime('%Y-%m')
# Format both metrics with thousands separators for display.
temp.loc[:, "Observed Cases"] = temp.loc[:, "Observed Cases"].round(0).map('{:,.0f}'.format)
temp.loc[:, "Estimate of Infected"] = temp.loc[:, "Estimate of Infected"].round(0).map('{:,.0f}'.format)
temp.groupby('Country').last()
# + [markdown] papermill={"duration": 0.016089, "end_time": "2020-03-26T01:14:54.821161", "exception": false, "start_time": "2020-03-26T01:14:54.805072", "status": "completed"} tags=[]
# ## Methodology
# + [markdown] papermill={"duration": 0.016123, "end_time": "2020-03-26T01:14:54.853741", "exception": false, "start_time": "2020-03-26T01:14:54.837618", "status": "completed"} tags=[]
# We argue that the number of infected in the past can be inferred using today's number of deaths and average fatality rate from confirmed cases in the following way:
#
# {% raw %}
# $$ I_{t-j} = \frac{D_t}{{CFR}_t}$$
# {% endraw %}
#
# where {% raw %}$I_t${% endraw %} = number of infected, {% raw %}$D_t${% endraw %} = number of deaths, and {% raw %}${CFR}_t ${% endraw %} = case fatality rate = {% raw %}$\frac{D}{C}${% endraw %}. The {% raw %}$j${% endraw %} depends on the average number of days that covid patients die after having the first symptoms.
# + [markdown] papermill={"duration": 0.015544, "end_time": "2020-03-26T01:14:54.885751", "exception": false, "start_time": "2020-03-26T01:14:54.870207", "status": "completed"} tags=[]
# **Assumption 1**: The case fatality rate is a good proxy for the fatality rate of the infected population
#
# + [markdown] papermill={"duration": 0.03925, "end_time": "2020-03-26T01:14:54.947105", "exception": false, "start_time": "2020-03-26T01:14:54.907855", "status": "completed"} tags=[]
# Then, in order to estimate the current number of infected {% raw %}$I_t${% endraw %} we need to estimate its growth rate from {% raw %}$t-j${% endraw %} to {% raw %}$t${% endraw %}.
#
# {% raw %}
# $$I_t = (1+\hat{g})^j I_{t-j}$$
# {% endraw %}
# + [markdown] papermill={"duration": 0.016038, "end_time": "2020-03-26T01:14:54.979189", "exception": false, "start_time": "2020-03-26T01:14:54.963151", "status": "completed"} tags=[]
# **Assumption 2**: The growth rate of infected $\hat{g}$ is an unbiased estimate of $g$ .
#
# For now we estimate $g$ using the average growth rate since having the first infected person.
# + [markdown] papermill={"duration": 0.016521, "end_time": "2020-03-26T01:14:55.011653", "exception": false, "start_time": "2020-03-26T01:14:54.995132", "status": "completed"} tags=[]
# **Assumption 3**: It takes on average 8 days to die after having the first symptoms.
# + [markdown] papermill={"duration": 0.016856, "end_time": "2020-03-26T01:14:55.045339", "exception": false, "start_time": "2020-03-26T01:14:55.028483", "status": "completed"} tags=[]
# This analysis was conducted by [Joao <NAME>](https://www.jbduarte.com). Relevant sources are listed below:
#
#
# 1. [2019 Novel Coronavirus COVID-19 (2019-nCoV) Data Repository by Johns Hopkins CSSE](https://systems.jhu.edu/research/public-health/ncov/) [GitHub repository](https://github.com/CSSEGISandData/COVID-19).
#
# 2. [Feenstra, <NAME>., <NAME> and <NAME> (2015), "The Next Generation of the Penn World Table" American Economic Review, 105(10), 3150-3182](https://www.rug.nl/ggdc/productivity/pwt/related-research)
#
# + papermill={"duration": 0.01931, "end_time": "2020-03-26T01:14:55.085783", "exception": false, "start_time": "2020-03-26T01:14:55.066473", "status": "completed"} tags=[]
| _notebooks/2020-03-19-estimating_infected.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Probabilistic Confirmed COVID-19 Cases - Denmark
# **Jorge: remember to reexecute the cell with the photo.**
# ### Table of contents
# [Initialization](#Initialization)
# [Data Importing and Processing](#Data-Importing-and-Processing)
# 1. [Kalman Filter Modeling: Case of Denmark Data](#1.-Kalman-Filter-Modeling:-Case-of-Denmark-Data)
# 1.1. [Model with the vector c fixed as [0, 1]](#1.1.-Kalman-Filter-Model-vector-c-fixed-as-[0,-1])
# 1.2. [Model with the vector c as a random variable with prior](#1.2.-Kalman-Filter-with-the-vector-c-as-a-random-variable-with-prior)
# 1.3. [Model without input (2 hidden variables)](#1.3.-Kalman-Filter-without-Input)
# 2. [Kalman Filter Modeling: Case of Norway Data](#2.-Kalman-Filter-Modeling:-Case-of-Norway-Data)
# 2.1. [Model with the vector c fixed as [0, 1]](#2.1.-Kalman-Filter-Model-vector-c-fixed-as-[0,-1])
# 2.2. [Model with the vector c as a random variable with prior](#2.2.-Kalman-Filter-with-the-vector-c-as-a-random-variable-with-prior)
# 2.3. [Model without input (2 hidden variables)](#2.3.-Kalman-Filter-without-Input)
# 3. [Kalman Filter Modeling: Case of Sweden Data](#Kalman-Filter-Modeling:-Case-of-Sweden-Data)
# 3.1. [Model with the vector c fixed as [0, 1]](#3.1.-Kalman-Filter-Model-vector-c-fixed-as-[0,-1])
# 3.2. [Model with the vector c as a random variable with prior](#3.2.-Kalman-Filter-with-the-vector-c-as-a-random-variable-with-prior)
# 3.3. [Model without input (2 hidden variables)](#3.3.-Kalman-Filter-without-Input)
# ## Initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 75, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY> "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" id="CoJmUeBEb2AO" outputId="a1e10ec8-af53-4ccd-f8c1-38e9cf1e4b54"
from os.path import join, pardir
import jax
import jax.numpy as jnp
import matplotlib.pyplot as plt
import numpy as np
import numpyro
import numpyro.distributions as dist
import pandas as pd
import seaborn as sns
from jax import lax, random, vmap
from jax.scipy.special import logsumexp
from numpyro import handlers
from numpyro.infer import MCMC, NUTS
from sklearn.preprocessing import StandardScaler
np.random.seed(2103)
# +
ROOT = pardir
DATA = join(ROOT, "data", "raw")
# random seed
# NOTE(review): this overrides the seed 2103 set in the previous cell.
np.random.seed(42)
#plot style
plt.style.use('ggplot')
# %matplotlib inline
plt.rcParams['figure.figsize'] = (16, 10)
# -
# ## Data Importing and Processing
# The data in this case are the confirmed COVID-19 cases and the mobility data (from Google) for three specific countries: Denmark, Sweden and Norway.
#
adress = join(ROOT, "data", "processed")
data = pd.read_csv(join(adress, 'data_three_mob_cov.csv'),parse_dates=['Date'])
data.info()
data.head(5)
# Handy functions to split the data, train the models and plot the results.
# +
def split_forecast(df, n_train=65):
    """Split dataframe `df` into mobility inputs and train/test targets.

    Parameters
    ----------
    df : pd.DataFrame
        Expected layout: column index 2 holds the confirmed cases (target)
        and column indices 3-6 hold the four mobility features -- assumes
        the layout of `data_three_mob_cov.csv`; TODO confirm.
    n_train : int
        Number of leading rows used for training; the rest is the test set.

    Returns
    -------
    tuple
        (X, y_train, y_test): X is the full (n, 4) feature matrix, the
        target series is split at `n_train`.
    """
    # just take the first 4 mobility features (columns 3..6)
    # NOTE: np.float_ was removed in NumPy 2.0; plain `float` is equivalent.
    X = df.iloc[:, 3:7].values.astype(float)
    # confirmed cases (target)
    y = df.iloc[:, 2].values.astype(float)
    return X, y[:n_train], y[n_train:]
def train_kf(model, data, n_train, n_test, num_samples=9000, num_warmup=3000, **kwargs):
    """Run NUTS/MCMC inference for a Kalman-filter model and return the MCMC object.

    `model` is a numpyro model taking (T, T_forecast, obs, ...); any extra
    keyword arguments (e.g. the mobility inputs `x`) are forwarded to the
    model through `mcmc.run`.
    """
    # Derive a fresh PRNG key from a fixed seed so runs are reproducible.
    _, run_key = random.split(random.PRNGKey(0))
    # burn-in is still large relative to the number of kept samples
    sampler = MCMC(
        NUTS(model=model),
        num_samples=num_samples,
        num_warmup=num_warmup,
        num_chains=1,
    )
    sampler.run(run_key, T=n_train, T_forecast=n_test, obs=data, **kwargs)
    return sampler
def get_samples(mcmc):
    """Return the posterior samples from `mcmc` as a plain dict keyed by site name."""
    # `mcmc.get_samples()` already returns a mapping; the original identity
    # dict-comprehension is just `dict(...)`.
    return dict(mcmc.get_samples())
def plot_samples(hmc_samples, nodes, dist=True):
    """Plot posterior samples for every variable named in `nodes`.

    Multi-dimensional sites get one figure per component. When `dist` is
    True a seaborn density plot is drawn, otherwise a raw trace plot.
    NOTE(review): the `dist` parameter shadows the module alias
    `numpyro.distributions as dist` inside this function (harmless here,
    since the module is not used in the body).
    """

    def _plot_one(values, label):
        # One small figure per component: density or trace, with a legend.
        plt.figure(figsize=(4, 3))
        if dist:
            sns.distplot(values, label=label)
        else:
            plt.plot(values, label=label)
        plt.legend()
        plt.show()

    for node in nodes:
        samples = hmc_samples[node]
        if len(samples.shape) > 1:
            for i in range(samples.shape[1]):
                _plot_one(samples[:, i], node + "%d" % i)
        else:
            _plot_one(samples, node)
def plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test):
    """Plot observed data and the forecast with a 95% interval.

    `hmc_samples["y_pred"]` holds one forecast path per posterior sample;
    its time dimension appears to be one step shorter than `idx_test`,
    hence the `idx_test[:-1]` slicing below -- TODO confirm against the
    model's `T_forecast` handling.
    """
    # Posterior mean and std-dev of the forecast at each horizon step.
    y_hat = hmc_samples["y_pred"].mean(axis=0)
    y_std = hmc_samples["y_pred"].std(axis=0)
    # ~95% interval assuming approximate normality of the predictive.
    y_pred_025 = y_hat - 1.96 * y_std
    y_pred_975 = y_hat + 1.96 * y_std
    plt.plot(idx_train, y_train, "b-")
    plt.plot(idx_test, y_test, "bx")
    plt.plot(idx_test[:-1], y_hat, "r-")
    plt.plot(idx_test[:-1], y_pred_025, "r--")
    plt.plot(idx_test[:-1], y_pred_975, "r--")
    plt.fill_between(idx_test[:-1], y_pred_025, y_pred_975, alpha=0.3)
    plt.legend(
        [
            "true (train)",
            "true (test)",
            "forecast",
            "forecast + stddev",
            "forecast - stddev",
        ]
    )
    plt.show()
n_train = 65 # number of points to train
n_test = 20 # number of points to forecast
idx_train = [*range(0,n_train)]
idx_test = [*range(n_train, n_train+n_test)]
# -
# ## 1. Kalman Filter Modeling: Case of Denmark Data
data_dk=data[data['Country'] == "Denmark"]
data_dk.head(5)
print("The length of the full dataset for Denmark is:" + " " )
print(len(data_dk))
# Prepare input of the models (we are using numpyro so the inputs are numpy arrays).
X, y_train, y_test = split_forecast(data_dk)
# ### 1.1. Kalman Filter Model vector c fixed as [0, 1]
# First model: the sampling distribution is replaced by one fixed variable $c$.
# + colab={"base_uri": "https://localhost:8080/", "height": 74} colab_type="code" id="Ng55LSLpcgdt" outputId="bd6bec3a-c418-445c-98d3-5f5a8f4e8f22"
def f(carry, input_t):
    """Single `lax.scan` transition: z_t = beta * z_{t-1} + W @ x_t + noise_t.

    `carry` is (W, beta, z_prev, tau); `input_t` is (x_t, noise_t).
    Returns the updated carry and the new state z_t.
    """
    weights, decay, state, tau = carry
    inputs, noise = input_t
    next_state = decay * state + weights @ inputs + noise
    return (weights, decay, next_state, tau), next_state
def model_wo_c(T, T_forecast, x, obs=None):
    """Kalman filter with mobility inputs and a fixed emission vector c = [0, 1].

    The 2-dim latent state evolves as z_t = beta * z_{t-1} + W @ x_t + noise_t
    (see `f`); the observed series is the second state component plus
    Gaussian noise. The first `T` steps are conditioned on `obs`; the next
    `T_forecast` steps yield the "y_pred" forecast site.
    """
    # Define priors over beta, tau, sigma, z_1
    W = numpyro.sample(
        name="W", fn=dist.Normal(loc=jnp.zeros((2, 4)), scale=jnp.ones((2, 4)))
    )
    beta = numpyro.sample(
        name="beta", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    tau = numpyro.sample(name="tau", fn=dist.HalfCauchy(scale=jnp.ones(2)))
    sigma = numpyro.sample(name="sigma", fn=dist.HalfCauchy(scale=0.1))
    z_prev = numpyro.sample(
        name="z_1", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    # Define LKJ prior for the state-noise correlation structure
    L_Omega = numpyro.sample("L_Omega", dist.LKJCholesky(2, 10.0))
    Sigma_lower = jnp.matmul(
        jnp.diag(jnp.sqrt(tau)), L_Omega
    ) # lower cholesky factor of the covariance matrix
    # One process-noise draw per transition (T + T_forecast - 2 steps)
    noises = numpyro.sample(
        "noises",
        fn=dist.MultivariateNormal(loc=jnp.zeros(2), scale_tril=Sigma_lower),
        sample_shape=(T + T_forecast - 2,),
    )
    # Propagate the dynamics forward using jax.lax.scan
    carry = (W, beta, z_prev, tau)
    z_collection = [z_prev]
    carry, zs_exp = lax.scan(f, carry, (x, noises), T + T_forecast - 2)
    z_collection = jnp.concatenate((jnp.array(z_collection), zs_exp), axis=0)
    # Fixed c = [0, 1]: the observation reads the 2nd latent component only
    obs_mean = z_collection[:T, 1]
    pred_mean = z_collection[T:, 1]
    # Sample the observed y (y_obs) and the unconditioned forecast (y_pred)
    numpyro.sample(name="y_obs", fn=dist.Normal(loc=obs_mean, scale=sigma), obs=obs)
    numpyro.sample(name="y_pred", fn=dist.Normal(loc=pred_mean, scale=sigma), obs=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="mw9b8NHIhdWe" outputId="61013633-802f-4c77-b70e-fd606ff35b1c"
# NOTE(review): here the first two mobility rows are dropped (X[2:]), while
# the analogous runs later in this notebook use X[:-2]; both match the
# model's T + T_forecast - 2 noise steps but align the inputs differently --
# confirm which alignment is intended.
mcmc = train_kf(model_wo_c, y_train, n_train, n_test, x=X[2:])
# -
# Plots of the distribution of the samples for each variable.
# + colab={} colab_type="code" id="UujwoeyCpTlf"
hmc_samples = get_samples(mcmc)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
# -
# Forecasting prediction, all the datapoints in the test set are within the Confidence Interval.
# + colab={} colab_type="code" id="qoe7CzPvp8QL"
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# + [markdown] colab_type="text" id="cn6zF-KJgyGv"
# ### 1.2. Kalman Filter with the vector c as a random variable with prior
# -
# Second model: the sampling distribution is a Normal distribution $c$.
# + colab={} colab_type="code" id="Ws4_3zgEn6MC"
def model_w_c(T, T_forecast, x, obs=None):
    """Kalman filter with mobility inputs where the emission vector c is learned.

    Identical to `model_wo_c` except the observation mean is z_t @ c with a
    Normal prior over the (2, 1) vector c instead of the fixed [0, 1].
    """
    # Define priors over beta, tau, sigma, z_1 (keep the shapes in mind)
    W = numpyro.sample(
        name="W", fn=dist.Normal(loc=jnp.zeros((2, 4)), scale=jnp.ones((2, 4)))
    )
    beta = numpyro.sample(
        name="beta", fn=dist.Normal(loc=jnp.array([0.0, 0.0]), scale=jnp.ones(2))
    )
    tau = numpyro.sample(name="tau", fn=dist.HalfCauchy(scale=jnp.array([2,2])))
    sigma = numpyro.sample(name="sigma", fn=dist.HalfCauchy(scale=1))
    z_prev = numpyro.sample(
        name="z_1", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    # Define LKJ prior
    L_Omega = numpyro.sample("L_Omega", dist.LKJCholesky(2, 10.0))
    Sigma_lower = jnp.matmul(
        jnp.diag(jnp.sqrt(tau)), L_Omega
    ) # lower cholesky factor of the covariance matrix
    noises = numpyro.sample(
        "noises",
        fn=dist.MultivariateNormal(loc=jnp.zeros(2), scale_tril=Sigma_lower),
        sample_shape=(T + T_forecast - 2,),
    )
    # Propagate the dynamics forward using jax.lax.scan
    carry = (W, beta, z_prev, tau)
    z_collection = [z_prev]
    carry, zs_exp = lax.scan(f, carry, (x, noises), T + T_forecast - 2)
    z_collection = jnp.concatenate((jnp.array(z_collection), zs_exp), axis=0)
    # Learned emission vector c (shape (2, 1)) with a standard-normal prior
    c = numpyro.sample(
        name="c", fn=dist.Normal(loc=jnp.array([[0.0], [0.0]]), scale=jnp.ones((2, 1)))
    )
    obs_mean = jnp.dot(z_collection[:T, :], c).squeeze()
    pred_mean = jnp.dot(z_collection[T:, :], c).squeeze()
    # Sample the observed y (y_obs) and the unconditioned forecast (y_pred)
    numpyro.sample(name="y_obs", fn=dist.Normal(loc=obs_mean, scale=sigma), obs=obs)
    numpyro.sample(name="y_pred", fn=dist.Normal(loc=pred_mean, scale=sigma), obs=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="IPTubrRIn6MJ" outputId="2df40248-bde8-435a-e083-abd9293d064d"
mcmc2 = train_kf(model_w_c, y_train, n_train, n_test, x=X[:-2])
# + colab={} colab_type="code" id="njjAWNp3kWWO"
hmc_samples = get_samples(mcmc2)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
# + colab={} colab_type="code" id="q8osgHedeeMC"
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# + [markdown] colab_type="text" id="E715bRprgkbN"
# ### 1.3. Kalman Filter without Input
# -
# Third model: no input mobility data, **two** hidden states.
# + colab={} colab_type="code" id="jJSQ3bc4evbx"
def f_s(carry, noise_t):
    """Scan transition without exogenous input: z_t = beta * z_{t-1} + noise_t.

    `carry` is (beta, z_prev, tau); returns the updated carry and z_t.
    """
    decay, state, tau = carry
    next_state = decay * state + noise_t
    return (decay, next_state, tau), next_state
def twoh_c_kf(T, T_forecast, obs=None):
    """Kalman filter with two hidden states and NO exogenous (mobility) input.

    The latent state evolves as z_t = beta * z_{t-1} + noise_t (see `f_s`);
    the observation mean is z_t @ c with a learned (2, 1) emission vector c.
    """
    # Define priors over beta, tau, sigma, z_1
    # W = numpyro.sample(name="W", fn=dist.Normal(loc=jnp.zeros((2,4)), scale=jnp.ones((2,4))))
    beta = numpyro.sample(
        name="beta", fn=dist.Normal(loc=jnp.array([0.0, 0.0]), scale=jnp.ones(2))
    )
    # Wider HalfCauchy scales than the input-driven models above.
    tau = numpyro.sample(name="tau", fn=dist.HalfCauchy(scale=jnp.array([10,10])))
    sigma = numpyro.sample(name="sigma", fn=dist.HalfCauchy(scale=5))
    z_prev = numpyro.sample(
        name="z_1", fn=dist.Normal(loc=jnp.zeros(2), scale=jnp.ones(2))
    )
    # Define LKJ prior
    L_Omega = numpyro.sample("L_Omega", dist.LKJCholesky(2, 10.0))
    Sigma_lower = jnp.matmul(
        jnp.diag(jnp.sqrt(tau)), L_Omega
    ) # lower cholesky factor of the covariance matrix
    noises = numpyro.sample(
        "noises",
        fn=dist.MultivariateNormal(loc=jnp.zeros(2), scale_tril=Sigma_lower),
        sample_shape=(T + T_forecast - 2,),
    )
    # Propagate the dynamics forward using jax.lax.scan
    carry = (beta, z_prev, tau)
    z_collection = [z_prev]
    carry, zs_exp = lax.scan(f_s, carry, noises, T + T_forecast - 2)
    z_collection = jnp.concatenate((jnp.array(z_collection), zs_exp), axis=0)
    # Learned emission vector c (shape (2, 1)) with a standard-normal prior
    c = numpyro.sample(
        name="c", fn=dist.Normal(loc=jnp.array([[0.0], [0.0]]), scale=jnp.ones((2, 1)))
    )
    obs_mean = jnp.dot(z_collection[:T, :], c).squeeze()
    pred_mean = jnp.dot(z_collection[T:, :], c).squeeze()
    # Sample the observed y (y_obs) and the unconditioned forecast (y_pred)
    numpyro.sample(name="y_obs", fn=dist.Normal(loc=obs_mean, scale=sigma), obs=obs)
    numpyro.sample(name="y_pred", fn=dist.Normal(loc=pred_mean, scale=sigma), obs=None)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="ctttbf_uevb0" outputId="b5ba812b-c96a-4b8a-eec6-ed9a9879516f"
mcmc3 = train_kf(twoh_c_kf, y_train, n_train, n_test, num_samples=12000, num_warmup=5000)
# + colab={} colab_type="code" id="y8cd27q2evb2"
hmc_samples = get_samples(mcmc3)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
# + colab={"base_uri": "https://localhost:8080/", "height": 793} colab_type="code" id="-DU_X0r-evb4" outputId="94e1895d-6f81-4ad2-90a0-519fed786c0d"
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# -
# ## 2. Kalman Filter Modeling: Case of Norway Data
data_no=data[data['Country'] == "Norway"]
data_no.head(5)
print("The length of the full dataset for Norway is:" + " " )
print(len(data_no))
n_train = 66 # number of points to train
n_test = 20 # number of points to forecast
idx_train = [*range(0,n_train)]
idx_test = [*range(n_train, n_train+n_test)]
X, y_train, y_test = split_forecast(data_no, n_train)
# ### 2.1. Kalman Filter Model vector c fixed as [0, 1]
mcmc_no = train_kf(model_wo_c, y_train, n_train, n_test, x=X[:-2])
hmc_samples = get_samples(mcmc_no)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# ### 2.2. Kalman Filter with the vector c as a random variable with prior
mcmc2_no = train_kf(model_w_c, y_train, n_train, n_test, x=X[:-2])
hmc_samples = get_samples(mcmc2_no)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# ### 2.3. Kalman Filter without Input
mcmc3_no = train_kf(twoh_c_kf, y_train, n_train, n_test)
hmc_samples = get_samples(mcmc3_no)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# ## 3. Kalman Filter Modeling: Case of Sweden Data
data_sw=data[data['Country'] == "Sweden"]
data_sw.head(5)
print("The length of the full dataset for Sweden is:" + " " )
print(len(data_sw))
n_train = 75 # number of points to train
n_test = 22 # number of points to forecast
idx_train = [*range(0,n_train)]
idx_test = [*range(n_train, n_train+n_test)]
X, y_train, y_test = split_forecast(data_sw, n_train)
# ### 3.1. Kalman Filter Model vector c fixed as [0, 1]
mcmc_sw = train_kf(model_wo_c, y_train, n_train, n_test, x=X[:-2])
hmc_samples = get_samples(mcmc_sw)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# ### 3.2. Kalman Filter with the vector c as a random variable with prior
mcmc2_sw = train_kf(model_w_c, y_train, n_train, n_test, x=X[:-2])
hmc_samples = get_samples(mcmc2_sw)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# ### 3.3. Kalman Filter without Input
mcmc3_sw = train_kf(twoh_c_kf, y_train, n_train, n_test)
hmc_samples = get_samples(mcmc3_sw)
plot_samples(hmc_samples, ["beta", "tau", "sigma"])
plot_forecast(hmc_samples, idx_train, idx_test, y_train, y_test)
# Save results to rerun the plotting functions.
import pickle

MODELS = join(ROOT, "models")
# Persist the posterior samples so the plotting cells can be re-run without
# re-running MCMC.
# FIX: the file handle was previously named `f`, which shadowed the scan
# transition function `f` defined earlier in this notebook (re-running model
# cells after this one would then fail); use `fh` instead.
for i, mc in enumerate([mcmc3_no, mcmc_sw, mcmc2_sw, mcmc3_sw]):
    with open(join(MODELS, f"hmc_ok_{i}.pickle"), "wb") as fh:
        pickle.dump(get_samples(mc), fh)
# ## Gaussian Process
| notebooks/4.3-mbml_kf_final.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import mechanize
import cookielib
# Browser
br = mechanize.Browser()
# Cookie Jar
cj = cookielib.LWPCookieJar()
br.set_cookiejar(cj)
# Browser options
br.set_handle_equiv(True)
br.set_handle_gzip(True)
br.set_handle_redirect(True)
br.set_handle_referer(True)
br.set_handle_robots(False)
# Follows refresh 0 but not hangs on refresh > 0
br.set_handle_refresh(mechanize._http.HTTPRefreshProcessor(), max_time=1)
# Want debugging messages?
#br.set_debug_http(True)
#br.set_debug_redirects(True)
#br.set_debug_responses(True)
# User-Agent (this is cheating, ok?)
br.addheaders = [('User-agent', 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/40.0.2214.85 Safari/537.36')]
# -
#r = br.open('https://www.yelp.com/search?find_desc=vietnamese+food&find_loc=Hoboken&ns=1')
r = br.open('http://stackoverflow.com/search?q=how+to+find+duplicates+in+list+python')
html = r.read()
html
import bs4
soup = bs4.BeautifulSoup(html)
search_results = soup.findAll(attrs={'class':'question-summary search-result'},limit=10)
search_results[0]
with open('this_html.html', 'w') as outfile:
outfile.write(html)
title=[res.find(attrs={'class':'result-link'}).span.text.strip() for res in search_results]
time=[res.find(attrs={'class':'relativetime'}).text.strip() for res in search_results]
ans=[res.find(attrs={'class':'status answered-accepted'}) for res in search_results]
c=[]
for b in ans:
c.append(b.text.strip().replace('answer',' answer') if b!=None else u'0 answer')
link=[res.find(attrs={'class':'result-link'}).a['data-searchsession'] for res in search_results]
link=["<http://stackoverflow.com"+i+">" for i in link]
zip(title,time,c,link)
ans=[res.find(attrs={'class':'status answered-accepted'}) for res in search_results]
c=[]
for b in ans:
c.append(b.text.strip().replace('answer',' answer') if b!=None else u'0 answer')
c
str(search_results[0])[1915:2032]
sr = search_results[0]
sr
a=None
a if 1 else 0
| Assignment4/Scraping with Mechanize.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 1. Identifying bugs in code
# The following `stripped_reversed_lowercase` function contains at least one bug. You can see this by running the code in the cell below which tests the functionality of the `stripped_reversed_lowercase` function.
#
# Set trace at the beginning of `stripped_reversed_lowercase` and use debugger to solve the bug(s). Execute the code line by line and print variables used in the function to understand what's going wrong.
def stripped_reversed_lowercase(original):
    """Strip surrounding whitespace from `original`, reverse it, and lowercase it."""
    trimmed = original.strip()
    return trimmed[::-1].lower()
# + editable=false
# Let's verify it works
original = ' \n Original String '
result = stripped_reversed_lowercase(original)
assert result == 'gnirts lanigiro'
# -
| notebooks/beginner/exercises/debugging_exercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="BqmjPG8KGBcv"
#
# In this blog post, we will identify whether an image is Rock, Paper or Scissors.
#
# ## The dataset
# Rock Paper Scissors is a dataset containing about 3,000 computer-generated images from a variety of different hands, from different races, ages and genders, posed into Rock, Paper or Scissors and labelled as such. Each image is 300 X 300 pixels in 24-bit color. The images have all been generated using CGI techniques as an experiment in determining if a CGI-based dataset can be used for classification against real images. You can download the dataset [here](http://www.laurencemoroney.com/rock-paper-scissors-dataset/).
# + colab={"base_uri": "https://localhost:8080/", "height": 402} colab_type="code" id="it1c0jCiNCIM" outputId="699c3644-b092-43ee-c9c6-5ea4a3ed38ac"
# download the training and test set zip files
# !wget --no-check-certificate \
# https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps.zip \
# -O /tmp/rps.zip
# !wget --no-check-certificate \
# https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps-test-set.zip \
# -O /tmp/rps-test-set.zip
# + colab={} colab_type="code" id="PnYP_HhYNVUK"
import os
import zipfile

# Unzip the training and test archives into /tmp.
# Use a context manager so the archives are closed even if extraction fails
# (the original opened and closed each ZipFile by hand).
for _zip_path in ("/tmp/rps.zip", "/tmp/rps-test-set.zip"):
    with zipfile.ZipFile(_zip_path, "r") as _zip_ref:
        _zip_ref.extractall("/tmp/")
# + colab={"base_uri": "https://localhost:8080/", "height": 142} colab_type="code" id="MrxdR83ANgjS" outputId="8d03d781-7863-4d9a-fb79-c26a4d688e06"
# subdirectories
rock_dir = os.path.join('/tmp/rps/rock')
paper_dir = os.path.join('/tmp/rps/paper')
scissors_dir = os.path.join('/tmp/rps/scissors')
print('total training rock images:', len(os.listdir(rock_dir)))
print('total training paper images:', len(os.listdir(paper_dir)))
print('total training scissors images:', len(os.listdir(scissors_dir)))
rock_files = os.listdir(rock_dir)
print(rock_files[:10])
paper_files = os.listdir(paper_dir)
print(paper_files[:10])
scissors_files = os.listdir(scissors_dir)
print(scissors_files[:10])
# + [markdown] colab_type="text" id="DXyArVFnJ9cf"
# There are 840 images of each class.
# Let's see some examples of the images.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="jp9dLel9N9DS" outputId="cb881830-915d-42d5-a274-679586c3dae6"
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
pic_index = 2
next_rock = [os.path.join(rock_dir, fname)
for fname in rock_files[pic_index-2:pic_index]]
next_paper = [os.path.join(paper_dir, fname)
for fname in paper_files[pic_index-2:pic_index]]
next_scissors = [os.path.join(scissors_dir, fname)
for fname in scissors_files[pic_index-2:pic_index]]
for i, img_path in enumerate(next_rock+next_paper+next_scissors):
print(img_path)
img = mpimg.imread(img_path)
plt.imshow(img)
plt.axis('Off')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="LWTisYLQM1aM" outputId="96c80223-a670-4b94-c2a3-fdb338f3400c"
import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator
# set up the image generator
TRAINING_DIR = "/tmp/rps/"
training_datagen = ImageDataGenerator(
rescale = 1./255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode='nearest')
VALIDATION_DIR = "/tmp/rps-test-set/"
validation_datagen = ImageDataGenerator(rescale = 1./255)
train_generator = training_datagen.flow_from_directory(
TRAINING_DIR,
target_size=(150,150),
class_mode='categorical',
batch_size=126
)
validation_generator = validation_datagen.flow_from_directory(
VALIDATION_DIR,
target_size=(150,150),
class_mode='categorical',
batch_size=126
)
model = tf.keras.models.Sequential([
    # Note the input shape is the desired size of the image 150x150 with 3 bytes color
    # the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # the second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # the third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # the fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    # dropout to reduce overfitting before the dense layers
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    # 3 output classes (paper/rock/scissors, alphabetical) with softmax
    tf.keras.layers.Dense(3, activation='softmax')
])
model.summary()
# categorical_crossentropy matches the one-hot labels from class_mode='categorical'
model.compile(loss = 'categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# 20 steps/epoch x batch 126 = 2520 images, i.e. the full training set per epoch
history = model.fit(train_generator, epochs=25, steps_per_epoch=20, validation_data = validation_generator, verbose = 1, validation_steps=3)
model.save("rps.h5")
# + colab={"base_uri": "https://localhost:8080/", "height": 298} colab_type="code" id="aeTRVCr6aosw" outputId="a85b312e-2b84-4483-86c1-f83d549b08c3"
import matplotlib.pyplot as plt
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend(loc=0)
plt.figure()
plt.show()
# + [markdown] colab_type="text" id="fDKuBLBSKOgC"
# We can see that the training accuracy improved over time and trends towards 1.0. The validation accuracy was unstable in the beginning but has a value between 0.9 and 1.0 over time.
#
# ## Model evaluation
# Let us now test the model with some images that it hasn't previously seen. This new dataset with 33 images can be downloaded [here](https://storage.googleapis.com/laurencemoroney-blog.appspot.com/rps-validation.zip).
# + colab={"base_uri": "https://localhost:8080/", "height": 1000, "resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "headers": [["content-type", "application/javascript"]], "ok": true, "status": 200, "status_text": ""}}} colab_type="code" id="ZABJp7T3VLCU" outputId="090250cd-3599-4e01-ca73-ee17dd1c3b5c"
import numpy as np
from google.colab import files
from keras.preprocessing import image
uploaded = files.upload()
for fn in uploaded.keys():
# predicting images
path = fn
img = image.load_img(path, target_size=(150, 150))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
images = np.vstack([x])
classes = model.predict(images, batch_size=10)
print(fn)
print(classes)
# + [markdown] colab_type="text" id="Lmqq5v5OK0Ej"
# When using the image generator, the classes come from directories and thus were sorted in alphabetical order. So the first value is for paper, then rock, and then scissors.
# The model guessed 32 out of 33 images correctly. It got only 1 image wrong, the `paper9.png`. If you download the images for yourself, you can see why the model got confused.
# We can conclude that the model is highly accurate.
| _jupyterNotebooks/computer_vision/RockPaperScissors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Third Attempt on Language Detection
#
# This code shows the third attempt on processing the corpora and trying to come up with a model for the europar.test file.
#
# This code uses files in the /pickles directory. This contains the serialized pickle files with processed tokenized documents, all words and all letters in each one of the documents.
#
# To build a model, this uses the most common words and most common letters used on each language and builds a feature set around them.
from bs4 import BeautifulSoup
from nltk.tokenize import sent_tokenize, word_tokenize
from collections import Counter
from nltk.tokenize import RegexpTokenizer
import os
import nltk
import random
import time
import string
import pickle
import numpy
# +
# Utility functions
# These functions were taken from attempt 1 and put together for
# easier maintenance and testing.
def print_elapsed_time():
    """Format the time elapsed since the notebook-global `start` as H:MM:SS.

    Despite its name this function returns the string rather than printing it.
    """
    elapsed_seconds = time.time() - start
    minutes, seconds = divmod(elapsed_seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "%d:%02d:%02d" % (hours, minutes, seconds)
#Remove any <tags> within text
def extract_text_only(text):
    """Strip any HTML/XML markup from `text`, returning only the visible text."""
    parsed = BeautifulSoup(text, "lxml")
    return parsed.get_text()
def tokenize_removepuncuation(text):
    """Split `text` into word tokens, dropping punctuation.

    (The typo in the name is kept intentionally: the rest of the notebook
    calls it under this spelling.)
    """
    word_only_tokenizer = RegexpTokenizer(r'\w+')
    return word_only_tokenizer.tokenize(text)
def most_common_words_letter(most_common_words,most_common_letters):
    """Flatten the per-language most-common (item, count) lists into feature sets.

    Args:
        most_common_words: {language: [(word, count), ...]}
        most_common_letters: {language: [(letter, count), ...]}

    Returns:
        (word_features, letter_features): two sets containing every word /
        letter that is common in at least one language; they are later used
        as the keys of the classifier's feature dicts.
    """
    word_features = {pair[0] for pairs in most_common_words.values() for pair in pairs}
    letter_features = {pair[0] for pairs in most_common_letters.values() for pair in pairs}
    return word_features, letter_features
#Takes two Counter objects, removes common elements
def remove_common_elements(counter1, counter2):
elements_intersect = (counter1 & counter2).most_common()
for n in elements_intersect:
key = n[0]
del counter1[key]
del counter2[key]
return counter1, counter2
# +
#Feature extraction and creation functions
#Loop through directory and extract text
#directory is the path to directory to process
def get_text_from_directory(directory):
    """Read every file in `directory`, tokenize it and tally word/letter counts.

    The last path component of `directory` is used as the language label.

    Args:
        directory: path to one language's corpus directory ('/'-separated).

    Returns:
        documents: list of (token_list, language_label) tuples, one per file.
        word_counter: Counter of all word tokens across the files.
        alphabet_counter: Counter of all characters across the tokens.
    """
    language_label = directory.split("/")[-1]
    documents = []
    counter = 0  # number of files successfully processed
    #keep a count on unique words seen on documents
    word_counter = Counter()
    alphabet_counter = Counter()
    for filename in os.listdir(directory):
        try:
            # `with` guarantees the handle is closed even on error
            # (the original never closed the file objects it opened).
            with open(directory+"/"+filename, "r") as text_file:
                raw_contents = text_file.read()
            text = extract_text_only(raw_contents)
            #Tokenize words and remove punctuation
            tokenized_text = tokenize_removepuncuation(text)
            #add to dict counter
            word_counter.update(tokenized_text)
            #get letters and add to alphabet counter
            for token in tokenized_text:
                alphabet_counter.update(token)
            documents.append((tokenized_text, language_label))
            counter += 1
        except Exception:  # narrowed from bare `except:` so Ctrl-C/SystemExit still propagate
            print(directory+" - Issue with filename:"+filename+" Ignoring.")
    return documents, word_counter, alphabet_counter
def extract_data_from_corpora(corpora_directory, number_of_words, number_of_letter, save_pickles):
    """Process every language sub-directory of a corpus and collect features.

    Each sub-directory of `corpora_directory` is treated as one language and
    tokenized via get_text_from_directory().

    Args:
        corpora_directory: root corpus directory. NOTE: it is concatenated
            directly with the sub-directory name below, so it must end with
            a path separator.
        number_of_words: most-common words to keep per language.
        number_of_letter: most-common non-ASCII letters to keep per language.
        save_pickles: if True, serialize counters/documents under pickles/.

    Returns:
        all_documents: list of (token_list, language_label) over all languages.
        most_common_words: {language: [(word, count), ...]}
        most_common_letters: {language: [(letter, count), ...]}
    """
    all_documents = []
    most_common_words = {}
    most_common_letters = {}
    #Loop through all directories contain corpora with all languages
    #directory will be the folder containing documents on that language
    for directory in os.listdir(corpora_directory):
        #full_path is the per-language directory, e.g. <corpora_directory>en
        full_path = corpora_directory+directory
        if(os.path.isdir(full_path)):
            print("About to process directory "+directory)
            #process directory, text contains documents list with rows (['worda1','worda2,'worda3'],'LANG-A')
            #word_counter contains count of all words seen
            text, word_counter, alphabet = get_text_from_directory(full_path)
            print("Number of words for this language:"+str(len(word_counter)))
            #Keep only letters that are not common ascii letters
            #(alphabet is a Counter; iterating a list copy makes deletion safe)
            for letter in list(alphabet):
                if(letter in list(string.ascii_letters) or letter in list(string.digits)):
                    del alphabet[letter]
            #Keep track of most common words per language to use on feature set
            most_common_words[directory] = word_counter.most_common(number_of_words)
            most_common_letters[directory] = alphabet.most_common(number_of_letter)
            if(save_pickles):
                #Save to pickle so it can be read without having to process again
                pickle_out = open("pickles/word_counter_"+directory+".pickle","wb")
                pickle.dump(word_counter, pickle_out)
                pickle_out.close()
                pickle_out = open("pickles/alphabet_"+directory+".pickle","wb")
                pickle.dump(alphabet, pickle_out)
                pickle_out.close()
                pickle_out = open("pickles/documents_"+directory+".pickle","wb")
                pickle.dump(text, pickle_out)
                pickle_out.close()
            all_documents = all_documents + text
    if(save_pickles):
        #Save all_documents to pickle for later use
        pickle_out = open("pickles/all_documents.pickle","wb")
        pickle.dump(all_documents, pickle_out)
        pickle_out.close()
    return all_documents, most_common_words, most_common_letters
#this function receives a document and creates a feature based list
# Input:
# [(['worda1','worda2,'worda3'],'LANG-A'),
# ['wordb1','wordb2,'wordb3'],'LANG-B'),...]
# Ouput:
# [('αποφασιστικής': False,
# 'Περιφερειακής': True,
# 'pontot': False,...),'ru'),...]
def document_features(document):
    """Map a tokenized document to a boolean feature dict for the classifier.

    Relies on the notebook-global sets `word_features` and `letter_features`:
    each common word and each common non-ASCII letter becomes a key whose
    value says whether it occurs in this document.
    """
    distinct_words = set(document)
    features = {}
    # one boolean feature per common word
    for candidate_word in word_features:
        features[candidate_word] = candidate_word in distinct_words
    # gather every character used anywhere in the document...
    letters_in_doc = set()
    for token in distinct_words:
        letters_in_doc.update(token)
    # ...then drop plain ASCII letters/digits, keeping distinctive characters
    for ascii_char in set(string.ascii_letters) | set(string.digits):
        letters_in_doc.discard(ascii_char)
    # one boolean feature per common letter
    for candidate_letter in letter_features:
        features[candidate_letter] = candidate_letter in letters_in_doc
    return features
def extract_data_from_corpora_pickles(pickle_directory, number_of_documents, number_of_words, number_of_letters):
    """Load documents, word counters and letter counters from pickle files.

    Expects the files written by extract_data_from_corpora():
    word_counter_<lang>.pickle, alphabet_<lang>.pickle, documents_<lang>.pickle.
    Words/letters shared between two languages are removed from both counters
    (via remove_common_elements) so only discriminative entries survive.

    Args:
        pickle_directory: directory holding the pickle files.
        number_of_documents: documents to keep per language (negative = all).
        number_of_words: most-common words to keep per language (negative = all).
        number_of_letters: most-common letters to keep per language (negative = all).

    Returns:
        (all_documents, most_common_words, most_common_letters)
    """
    all_documents = []
    most_common_words = {}
    most_common_letters = {}
    #Read data from pickle files
    for filename in os.listdir(pickle_directory):
        # file names look like word_counter_en.pickle -> language code 'en'
        language = (filename.split('_')[-1]).split('.')[0]
        path = pickle_directory+"/"+filename
        if('word_counter' in filename):
            with open(path, "rb") as fh:  # `with` closes the handle (previously leaked)
                all_words_for_language = pickle.load(fh)
            ##eliminate any words that intersect with another language
            for lang in most_common_words:
                all_words_for_language, most_common_words[lang] = remove_common_elements(all_words_for_language, most_common_words[lang])
            most_common_words[language] = all_words_for_language
        elif('alphabet' in filename):
            with open(path, "rb") as fh:
                all_letters_for_language = pickle.load(fh)
            ##eliminate any letters that intersect with another language
            for lang in most_common_letters:
                all_letters_for_language, most_common_letters[lang] = remove_common_elements(all_letters_for_language, most_common_letters[lang])
            most_common_letters[language] = all_letters_for_language
        elif('documents' in filename):
            with open(path, "rb") as fh:
                documents = pickle.load(fh)
            # BUG FIX: the negative-limit fallback previously used
            # len(all_documents) (the still-growing accumulator, 0 for the
            # first language), silently dropping documents; it must be
            # len(documents) -- the list actually being sliced.
            doc_limit = number_of_documents if(number_of_documents>=0) else len(documents)
            all_documents += documents[:doc_limit]
    #return only up to the amount required
    for lang in most_common_words:
        word_limit = number_of_words if(number_of_words>=0) else len(most_common_words[lang])
        most_common_words[lang] = most_common_words[lang].most_common(word_limit)
    for lang in most_common_letters:
        letter_limit = number_of_letters if(number_of_letters>=0) else len(most_common_letters[lang])
        most_common_letters[lang] = most_common_letters[lang].most_common(letter_limit)
    return all_documents, most_common_words, most_common_letters
def extract_documents_from_corpora_pickles(pickle_directory, number_of_documents):
    """Load tokenized documents from the per-language pickle files in a directory.

    Only files whose name contains 'documents' are read; each holds a list of
    (token_list, language_label) tuples produced by get_text_from_directory().

    Args:
        pickle_directory: directory containing documents_<lang>.pickle files.
        number_of_documents: max documents to keep per language; a negative
            value means "keep every document in the file".

    Returns:
        A single list concatenating the selected documents of all languages.
    """
    all_documents = []
    #Read data from pickle files
    for filename in os.listdir(pickle_directory):
        if('documents' in filename):
            with open(pickle_directory+"/"+filename, "rb") as fh:  # closes handle (previously leaked)
                documents = pickle.load(fh)
            # BUG FIX: the negative-limit fallback previously used
            # len(all_documents) (the accumulator, 0 on the first file),
            # so a negative request returned nothing; it must be len(documents).
            doc_limit = number_of_documents if(number_of_documents>=0) else len(documents)
            all_documents += documents[:doc_limit]
    return all_documents
def extract_wordsletters_from_corpora_pickles(pickle_directory, number_of_words, number_of_letters):
    """Load per-language word/letter counters from pickles, keeping only the
    most common, language-discriminating entries.

    Words/letters shared between two languages are removed from both counters
    via remove_common_elements(), so the surviving entries discriminate.

    Args:
        pickle_directory: directory with word_counter_<lang>.pickle and
            alphabet_<lang>.pickle files.
        number_of_words: most-common words to keep per language (negative = all).
        number_of_letters: most-common letters to keep per language (negative = all).

    Returns:
        (most_common_words, most_common_letters): dicts mapping language code
        to a list of (item, count) pairs.
    """
    most_common_words = {}
    most_common_letters = {}
    #Read data from pickle files
    for filename in os.listdir(pickle_directory):
        # file names look like word_counter_en.pickle -> language code 'en'
        language = (filename.split('_')[-1]).split('.')[0]
        path = pickle_directory+"/"+filename
        if('word_counter' in filename):
            with open(path, "rb") as fh:  # `with` closes the handle (previously leaked)
                all_words_for_language = pickle.load(fh)
            ##eliminate any words that intersect with another language
            for lang in most_common_words:
                all_words_for_language, most_common_words[lang] = remove_common_elements(all_words_for_language, most_common_words[lang])
            most_common_words[language] = all_words_for_language
        elif('alphabet' in filename):
            with open(path, "rb") as fh:
                all_letters_for_language = pickle.load(fh)
            ##eliminate any letters that intersect with another language
            for lang in most_common_letters:
                all_letters_for_language, most_common_letters[lang] = remove_common_elements(all_letters_for_language, most_common_letters[lang])
            most_common_letters[language] = all_letters_for_language
    #return only up to the amount required
    for lang in most_common_words:
        word_limit = number_of_words if(number_of_words>=0) else len(most_common_words[lang])
        most_common_words[lang] = most_common_words[lang].most_common(word_limit)
    for lang in most_common_letters:
        letter_limit = number_of_letters if(number_of_letters>=0) else len(most_common_letters[lang])
        most_common_letters[lang] = most_common_letters[lang].most_common(letter_limit)
    return most_common_words, most_common_letters
# +
#Classification functions
def classify_document(document, classifier):
    """Predict the language label of a raw text with the trained classifier."""
    tokens = tokenize_removepuncuation(document)
    return classifier.classify(document_features(tokens))
def test_europarltest_file(eurofile, resultsfile, everyother, classifier):
    """Classify every `everyother`-th sentence of a labelled europarl file.

    Each input line is a two-letter language code immediately followed by a
    sentence. Predictions are written to `resultsfile` as CSV rows of
    (predicted, given, correct?).

    Args:
        eurofile: path to the labelled test file.
        resultsfile: path of the CSV results file to (over)write.
        everyother: sampling stride; only every N-th line is classified.
        classifier: trained classifier used via classify_document().

    Returns:
        (total_ctr, positive_ctr, negative_ctr): attempted / correct / wrong.
    """
    positive_ctr = 0
    negative_ctr = 0
    total_ctr = 0
    processed_counter = 0
    # `with` blocks close both files even if classification raises
    # (the original left `fileout` open on any exception).
    with open(resultsfile,'w') as fileout:
        #columns
        fileout.write('predicted, language given, correctly classified?\n')
        with open(eurofile,'r') as f:
            for line in f:
                processed_counter += 1
                if(processed_counter%everyother!=0):
                    continue  # sample only every `everyother`-th line
                total_ctr += 1
                #language is first two letters in line
                language = line[:2]
                #sentence is rest, clean up spaces
                sentence = line[2:].strip()
                #Detect language based on model
                language_detected = classify_document(sentence, classifier)
                correctly_classified = language_detected==language
                #tally correct and incorrect
                if(correctly_classified):
                    positive_ctr += 1
                else:
                    negative_ctr += 1
                fileout.write(language_detected+','+language+','+str(correctly_classified)+'\n')
    return total_ctr, positive_ctr, negative_ctr
# +
# -------------Step 1-------------
# ----READ FROM PICKLE FILES (Pre-processed files)----
#Get data to create features from corpora
start = time.time()
pickles_directory = "pickles"
#How many documents to process for each language
number_of_documents = 1000
#how many number of most common words per language
number_of_words = 50
#letters to use per language
number_of_letters = 20
# NOTE(review): this call hard-codes 100 words and 100 letters and ignores the
# number_of_words / number_of_letters variables set just above -- confirm
# which values are intended.
all_documents, most_common_words, most_common_letters = extract_data_from_corpora_pickles(pickles_directory, number_of_documents, 100, 100)
print("Elapsed time featureset creation:"+print_elapsed_time())
print("all_documents:"+str(len(all_documents)))
print("most_common_words:"+str(len(most_common_words)))
print("most_common_letters:"+str(len(most_common_letters)))
# +
# -------------Step 1-------------
# ----READ FROM PICKLE FILES (Pre-read)----
# Alternative to the previous cell: load documents and word/letter counters
# in two separate passes over the pickle directory.
#Get data to create features from corpora
start = time.time()
pickles_directory = "pickles"
number_of_documents = 3000
#Part 1 - get documents
start = time.time()
all_documents = extract_documents_from_corpora_pickles(pickles_directory,number_of_documents)
print("Elapsed time reading all documents:"+print_elapsed_time())
# +
number_of_words = 700
number_of_letters = 20
#Part 2 - get common words, letters
start = time.time()
most_common_words, most_common_letters = extract_wordsletters_from_corpora_pickles(pickles_directory, number_of_words, number_of_letters)
print("Elapsed time reading all words, letters:"+print_elapsed_time())
print("all_documents:"+str(len(all_documents)))
print("most_common_words:"+str(len(most_common_words)))
print("most_common_letters:"+str(len(most_common_letters)))
# -------------Step 2-------------
# Create featureset to be used for training
# this is a list of documents with features and label
start = time.time()
#create word_features
word_features, letter_features = most_common_words_letter(most_common_words,most_common_letters)
print("words_features:"+str(len(word_features)))
print("letter_features:"+str(len(letter_features)))
#create featureset
# each entry is ({feature_name: bool, ...}, language_label)
featuresets = [(document_features(d), c) for (d,c) in all_documents]
print("Elapsed time featureset creation:"+print_elapsed_time())
print("featuresets:"+str(len(featuresets)))
# -------------Step 3-------------
# Split train, test for model classification and scoring
# shuffle in place so the split is not grouped by language
numpy.random.shuffle(featuresets)
#calculate how many items to slice by (95% train, 5% test)
slice_by = int((95 * len(featuresets))/100)
train_set, test_set = featuresets[:slice_by], featuresets[slice_by:]
print("Train set:"+str(len(train_set)))
print("Test set:"+str(len(test_set)))
# -------------Step 4-------------
# Build the Model
# Train a Naive Bayes classifier on the feature sets, then persist the
# classifier together with the feature vocabularies needed to reuse it.
start = time.time()
classifier = nltk.NaiveBayesClassifier.train(train_set)
print("Elapsed time for training:"+print_elapsed_time())
# start = time.time()
# print("Accuracy:"+str(nltk.classify.accuracy(classifier, test_set)))
# print("Elapsed time for accuracy testing:"+print_elapsed_time())
#Save classifier for deployment
#Save to pickle so it can be tested later with
model_suffix = str(number_of_documents)+"_"+str(number_of_words)+"_"+str(number_of_letters)
with open("models/classifier_"+model_suffix+".pickle","wb") as pickle_out:
    pickle.dump(classifier, pickle_out)
with open("models/word_features"+model_suffix+".pickle","wb") as pickle_out:
    pickle.dump(word_features, pickle_out)
# BUG FIX: the letter_features pickle previously dumped word_features a second
# time, so the saved letter vocabulary was wrong; dump letter_features here.
with open("models/letter_features"+model_suffix+".pickle","wb") as pickle_out:
    pickle.dump(letter_features, pickle_out)
print("Done:"+print_elapsed_time())
# +
# -------------Step 5-------------
# Test the Model Manually
#sample french document
fr_document = """
Madame la Présidente, il s' agit d' une question sensible pour notre Parlement, et plus précisément relative au débat sur l' élargissement. Je veux parler d' une déclaration du ministère turc des Affaires étrangères au sujet du rapport de M. Poos sur l' adhésion de Chypre à l' Union européenne. Dans cette déclaration, le ministère turc des Affaires étrangères porte une attaque inique et diffamatoire contre un député qui a présenté son rapport à la commission compétente. La commission a adopté ce rapport avec une opposition de deux voix seulement. Je crois comprendre que le système parlementaire n' a pas encore atteint en Turquie un niveau de développement tel que ses membres puissent saisir le contenu du rapport concerné et la responsabilité, si tant est que l' on puisse utiliser ce terme, qu' assume <NAME>. Il s' agit d' un rapport du Parlement. En conséquence, je souhaiterais que le Parlement lui-même apporte une réaction à cette attaque injuste.
Madame la Présidente, permettez-moi de citer un seul exemple de mauvais goût en lisant le point 1 de la déclaration turque :
"le député est connu, d'une part, pour son opposition à la Turquie et, d'autre part, pour ses liens privés avec l'administration chypriote."
C' est une pure calomnie, à la limite d' un délit. Je voudrais que le Parlement, ainsi que la Commission, et non <NAME>, adoptent une réaction à cette accusation lancée par la Turquie !
Je vous remercie, Madame la Présidente !
(Applaudissements)
Je vous remercie. Nous allons regarder cela de très près.
Il n'y a pas d'autre motion d'ordre.
Je dirai simplement à <NAME> que les fonctionnaires sont actuellement occupés à remettre le drapeau britannique à sa place ; il y avait, en effet, ce matin un problème de drapeau britannique que nous avons tenu à résoudre sans attendre.
Situation au Moyen-Orient
L'ordre du jour appelle la déclaration du Conseil et de la Commission sur la situation au Moyen-Orient.
Madame la Présidente, Monsieur le Commissaire, Mesdames et Messieurs, tout d'abord veuillez excuser mon petit retard, mais je viens en droite ligne de Bruxelles. J'espère que vous voudrez bien le comprendre et je vous en remercie.
Mesdames et Messieurs, comme le ministre <NAME> l'avait déjà indiqué lors de sa comparution, le 28 août dernier, devant la commission des affaires étrangères, des droits de l'homme, de la sécurité commune et de la politique de défense de ce Parlement, on assiste depuis des mois au Proche-Orient à une escalade croissante de la violence, avec pour résultat consternant l'effondrement total de la confiance mutuelle entre les parties et, sur le terrain, cela a créé un profond sentiment d'impuissance parmi toutes les populations concernées.
Il ne se passe hélas pas un jour ou à peu près sans que des incidents sanglants et de nouvelles provocations ne se produisent et ne reportent ainsi indéfiniment la matérialisation d'un cessez-le-feu et la fin d'un cycle infernal de représailles, tandis que la liste des victimes ne fait que s'allonger. Cette situation, récemment qualifiée par le ministre Vedrine de monstrueuse et révoltante, suscite bien entendu une profonde inquiétude pour la stabilité d'une région qui est à nos portes. La communauté internationale ne saurait tolérer plus longtemps cette escalade et se doit de condamner avec fermeté les facteurs d'aggravation que constituent notamment le terrorisme et les attentats suicide perpétrés par des Palestiniens en Israël. Outre que ces actes terroristes constituent une abomination, car ils frappent des civils innocents, ils ne font qu'inciter Israël à pratiquer une politique de plus en plus répressive.
Les tirs d'activistes palestiniens contre des Israéliens, qu'ils soient colons ou militaires, à partir d'agglomérations sous contrôle palestinien, mais aussi le recours excessif et disproportionné à la force par Israël ne font qu'alimenter le cycle de la violence. L'usage d'avions de combat dans les zones résidentielles, la destruction systématique par des missiles de bâtiments abritant les services de police et de sécurité de l'autorité palestinienne et les meurtres ciblés d'activistes palestiniens ne constituent pas des éléments convaincants d'une stratégie visant à la paix et à la sécurité. Les incursions militaires israéliennes dans les zones passées sous contrôle palestinien sont autant de violations des accords signés. La fermeture des institutions palestiniennes à Jérusalem-Est, et notamment celle de la Maison Orient, et la saisie des archives sont des mesures politiques peu propices à l'apaisement.
Madame la Présidente, Mesdames et Messieurs, lorsque nous observons aujourd' hui les perspectives d' une reprise du dialogue direct, où en sommes-nous ? Pour commencer, c' est dans un contexte très inquiétant de radicalisation des différentes parties impliquées dans le conflit que le ministre israélien des Affaires étrangères, M. <NAME>, a récemment proposé, en public, la reprise d' un dialogue direct avec l' Autorité palestinienne en vue de réduire la violence et de veiller à ce que le cessez-le-feu soit respecté, deux objectifs qui semblaient inaccessibles jusqu' ici. Au cours d' une récente visite dans la région, le ministre allemand des Affaires étrangères, <NAME>, a obtenu que le dirigeant palestinien accepte une telle rencontre entre <NAME>, donc, d' un côté et <NAME> de l' autre.
"""
#sample spanish document
es_document = """
Nombramiento del Presidente del Banco Central Europeo
De conformidad con el orden del día, se procede al debate de la recomendación (A5-0307/2003), en nombre de la Comisión de Asuntos Económicos y Monetarios, relativa al nombramiento del Sr. <NAME> como Presidente del Banco Central Europeo (10893/2003 - C5-0332/2003 - 2003/0819(CNS)) (Ponente: <NAME>)
. (IT) Señor Presidente, Señorías, señores Comisarios, es un gran placer hablar sobre una cuestión de gran importancia para la Unión Europea: el nombramiento del Presidente del Banco Central Europeo.
La creación del euro es un éxito considerable en la historia de la integración europea, tanto desde el punto de vista político como técnico. El euro sigue siendo una divisa relativamente nueva, y tendrá que basarse en la experiencia, ampliamente reconocida, del Banco Central Europeo para continuar siendo un éxito.
En este momento estamos sustituyendo por primera vez al Presidente del Banco Central Europeo. Es de vital importancia para el Banco Central Europeo y para el euro que la elección de un sucesor del Presidente Duisenberg se lleve a cabo con transparencia y que se base únicamente en los criterios recogidos en el Tratado que establece la Comunidad Europea, así como en el Estatuto del Sistema Europeo de Bancos Centrales y el del Banco Central Europeo, y en el acuerdo de este último sobre el candidato más cualificado para el trabajo.
Al nombrar a un sucesor, hemos de reconocer y rendir tributo al excelente trabajo realizado por el Presidente Duisenberg, pero también debemos expresar nuestra confianza en que el Banco Central Europeo seguirá desarrollando todas sus funciones con el mismo grado de éxito que hasta la fecha.
La base legal para el procedimiento de nombramiento de un nuevo Presidente del Banco Central Europeo se encuentra en la letra b del apartado 2 del artículo 112, y en apartado 4 del artículo 122, del Tratado que establece la Comunidad Europea, así como en los artículos 11.2 y 43.3 del Protocolo del Estatuto del Sistema Europeo de Bancos Centrales y del Banco Central Europeo.
De acuerdo con estas disposiciones, el Consejo de «Información» Ecofin adoptó una recomendación el 15 de julio de 2003 que defendía el nombramiento del Sr. Trichet como Presidente del Banco Central Europeo por un periodo de ocho años con efecto a partir del 1 de noviembre de 2003. La recomendación fue enviada tanto a ustedes como al Banco Central Europeo, de acuerdo con el Tratado, para que pudieran dar su opinión antes de que la recomendación fuera presentada a los Jefes de Estado o de Gobierno para la decisión final.
El Consejo de Gobierno del Banco Central Europeo aprobó su opinión el 31 de julio y la envió al Consejo y al Parlamento. Esta opinión confirmó que el Consejo Regulador del Banco Central Europeo cree que el candidato propuesto deber ser una persona de reconocido prestigio y experiencia profesional en materia monetaria y bancaria, como establece la letra b del apartado 2 del artículo 112 del Tratado.
Espero que el Parlamento Europeo esté de acuerdo con el Consejo y con el Banco Central Europeo en que el Sr. Trichet es un candidato excelente para este importante puesto. La adopción de la opinión por parte del Parlamento permitirá que los Jefes de Estado o de Gobierno tomen una decisión final sobre la toma de posesión del nuevo Presidente del Banco Central Europeo, dentro del calendario fijado por la recomendación del Consejo.
Señor Presidente, en nombre de la Comisión de Asuntos Económicos y Monetarios, recomiendo que el Parlamento confirme el nombramiento del Sr. <NAME> como candidato adecuado para el puesto de Presidente del Banco Central Europeo. El candidato nominado ha presentado una declaración por escrito y ha dado explicaciones orales a dicha comisión en el curso del proceso de confirmación. Ha convencido a los miembros de dicha comisión no solo de su integridad personal y competencia profesional, sino también de sus visión de la política económica y monetaria en la Eurozona. Al mismo tiempo, ha demostrado ser receptivo a las exigencias de una mayor transparencia y responsabilidad democrática en el seno del Banco Central Europeo.
Cinco años después de su fundación, el Banco Central Europeo ya es mayor de edad. Su independencia -en términos políticos, económicos, financieros, organizativos y de personal- está garantizada y no está cuestionada por el Tratado que establece la Constitución para Europa. Su alto grado de independencia, que supera al de la Reserva Federal de los Estados Unidos, significa que el BCE tiene una gran responsabilidad en el desarrollo macroeconómico y social. Ello requiere la mayor transparencia posible en interés de la democracia y de la política de integración. Por ello, la transparencia de las decisiones y del procedimiento de toma de decisiones es parte integral del papel del Banco Central Europeo. Este esfuerzo para lograr la transparencia se refleja en el diálogo monetario trimestral con el Parlamento Europeo, en las publicaciones y decisiones ordinarias, pero también en los informes, las conferencias y las previsiones sobre la inflación que se publican cada seis meses. Así pues, en Europa ha tenido lugar una especie de revolución cultural. La cultura de los bancos centrales nacionales en Europa no contaba con este tipo de transparencia. Por cierto, la transparencia también va en interés del BCE porque es todavía una institución nueva, y por lo tanto, se basa especialmente en el establecimiento y la consolidación de su legitimidad, la credibilidad y confianza como autoridad europea.
En última instancia, el sistema monetario de una nación refleja todo lo que defiende esa nación y todo a lo que aspira y mantiene, si se me permite citar al renombrado economista europeo <NAME>. Creo que en la fase actual del debate, la incorporación del Tratado de Maastricht en su totalidad en el borrador del Tratado que establece la Constitución para Europa era el paso más adecuado. A lo largo de los siglos de su existencia, el papel de los bancos centrales ha sufrido un cambio radical, comenzando por su forma de organización privada en la historia, pasando por su cambio de estatus en los Estados Unidos, el concepto de lucha contra la inflación, hasta llegar a su papel de banco emisor independiente. Quizás sea demasiado pronto para encontrar respuesta a los retos actuales. La tarea que debemos emprender es definir el papel del Banco Central en un mundo globalizado dominado por el comercio y los mercados financieros internacionales. Ello implica mercados dinámicos, pero también riesgos cada vez mayores para la estabilidad financiera internacional.
¿Qué papel pueden y deben desempeñar, pues, los bancos centrales para contribuir a la estabilidad financiera, evitar las crisis financieras y proporcionar asistencia? ¿Está preparado el BCE para el papel de prestamista en última instancia? ¿Es esto lo que queremos? El euro también ha mejorado la posición internacional de Europa. El BCE tendrá que desempeñar un papel cada vez mayor en la definición y aplicación de las políticas apropiadas para una economía globalizada. Estamos preocupados por el gran desequilibrio de la economía estadounidense y los riesgos que pudiera entrañar para cualquier otra parte del mundo a medio plazo.
"""
# Smoke-test the trained classifier on the two sample documents defined above.
print("Classification of Spanish document:"+classify_document(es_document,classifier))
print("Classification of French document:"+classify_document(fr_document,classifier))
# -
# -------------Step 6-------------
# Classify all sentences in europarl.test and write results to resultsfile
# This is the actual deployment of the classifier against challenge data
europarl_testfile = "europarl.test"
results_outfile = "europarl_test_classified_attempt4.csv"
#use for quick testing, to test just a subset of all documents read
#every other 1000 would only classify every 1000 document on testfile
everyother = 500
start = time.time()
total_ctr, positive_ctr, negative_ctr = test_europarltest_file(europarl_testfile, results_outfile, everyother, classifier)
#results
print(" Total attempted: "+str(total_ctr))
print(" Classified correctly: "+str(positive_ctr))
print("Classified incorrectly: "+str(negative_ctr))
# Guard against an empty sample (e.g. a file shorter than `everyother` lines)
# to avoid ZeroDivisionError.
accuracy = (positive_ctr/total_ctr) * 100 if total_ctr else 0.0
# BUG FIX: print(" Accuracy: %s",accuracy) passed the format string and the
# value as two separate print arguments, emitting a literal "%s"; use %-formatting.
print(" Accuracy: %s" % accuracy)
print("Elapsed time for accuracy testing:"+print_elapsed_time())
| Detect Language Attempt 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convert Tensorflow model to ONNX
# The general procedures to convert a tensorflow to ONNX is covered [part 1](./TensorflowToOnnx-1.ipynb).
#
# In this tutorial, we will cover the following contents in order:
# 1. convert tensorflow model with other formats
# - convert with frozen graph
# - convert with checkpoint
# 2. convert in python script
# 3. useful command line options of `tensorflow-onnx`
# ## Convert with frozen graph
# Tensorflow has API to get model's frozen graph and `tensorflow-onnx` can accept it as an input.
#
# While besides the frozen graph, the input and output tensors' names are also needed. Those names typically end with ":0", you could either get them by tensorflow [summarized tool](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/tools/graph_transforms), or specify their names using tf.identity in your tensorflow script.
# +
import tensorflow as tf
from assets.tensorflow_to_onnx_example import create_and_train_mnist
def save_model_to_frozen_proto(sess):
    """Freeze the session's graph (variables -> constants) and write it as a .pb.

    Relies on the notebook-global `output_tensor`; tensor names end in ':0',
    which is stripped to obtain the graph node name.
    """
    output_node_name = output_tensor.name[:-2]
    frozen_graph = tf.graph_util.convert_variables_to_constants(sess, sess.graph_def, [output_node_name])
    with open("./output/mnist_frozen.pb", "wb") as pb_file:
        pb_file.write(frozen_graph.SerializeToString())
# Build and train the MNIST network once; keep handles to the session, the
# saver and the graph's input/output tensors for the conversions below.
sess_tf, saver, input_tensor, output_tensor = create_and_train_mnist()
save_model_to_frozen_proto(sess_tf)
# -
# generating mnist.onnx using frozen_graph
# !python -m tf2onnx.convert \
# --input ./output/mnist_frozen.pb \
# --inputs {input_tensor.name} \
# --outputs {output_tensor.name} \
# --output ./output/mnist2.onnx \
# --opset 7
# ## Convert with checkpoint
# Same as frozen graph, you need to specify the path to checkpoint file and model's input and output names.
# +
def save_model_to_checkpoint(saver, sess):
    """Persist the session's variables as a TF checkpoint under ./output/ckpt/."""
    saver.save(sess, "./output/ckpt/model.ckpt")
save_model_to_checkpoint(saver, sess_tf)
# -
# generating mnist.onnx using checkpoint
# !python -m tf2onnx.convert \
# --checkpoint ./output/ckpt/model.ckpt.meta \
# --inputs {input_tensor.name}\
# --outputs {output_tensor.name} \
# --output ./output/mnist3.onnx \
# --opset 7
# ## Convert in python script
# `tensorflow-onnx` exports conversion APIs so that users can convert tensorflow model into ONNX directly in their script, the following code is an example.
# +
from tf2onnx.tfonnx import process_tf_graph, tf_optimize
import tensorflow as tf
# NOTE(review): in most TF 1.x releases this helper is reached as
# tf.graph_util.convert_variables_to_constants; confirm `tensorflow.graph_util`
# is importable as a module path in the pinned TF version.
from tensorflow.graph_util import convert_variables_to_constants as freeze_graph
print("generating mnist.onnx in python script")
# Freeze the trained graph; ':0' is stripped from the tensor name to get the node name.
graph_def = freeze_graph(sess_tf, sess_tf.graph_def, [output_tensor.name[:-2]])
with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    # Convert the imported TF graph into an ONNX graph at opset 7.
    onnx_graph = process_tf_graph(graph, opset=7, input_names=[input_tensor.name], output_names=[output_tensor.name])
model_proto = onnx_graph.make_model("test")
print("ONNX model is saved at ./output/mnist4.onnx")
with open("./output/mnist4.onnx", "wb") as f:
    f.write(model_proto.SerializeToString())
# -
# ## Useful command line options
# The first useful option is "**opset**" which has been covered in [part 1](./TensorflowToOnnx-1.ipynb).
#
# The second option is "**inputs-as-nchw**". Tensorflow supports NCHW and NHWC while ONNX only supports NCHW for now, so if your model uses NHWC then the tool will insert extra transpose nodes to convert the model. And though "tensorflow-onnx" has optimizers to remove the transpose nodes as much as possible, it's suggested to use NCHW directly if possible. And if a model with NCHW is impossible, this option will tell the tool that the real input format will be NCHW and it can remove more inserted transpose nodes now. For example --inputs input0:0,input1:0 --inputs-as-nchw input0:0 assumes that images are passed into input0:0 as nchw while the TensorFlow model given uses nhwc.
#
# As said in part 1, ONNX defines its own operations set to represent machine learning computation operations and the set is different with tensorflow's. And two main difference will make the conversion fail, unsupported input dtype and unsupported operations, so `tensorflow-onnx` has two options to fix the gap if possible. The option "**target**" may insert cast operation to convert unsupported dtype into float in some target platform, please see the detail [here](https://github.com/onnx/tensorflow-onnx/wiki/target). The option "**custom-ops**" is useful when the runtime used supports custom ops that are not defined in onnx. For example: --custom-ops Print will insert a op Print in the onnx domain ai.onnx.converters.tensorflow into the graph.
# More detail on `tensorflow-onnx` can be got from its [README](https://github.com/onnx/tensorflow-onnx/blob/master/README.md "Title") file, for example the internal procedures in `tensorflow-onnx` to convert a tensorflow model.
| tutorials/TensorflowToOnnx-2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from typing import List
import numpy as np
import scipy.sparse as sp
# -
# # Project 4: Spectral clustering users based on their preferences (50 pt)
#
# The goal of this task is to find groups of users with similar preferences using **Spectral clustering**.
# You are given a fragment of the Yelp social network, represented by an undirected weighted graph.
# Nodes in the graph represent users.
# If two users are connected by an edge of weight $w$, it means that they have both left positive reviews to the same $w$ restaurants.
#
# Additionally, you are given a matrix `F` that encodes user preferences to different categories of restaurants. If `F[i, c] = 1`, then user `i` likes restaurants in category `c`.
#
# You are allowed to use the imported functions (`eigsh`, `KMeans`, `normalize`).
#
# ## General remarks
# Do not add or modify any code outside of the following comment blocks, or where otherwise explicitly stated.
#
# ``` python
# ##########################################################
# # YOUR CODE HERE
# ...
# ##########################################################
# ```
# After you fill in all the missing code, restart the kernel and re-run all the cells in the notebook.
#
# The following things are **NOT** allowed:
# - Using additional `import` statements
# - Copying / reusing code from other sources (e.g. code by other students)
#
# If you plagiarise even for a single project task, you won't be eligible for the bonus this semester.
# ## Load the data
#
# * `N` = number of users (nodes in the graph)
# * `C` = number of categories
# * The graph is stored as a _sparse adjacency matrix_ `A` (shape `[N, N]`).
# * User preferences are stored in a _feature matrix_ `F` (shape `[N, C]`). They will only be used for the final part of the assignment (Part 3)
# * Name of each category is provided in the list `categories` (length `[C]`).
# Load the assignment data from the working directory.
A = sp.load_npz('A.npz')
F = np.load('F.npy')
categories = np.load('categories.npy', allow_pickle=True).tolist()
# Sanity checks: one row per user in both A and F, one name per category.
assert A.shape[0] == F.shape[0]
assert F.shape[1] == len(categories)
print(f'The adjacency matrix is {"symmetric" if (A != A.T).sum() == 0 else "asymmetric"}')
# # 1. Implementing spectral clustering (35 pt)
# ## 1.1. Construct the graph Laplacian (10 pt)
# First, we need to construct the Laplacian for the given graph (*Do only use sparse operations, see [Scipy Sparse](https://docs.scipy.org/doc/scipy/reference/sparse.html)*).
#
# Given the **adjacency matrix** $A \in \mathbb{R}^{N \times N},$ we define the **degree matrix** $D \in \mathbb{R}^{N \times N}$ of an undirected graph as
# $$D_{ij} = \begin{cases}\sum_{k=1}^N A_{ik} & if \;\; i = j\\ 0 & if \;\; i \ne j\end{cases}$$
#
# If our goal is to minimize the **ratio cut**, we will need to use the **unnormalized Laplacian**, defined as
# $$L_{unnorm} = D - A.$$
#
# If our goal is to minimize the **normalized cut**, we will need to use the **normalized Laplacian** (a.k.a. symmetrized Laplacian), defined as
# $$L_{sym} = I - D^{-1/2}AD^{-1/2}$$
def construct_laplacian(A: sp.csr_matrix, norm_laplacian: bool) -> sp.csr_matrix:
    """Construct Laplacian of a graph.

    Parameters
    ----------
    A : scipy.sparse.csr_matrix, shape [N, N]
        Adjacency matrix of the graph.
    norm_laplacian : bool
        Whether to construct the normalized graph Laplacian or not.
        If True, construct the normalized (symmetrized) Laplacian, L = I - D^{-1/2} A D^{-1/2}.
        If False, construct the unnormalized Laplacian, L = D - A.

    Returns
    -------
    L : scipy.sparse.csr_matrix, shape [N, N]
        Laplacian of the graph.
    """
    ##########################################################
    # YOUR CODE HERE
    N = A.shape[0]
    # Node degrees = row sums of the (weighted) adjacency matrix.
    degrees = np.asarray(A.sum(axis=1)).flatten().astype(float)
    if norm_laplacian:
        # L_sym = I - D^{-1/2} A D^{-1/2}. For isolated nodes (degree 0)
        # we define the inverse-sqrt degree as 0 instead of letting
        # 0 ** -0.5 produce inf entries in the Laplacian.
        inv_sqrt = np.zeros_like(degrees)
        positive = degrees > 0
        inv_sqrt[positive] = degrees[positive] ** -0.5
        D_inv_sqrt = sp.diags(inv_sqrt, format='csr')
        L = sp.identity(N, format='csr') - D_inv_sqrt @ A @ D_inv_sqrt
    else:
        # L_unnorm = D - A.
        L = sp.diags(degrees, format='csr') - A
    ##########################################################
    return L
# ## 1.2. Spectral embedding (10 pt)
# Now, we have to compute the spectral embedding for the given graph.
#
# In order to partition the graph into $k$ clusters, such that the desired cut (ratio or normalized) is minimized, we need to consider the $k$ eigenvectors corresponding to the $k$ smallest eigenvalues of the graph Laplacian.
#
# Since the Laplacian matrix is sparse and symmetric, we can use the function `eigsh` from the `scipy.sparse.linalg` package in order to find eigendecomposition of $L$ (`eig` - eigendecomposition, `s` - sparse, `h`- Hermitian).
# The function `eigsh` directly allows you to find the smallest / largest eigenvalues by specifying the `k` and `which` parameters.
#
# Keep in mind that the Laplacian matrix is always positive semi-definite when picking the appropriate value for the `which` parameter.
# Sparse symmetric (Lanczos) eigensolver; `help` prints its documentation
# so the `k` and `which` parameters can be inspected in the notebook.
from scipy.sparse.linalg import eigsh
help(eigsh)
def spectral_embedding(A: sp.csr_matrix, num_clusters: int, norm_laplacian: bool) -> np.array:
    """Compute spectral embedding of nodes in the given graph.

    Parameters
    ----------
    A : scipy.sparse.csr_matrix, shape [N, N]
        Adjacency matrix of the graph.
    num_clusters : int
        Number of clusters to detect in the data.
    norm_laplacian : bool, default False
        Whether to use the normalized graph Laplacian or not.

    Returns
    -------
    embedding : np.array, shape [N, num_clusters]
        Spectral embedding for the given graph.
        Each row represents the spectral embedding of a given node.

    Raises
    ------
    ValueError
        If the adjacency matrix is not symmetric, or if num_clusters is
        outside the valid range [2, N].
    """
    if (A != A.T).sum() != 0:
        raise ValueError("Spectral embedding doesn't work if the adjacency matrix is not symmetric.")
    if num_clusters < 2:
        raise ValueError("The clustering requires at least two clusters.")
    if num_clusters > A.shape[0]:
        raise ValueError(f"We can have at most {A.shape[0]} clusters (number of nodes).")
    ##########################################################
    # YOUR CODE HERE
    laplacian = construct_laplacian(A, norm_laplacian)
    # The Laplacian is positive semi-definite, so its smallest eigenvalues
    # coincide with its smallest-magnitude ones. 'SA' (smallest algebraic)
    # is the appropriate mode here: Lanczos converges far more reliably
    # for 'SA' than for 'SM', which requires shift-invert to be efficient.
    _, eigenvectors = eigsh(laplacian, k=num_clusters, which='SA')
    ##########################################################
    return np.array(eigenvectors)
# ## 1.3. Determine the clusters based on the spectral embedding (15 pt)
# You should use the K-means algorithm for assigning nodes to clusters, once the spectral embedding is computed.
#
# One thing you should keep in mind, is that when using the **normalized Laplacian**, the rows of the embedding matrix **have to** be normalized to have unit $L_2$ norm.
from sklearn.cluster import KMeans
from sklearn.preprocessing import normalize
def spectral_clustering(A: sp.csr_matrix, num_clusters: int, norm_laplacian: bool, seed: int = 42) -> np.array:
    """Perform spectral clustering on the given graph.

    Parameters
    ----------
    A : scipy.sparse.csr_matrix, shape [N, N]
        Adjacency matrix of the graph.
    num_clusters : int
        Number of clusters to detect in the data.
    norm_laplacian : bool, default False
        Whether to use the normalized graph Laplacian or not.
    seed : int, default 42
        Random seed to use for the `KMeans` clustering.

    Returns
    -------
    z_pred : np.array, shape [N]
        Predicted cluster indicators for each node.
    """
    model = KMeans(num_clusters, random_state=seed)
    ##########################################################
    # YOUR CODE HERE
    emb = spectral_embedding(A, num_clusters, norm_laplacian)
    if norm_laplacian:
        # For the normalized Laplacian the ROWS of the embedding must be
        # rescaled to unit L2 norm. `normalize` is row-wise by default;
        # the previous code divided by column norms (axis=0), which does
        # not satisfy this requirement.
        emb = normalize(emb)
    # Fit the seeded model created above so the `seed` argument actually
    # controls KMeans (the previous code fitted a fresh, unseeded KMeans,
    # making the results irreproducible).
    z_pred = model.fit_predict(emb)
    ##########################################################
    return z_pred
# # 2. Quantitatively evaluate the results (10 pt)
def labels_to_list_of_clusters(z: np.array) -> List[List[int]]:
    """Convert predicted label vector to a list of clusters in the graph.
    This function is already implemented, nothing to do here.

    Parameters
    ----------
    z : np.array, shape [N]
        Predicted labels.

    Returns
    -------
    list_of_clusters : list of lists
        Each list contains ids of nodes that belong to the same cluster.
        Each node may appear in one and only one partition.

    Examples
    --------
    >>> z = np.array([0, 0, 1, 1, 0])
    >>> labels_to_list_of_clusters(z)
    [[0, 1, 4], [2, 3]]
    """
    # For every distinct label (in sorted order), collect the indices of
    # all nodes carrying that label.
    unique_labels = np.unique(z)
    return [np.flatnonzero(z == label) for label in unique_labels]
# ## 2.1. Compute ratio cut (5 pt)
# Your task is to implement functions for computing the **ratio cut** and **normalized cut** for a given partition.
#
# Ratio cut and normalized cut are defined on the slide 14 of the lecture slides.
#
#
# The function `labels_to_list_of_clusters` can be helpful here.
def compute_ratio_cut(A: sp.csr_matrix, z: np.array) -> float:
    """Compute the ratio cut for the given partition of the graph.

    Parameters
    ----------
    A : scipy.sparse.csr_matrix, shape [N, N]
        Adjacency matrix of the graph.
    z : np.array, shape [N]
        Cluster indicators for each node.

    Returns
    -------
    ratio_cut : float
        Value of the cut for the given partition of the graph.
    """
    ##########################################################
    # YOUR CODE HERE
    ratio_cut = 0.0
    for members in labels_to_list_of_clusters(z):
        incident = A[members, :]
        internal = incident[:, members]
        # Weight of edges leaving the cluster = total incident weight
        # minus the weight that stays inside the cluster.
        leaving = np.sum(incident) - np.sum(internal)
        ratio_cut += leaving / members.shape[0]
    ##########################################################
    return ratio_cut
# ## 2.2. Compute normalized cut (5 pt)
# **Important**: if a cluster only contains a single node, define its volume to be 1 to avoid division by zero errors.
def compute_normalized_cut(A: sp.csr_matrix, z: np.array) -> float:
    """Compute the normalized cut for the given partition of the graph.

    If a cluster contains only a single node, its volume is defined to be 1
    (as required by the task statement) to avoid division-by-zero errors.

    Parameters
    ----------
    A : scipy.sparse.csr_matrix, shape [N, N]
        Adjacency matrix of the graph.
    z : np.array, shape [N]
        Cluster indicators for each node.

    Returns
    -------
    norm_cut : float
        Value of the normalized cut for the given partition of the graph.
    """
    ##########################################################
    # YOUR CODE HERE
    norm_cut = 0.0
    for label in np.unique(z):
        members = np.flatnonzero(z == label)
        incident = A[members, :]
        internal = incident[:, members]
        # Weight of edges leaving the cluster.
        cut_weight = np.sum(incident) - np.sum(internal)
        # Volume = total edge weight incident to the cluster. Per the task
        # statement, a single-node cluster has volume 1; a zero-volume
        # cluster also has cut weight 0 and contributes nothing.
        volume = 1.0 if len(members) == 1 else np.sum(incident)
        if volume == 0:
            continue
        norm_cut += cut_weight / volume
    ##########################################################
    return norm_cut
# Notice, how using the unnormalized Laplacian leads to a much better ratio cut, while the normalized Laplacian leads to better normalized cut.
# Compare the two Laplacian variants at k = 6 clusters.
num_clusters = 6
np.random.seed(12903)
norm_laplacian = False
z_unnorm = spectral_clustering(A, num_clusters, norm_laplacian)
print('When using L_unnorm:')
print(' ratio cut = {:.3f}'.format(compute_ratio_cut(A, z_unnorm)))
print(' normalized cut = {:.3f}'.format(compute_normalized_cut(A, z_unnorm)))
print(' sizes of partitions are: {}'.format([len(clust) for clust in labels_to_list_of_clusters(z_unnorm)]))
# Repeat with the normalized (symmetrized) Laplacian.
np.random.seed(12323)
norm_laplacian = True
z_norm = spectral_clustering(A, num_clusters, norm_laplacian)
print('When using L_norm:')
print(' ratio cut = {:.3f}'.format(compute_ratio_cut(A, z_norm)))
print(' normalized cut = {:.3f}'.format(compute_normalized_cut(A, z_norm)))
print(' sizes of partitions are: {}'.format([len(clust) for clust in labels_to_list_of_clusters(z_norm)]))
# # 3. Visualize the results (5 pt)
# + active=""
# # In the final part of the assignment, your task is to print out the 5 most popular types of restaurants visited by the users in each cluster.
# -
def print_top_categories_for_each_cluster(top_k: int, z: np.array, F: sp.csr_matrix, categories: List[str]):
    """Print the top-K categories among users in each cluster.

    For each cluster, the function prints names of the top-K categories,
    and number of users that like the respective category (separated by a comma).
    The function doesn't return anything, just prints the output.

    Parameters
    ----------
    top_k : int
        Number of most popular categories to print for each cluster.
    z : np.array, shape [N]
        Cluster labels.
    F : sp.csr_matrix or np.ndarray, shape [N, C]
        Matrix that tells preferences of each user to each category.
        F[i, c] = 1 if user i gave at least one positive review to at least one restaurant in category c.
    categories : list, shape [C]
        Names of the categories.
    """
    ##########################################################
    # YOUR CODE HERE
    for cluster_idx, label in enumerate(np.unique(z)):
        members = np.flatnonzero(z == label)
        print(f'Most popular categories in cluster {cluster_idx}')
        # Popularity of category c = number of users in the cluster liking c.
        # np.asarray(...).ravel() makes this work both for a dense ndarray
        # and for the documented scipy sparse matrix, whose .sum(axis=0)
        # returns an np.matrix that the previous argsort/indexing code
        # mishandled.
        popularity = np.asarray(F[members].sum(axis=0)).ravel()
        ranked = np.argsort(popularity)[::-1]
        for idx in ranked[:top_k]:
            print(f' - {categories[idx]}, {int(popularity[idx])}')
        print('')
    ##########################################################
# Cluster with the normalized Laplacian and show the 5 most popular
# restaurant categories per cluster. The function only prints, so `r`
# is always None.
np.random.seed(23142)
z_norm = spectral_clustering(A, num_clusters, True)
r = print_top_categories_for_each_cluster(5, z_norm, F, categories)
| project_4/spectral_clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlp_founder
# language: python
# name: nlp_founder
# ---
import pandas as pd
import datetime
import yaml
import os
import tweepy
# # Read in Data
# Company-level descriptive data (one row per company).
acc_incub_general_info_df = pd.read_excel("../data/raw/CA_Accel-Incub-Seed_PitchBook/Company_General_Information.xlsx",header=6)
acc_incub_general_info_df.head(2)
# interesting columns= Company ID (primary key), Description, Company Name, HQ Post Code,
#Primary Industry Code, Primary Contact, Year Founded, Active Investors
# +
# Details of each company's most recent financing round.
acc_incub_last_financing_df = pd.read_excel("../data/raw/CA_Accel-Incub-Seed_PitchBook/Last_Financing_Details.xlsx",header=6)
acc_incub_last_financing_df .head(2)
# interesting columns = Company ID ( primary key), Company Name, Growth Rate, Size Multiple, last financing date,
# last financing Size, Last financing valuation, Last Financing Deal Type 2
# Note : Only want series A or later, filter OUT the seed rounds
# -
acc_incub_company_financials_df = pd.read_excel("../data/raw/CA_Accel-Incub-Seed_PitchBook/Public_Company_Financials.xlsx",header=6)
acc_incub_company_financials_df.head(2)
# Interesting columns are NOTHING
# Social media / web-presence metrics per company.
acc_incub_social_web_df = pd.read_excel("../data/raw/CA_Accel-Incub-Seed_PitchBook/Social_and_Web_Presence.xlsx",header=6)
acc_incub_social_web_df.head(2)
# interesting columns = company id (primary key), company name, growth rate, size multiple, majestic referring domains
# facebook likes, Twitter followers, Employees, Total raised
# # Join Dataframes together
# Keep only the columns of interest from each sheet before joining.
acc_incub_general_info_colDrop_df = acc_incub_general_info_df[["Company ID", "Description", "Company Name", "HQ Post Code", "Primary Industry Code",
                                 "Primary Contact", "Year Founded", "Active Investors","HQ Location"]]
acc_incub_last_financing_colDrop_df =acc_incub_last_financing_df [["Company ID", "Growth Rate", "Size Multiple",
                                  "Last Financing Date","Last Financing Size","Last Financing Valuation",
                                  "Last Financing Deal Type 2 "]]
acc_incub_social_web_colDrop_df =acc_incub_social_web_df [["Company ID", "Growth Rate",
                                  "Size Multiple", "Majestic Referring Domains",
                                  "Facebook Likes", "Twitter Followers", "Employees", "Total Raised"]]
# Inner-join the three sheets on the primary key "Company ID".
final_acc_incub_df = acc_incub_general_info_colDrop_df.merge(acc_incub_last_financing_colDrop_df, on='Company ID').merge(acc_incub_social_web_colDrop_df,
                                 on='Company ID')
final_acc_incub_df.info()
# ## Drop Excess Columns
# The join duplicated "Growth Rate" and "Size Multiple" (pandas suffixes
# them _x/_y); drop the _y copies and restore the original column names.
final_acc_incub_df .drop([
    'Growth Rate_y','Size Multiple_y'],axis=1,inplace=True)
final_acc_incub_df.rename(columns={'Growth Rate_x':'Growth Rate',"Size Multiple_x":'Size Multiple',
                  "Company Name_x":"Company Name"},inplace=True) # rename cols
# # Filter out Series A-F Rounds
# NOTE: the column name really does end with a trailing space.
final_acc_incub_df['Last Financing Deal Type 2 '].unique()
# Keep only companies whose last round was NOT a Series-style round
# (i.e. keep seed-stage and other rounds).
final_acc_incub_filter_df= final_acc_incub_df.loc[(final_acc_incub_df['Last Financing Deal Type 2 ']!='Series A') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series AA') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series B') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series A1') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series 2') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series A2') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series B1') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series C') &
                (final_acc_incub_df['Last Financing Deal Type 2 ']!='Series A3'),: ]
final_acc_incub_filter_df.info()
# # Drop companies missing the zip code, year founded, and primary contact
# - Also drop if we don't have the last financing date. Need this to determine the 'runway' for each company
# - Also drop if don't have the last financing size. Need this to calculate 'runway"
# - Can't impute this
# Drop rows missing any field required downstream; these cannot be imputed.
final_acc_incub_filter_df = final_acc_incub_filter_df.loc[
    (final_acc_incub_filter_df['HQ Post Code'].isnull()==False) &
    (final_acc_incub_filter_df['Year Founded'].isnull()==False) &
    (final_acc_incub_filter_df['Primary Contact'].isnull()==False) &
    (final_acc_incub_filter_df['Last Financing Date'].isnull()==False) &
    (final_acc_incub_filter_df['Last Financing Size'].isnull()==False),: ]
final_acc_incub_filter_df.info()
# # Impute missing values
# - Growth Rate : impute median
# - Size Multiple: imput median
# - Last Financing Size : impute median
# - Last Financing Valuation: Drop this ( too many nulls)
# - Majestic Referring Domains: Impute Median
# - Facebook Like: Impute median
# - Twitter followers: impute emdian
# - Employees: impute median (even though we will use this to calculate VC invest = 0, 7 employees is conservative)
# - Total raised: impute median
final_acc_incub_filter_df.describe()
# Collect the median ("50%") of every numeric column so it can be used to
# fill missing values. NOTE(review): iterating a DataFrame yields its
# column labels, so `row` is actually a column name; describe() is also
# recomputed on every iteration.
median_values={}
for row in final_acc_incub_filter_df.describe(): # get median values
    if row =='Last Financing Valuation': # don't have enough data for this
        pass
    else:
        median_values[row]=final_acc_incub_filter_df.describe()[row]["50%"]
median_values
# Fill the NaNs of each numeric column with that column's median.
imputed_final_df = final_acc_incub_filter_df.copy()
for key in median_values: # update the nan values with the median
    updated_col = final_acc_incub_filter_df.loc[:,key].copy()
    updated_col = updated_col.fillna(median_values[key])
    imputed_final_df.loc[:,key] = updated_col
imputed_final_df.info()
# # Determine How many companies are past their 'runway'
# - Read in the median family income for all CA counties
# - Last Financing Size is based on millions of dollars
# - i.e. have spent more than their funding for the past number of months for the number of employees
# ### Load in median income for places in california
ca_income = pd.read_csv("../data/raw/median_income_tables/ca_income_by_county.csv")
ca_income.median_household_income[ca_income.median_household_income!="[7"].describe()
# +
# For places without median income information, fill in with the median of CA
# -
# "[7" is a footnote artifact in the source table; replace it with the
# CA-wide median before parsing the dollar strings into integers.
ca_income.median_household_income = ca_income.median_household_income.replace("[7","$42,500")
ca_income.median_household_income = \
    ca_income.median_household_income.apply( lambda x: int(x.strip("$").replace(",",""))) # convert to numbers
ca_income.median_household_income.describe()
# #### Find the number of days since the offer
# - Use the date the data was pulled
# Date the PitchBook data was pulled; used as "today" for the runway math.
date_pulled = datetime.datetime(2017,6,9)
imputed_final_df["days_since_offer"]= imputed_final_df["Last Financing Date"].apply(
    lambda x: (date_pulled - x).days)
imputed_final_df["days_since_offer"].head()
# ### Determine the number of days of 'runway' that the last financing size gives the company
# - Based on the number of employees and median income of location
imputed_final_df.info()
# NOTE(review): `t` is never used below — appears to be leftover debugging.
t = ca_income[ca_income.Place=="Mountain View"]['median_household_income'].values
# Label each company: vc_invest = 0 if the company has burned through its
# last financing round ("past its runway"), 'NaN' otherwise (dropped below).
# Burn rate is approximated as (number of employees) x (median daily
# household income of the HQ city).
vc_invest = []
for row in imputed_final_df.iterrows():
    row = row[1]
    place = row['HQ Location'].split(",")[0]
    state = row['HQ Location'].split(",")[1]
    try:
        # Median yearly household income for the HQ city.
        median_yr_income = int(ca_income[ca_income.Place==place]['median_household_income'].values)
        daily_income = median_yr_income/365
        daily_income_for_company = row['Employees'] * daily_income
        required_funding_total = daily_income_for_company * (row['days_since_offer'])
        # Last financing size is reported in millions of dollars.
        funding_so_far = row['Last Financing Size']*1_000_000
        if required_funding_total > funding_so_far:
            vc_invest.append(0)
        else:
            vc_invest.append('NaN')
    except Exception:
        if state == "CA":
            # Duplicate place names: fall back to the second matching row.
            median_yr_income = int(ca_income[ca_income.Place==place]['median_household_income'].values[1])
            daily_income = median_yr_income/365
            daily_income_for_company = row['Employees'] * daily_income
            required_funding_total = daily_income_for_company *row['days_since_offer']
            funding_so_far = row['Last Financing Size']*1_000_000
            # BUG FIX: this branch previously used `<`, the opposite of the
            # branch above; a company is past its runway when its required
            # funding EXCEEDS what it raised.
            if required_funding_total > funding_so_far:
                vc_invest.append(0)
            else:
                vc_invest.append('NaN')
        else:
            vc_invest.append('NaN')
imputed_final_df['vc_invest']=vc_invest
# Keep only the companies flagged as past their runway.
imputed_final_df= imputed_final_df[imputed_final_df['vc_invest']!='NaN']
imputed_final_df.info()
# # Use Tweepy to find the usernames of founders
def username_search(name, company, state, c = 20):
    """Search Twitter for a person's username.

    First tries searching for "<name> <company>"; if that search raises
    (e.g. an API error), falls back to searching for just the name.
    Among the results, returns the screen name of the first user whose
    profile location mentions the company's state.

    Parameters
    ----------
    name : person's name.
    company : company name.
    state : state of the company HQ (matched against profile locations).
    c : number of search results to consider, default 20. If not in the
        first twenty results, probably not the correct user.

    Returns
    -------
    str : the matching screen name, or the string "NaN" if none found.
    """
    state = state.lower()
    # NOTE(review): credentials are re-read and the client re-built on every
    # call; consider authenticating once outside the per-company loop.
    with open(os.path.expanduser('~/.ssh/api_credentials.yml')) as fh:
        # safe_load: the credentials file is plain data, no need for the
        # full (unsafe) yaml.load, which also requires a Loader in PyYAML 6+.
        credentials = yaml.safe_load(fh)
    auth = tweepy.OAuthHandler(credentials['twitter']['consumer_key'],
                               credentials['twitter']['consumer_secret'])
    auth.set_access_token(credentials['twitter']['token'],
                          credentials['twitter']['token_secret'])
    api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

    def _first_match(query):
        # Return the first result whose location mentions `state`, else 'NaN'.
        tweets = api.search_users(q=query, count=c)
        for result in tweets:
            if state in result.location.lower().split(" "):
                return result.screen_name
        return "NaN"

    # BUG FIX: the original body referenced an undefined variable
    # (`test = result[0].screen_name`) before the loop, raising NameError on
    # every call and silently forcing the fallback search; that line is gone.
    try: # search the name and the company
        return _first_match(str(name) + " " + str(company))
    except Exception: # try just the name
        try:
            return _first_match(name)
        except Exception:
            return "NaN"
# Look up a Twitter username for each company's primary contact.
# NOTE(review): one rate-limited API call per company — this loop is slow.
twitter_usernames_accel_incub_df = []
for idx,row in enumerate(imputed_final_df.iterrows()):
    location = row[1]['HQ Location'].split(",")[1].strip(" ")
    company = row[1]['Company Name']
    founder = row[1]['Primary Contact']
    twitter_usernames_accel_incub_df.append(username_search(founder,company, location ))
    if idx%100 ==0:
        # Progress indicator: fraction of rows processed so far.
        print(f"Finished {idx/len(imputed_final_df)}")
imputed_final_df['Twitter_Username'] = twitter_usernames_accel_incub_df
# # Drop NaN Usernames
# Keep only the companies for which a username was found, then persist.
finished_acc_incub_df = imputed_final_df[
    (imputed_final_df.Twitter_Username!='NaN') ]
finished_acc_incub_df .info()
finished_acc_incub_df.to_csv("../data/processed/PitchBook_CA_VCInvest=0.csv")
| notebooks/Explore_PitchBook_Accel-Incub_VCInvest=0_JH.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: alaska-ml
# language: python
# name: alaska-ml
# ---
# ## Code to compare original EQT performance to Seisbench EQT performance
#
# #### What I have done here is run the original EQT model from https://github.com/smousavi05/EQTransformer, as well as supposedly the "same" EQT model available through seisbench https://github.com/seisbench/seisbench, on the same day of data for a subset of the AACSE stations. In both scenarios, I bandpass filtered the data between 5-20 Hz prior to applying the models.
#
# #### This exercise was done as a "gut check" to ensure that the trained model available via seisbench is identical to the original model published by Mousavi et al. Based on the following results, it is not, and though the models show similar results, the original EQT from Mousavi et al. appears to be trained much more robustly.
import numpy as np
import pandas as pd
import shutil
import os
from zipfile import ZipFile
import glob
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import obspy
import glob2 as glob
# #### Pull in original EQT results from one day of data: 2019/05/27
#
# Do some manipulation to get the pick results into a format to compare to seisbench
# Root of the original-EQT continuous-detection output; one results CSV per station.
base_dir = '/home/zkrauss/alaska-continuous/krauss-repo/aacse_detection_all/'
res_files = glob.glob(base_dir + '**/X*results.csv')
eqt = pd.concat([pd.read_csv(f) for f in res_files])
# Since P and S waves are saved together, need to split them to compare--
# P picks: drop the S columns and rename the p_* columns to generic names.
eqt_p = eqt.drop(columns=['s_arrival_time','s_probability','s_uncertainty','s_snr'])
eqt_p=eqt_p.rename(columns={'p_arrival_time': "arrival_time", 'p_probability': "probability",'p_uncertainty':"uncertainty",'p_snr':"snr"})
eqt_p['arrival_time']=pd.to_datetime(eqt_p['arrival_time'],infer_datetime_format=True)
eqt_p = eqt_p.dropna(subset=['arrival_time'])
# S picks: mirror of the above.
eqt_s = eqt.drop(columns=['p_arrival_time','p_probability','p_uncertainty','p_snr'])
eqt_s=eqt_s.rename(columns={'s_arrival_time': "arrival_time", 's_probability': "probability",'s_uncertainty':"uncertainty",'s_snr':"snr"})
eqt_s['arrival_time']=pd.to_datetime(eqt_s['arrival_time'],infer_datetime_format=True)
eqt_s = eqt_s.dropna(subset=['arrival_time'])
# Tag the phase type, then stack P and S back into one long-format table.
eqt_p['phase']=['P']*len(eqt_p)
eqt_s['phase']=['S']*len(eqt_s)
eqt = pd.concat([eqt_p,eqt_s])
eqt.head()
# #### Pull in seisbench results from the same day
# SeisBench-EQT picks for the same day (already in long format).
seisb = pd.read_parquet('/home/zkrauss/lynx/picks_20190527_bp0520_overlapped.parquet')
seisb['timestamp']=pd.to_datetime(seisb['timestamp'],infer_datetime_format=True)
seisb.head()
# ### Compare the two! They SHOULD be exactly the same, IF the models are identical and the peak-picking postprocessing is identical.
#
# #### I already know that the peak-picking postprocessing is not the same, since for the seisbench picks I used the method employed by the authors of PhaseNet to pick peaks (https://github.com/wayneweiqiang/PhaseNet/blob/f119e28e8ebe9f4e7771cd7df8c1ec015aa09cbb/phasenet/postprocess.py), which is not used by EQTransformer.
#
# #### But, let's go ahead and see how much they differ.
# +
# Sort both dataframes by time:
eqt.sort_values(by=['arrival_time'],inplace=True)
seisb.sort_values(by=['timestamp'],inplace=True)
# Merge dataframes, only merging picks if they have matching station ID,
# phase type, and are within 0.1 s of each other
resid_max = 0.1
comp = pd.merge_asof(left=eqt,right=seisb,left_on=['arrival_time'],right_on=['timestamp'],left_by=['network','station','instrument_type','phase'],right_by=['network','station','channel','phase'],tolerance = pd.Timedelta(resid_max,'seconds'),direction='nearest')
# -
comp
# ### We see that the seisbench version got over twice as many picks:
print('EQTransformer got',str(len(eqt)),'picks for the day.')
print("Seisbench's version of EQTransformer got",str(len(seisb)),'picks for the day.')
# EQTransformer got 2198 picks for the day.
# Seisbench's version of EQTransformer got 4925 picks for the day.
# ### So, clearly, EQTransformer's method of peak picking does a much better job of filtering out picks which are likely erroneous. That's a solvable problem though, as long as the correct picks are the same between the two. How many of the picks that EQTransformer found are also found by Seisbench?
# Rows with a non-null SeisBench 'prob' column are the matched picks.
matched = comp.dropna(subset=['prob'])
print("Seisbench's version of EQT got only",str(len(matched)),'of the same picks as the original EQT.')
# Seisbench's version of EQT got only 649 of the same picks as the original EQT.
# ### OK, that is not good.
# ### Here's a rather messy but informative figure that allows us to see at a glance the number of picks throughout time made by both EQT and Seisbench, in relation to the probability assigned to the pick by both models.
#
# #### This figure shows that there is not a huge difference in performance between the two phase types, and also no clear trend in difference of probabilities between the two models.
# + tags=[]
# Split each catalogue by phase type for separate P and S panels.
eqtp_picks = eqt[eqt['phase']=='P']
eqts_picks = eqt[eqt['phase']=='S']
sbppicks = seisb[seisb['phase']=='P']
sbspicks = seisb[seisb['phase']=='S']
# Filter by probability?
# Two-panel scatter: pick time vs. assigned probability, SeisBench (blue)
# over original EQT (orange), P picks on top, S picks below.
fig, axs = plt.subplots(2,1, figsize=(20,10),sharex=True)
axs[0].scatter(sbppicks['timestamp'],sbppicks['prob'])
#axs[0].scatter(sbppicks['timestamp'],[0.5]*len(sbppicks))
axs[0].scatter(eqtp_picks['arrival_time'],eqtp_picks['probability'])
#axs[0].scatter(eqtp_picks['arrival_time'],[0.5]*len(eqtp_picks))
axs[0].set_xlabel('time')
axs[0].set_ylabel('pick probability')
axs[0].set_title('P-picks')
axs[0].legend(['Seisbench','EQT'])
axs[1].scatter(sbspicks['timestamp'],sbspicks['prob'])
#axs[1].scatter(sbspicks['timestamp'],[0.5]*len(sbspicks))
axs[1].scatter(eqts_picks['arrival_time'],eqts_picks['probability'])
#axs[1].scatter(eqts_picks['arrival_time'],[0.5]*len(eqts_picks))
axs[1].set_xlabel('time')
axs[1].set_ylabel('pick probability')
axs[1].set_title('S-picks')
axs[1].legend(['Seisbench','EQT'])
#axs[0].set_xlim([datetime(2019,5,27),datetime(2019,5,27,1)])
# -
# ## Okay, so obviously the peak-picking process is different. But are the probability curves of the annotated streams themselves different? That is what would be most concerning, since it would mean that the Seisbench EQT model was trained differently than the original Mousavi model. Let's plot up some examples to see.
import h5py
import obspy
from obspy.clients.fdsn import Client
import seisbench
import seisbench.models as sbm
# FDSN web-service client used below to fetch the raw waveforms for each example window.
client = Client("iris")
# #### Here, in our first example, the EQT and Seisbench probability curves look very similar. That is encouraging! However, we see that they are not identical-- the Seisbench P-wave probability curve has a small extra peak just before the P-arrival, and the S-wave peak has a different amplitude between the two models. Hmm.
# +
# Original-EQT prediction probabilities for station EP22; the HDF5 keys are
# the window start times.
filename='/home/zkrauss/alaska-continuous/krauss-repo/aacse_detection_forfigs/EP22_outputs/prediction_probabilities.hdf5'
# Use a context manager so the HDF5 handle is released even on error
# (the original opened the file and never closed it).
with h5py.File(filename, 'r') as f:
    dset = f['2019-05-27 03:15:18.009998']
    d_prob = dset['Earthquake'][:]
    p_prob = dset['P_arrival'][:]
    s_prob = dset['S_arrival'][:]
# Fetch the matching 60 s of raw XO/EP22 data from IRIS and bandpass it for display.
t1 = obspy.UTCDateTime('2019-05-27T03:15:18.009998')
t2 = t1 + 60
st = client.get_waveforms("XO", "EP22", "--", "HH*", t1, t2)
st.filter('bandpass',freqmin=5,freqmax=20)
# Load model (SeisBench's conversion of the original EQT weights).
model = sbm.EQTransformer.from_pretrained("original")
# Disable SeisBench's edge blinding so its curves are comparable to EQT's.
model.default_args["blinding"] = (0,0)
sb = model.annotate(st)
# Three stacked panels: waveforms, SeisBench curves, stored original-EQT curves.
fig = plt.figure(figsize=(15, 10))
axs = fig.subplots(3, 1, sharex=True, gridspec_kw={'hspace': 0})
axs[0].plot(st[0].times(),st[0].data)
axs[0].plot(st[1].times(),st[1].data)
axs[0].plot(st[2].times(),st[2].data)
axs[0].set_title('Bandpass filtered waveform')
axs[1].plot(sb[0].times(),sb[0].data)
axs[1].plot(sb[1].times(),sb[1].data)
axs[1].plot(sb[2].times(),sb[2].data)
axs[1].legend(['Detection','P-wave','S-wave'])
axs[1].set_title('Seisbench')
axs[1].set_ylabel('Probability')
# Slice the SeisBench time vector to the stored array length instead of a
# hard-coded 6000 samples, so a different window length cannot misalign.
n_samp = len(d_prob)
axs[2].plot(sb[0].times()[:n_samp],d_prob)
axs[2].plot(sb[1].times()[:n_samp],p_prob)
axs[2].plot(sb[2].times()[:n_samp],s_prob)
axs[2].set_title('EQTransformer')
axs[2].set_ylabel('Probability')
axs[2].set_xlabel('Time (s)')
# -
# #### This next example is quite concerning. EQT finds two earthquakes within the window, but Seisbench finds nothing.
# +
def _plot_eqt_vs_seisbench(station, window_key, duration=60):
    """Plot one example window three ways for a given XO station:
    bandpass-filtered waveforms, the SeisBench-EQT probability curves, and
    the original-EQT probability curves stored on disk.

    Parameters
    ----------
    station : str
        XO-network station code; also selects the EQT output directory.
    window_key : str
        Key of the window inside prediction_probabilities.hdf5
        (e.g. '2019-05-27 00:00:00.009996'); doubles as the window start time.
    duration : int, optional
        Window length in seconds to request from IRIS (default 60).
    """
    filename = ('/home/zkrauss/alaska-continuous/krauss-repo/'
                'aacse_detection_forfigs/{}_outputs/'
                'prediction_probabilities.hdf5'.format(station))
    # Context manager closes the HDF5 file; the original cells leaked one
    # open handle per example.
    with h5py.File(filename, 'r') as f:
        dset = f[window_key]
        d_prob = dset['Earthquake'][:]
        p_prob = dset['P_arrival'][:]
        s_prob = dset['S_arrival'][:]
    # The HDF5 key is the ISO start time with a space instead of the 'T'.
    t1 = obspy.UTCDateTime(window_key.replace(' ', 'T'))
    st = client.get_waveforms("XO", station, "--", "HH*", t1, t1 + duration)
    st.filter('bandpass', freqmin=5, freqmax=20)
    # Re-load the pretrained model on each call, matching the original cells.
    # NOTE(review): hoist this out if these example plots become a hot path.
    model = sbm.EQTransformer.from_pretrained("original")
    model.default_args["blinding"] = (0, 0)  # no edge blinding, as in EQT
    sb = model.annotate(st)
    fig = plt.figure(figsize=(15, 10))
    axs = fig.subplots(3, 1, sharex=True, gridspec_kw={'hspace': 0})
    for tr in st:
        axs[0].plot(tr.times(), tr.data)
    axs[0].set_title('Bandpass filtered waveform')
    for tr in sb:
        axs[1].plot(tr.times(), tr.data)
    axs[1].legend(['Detection', 'P-wave', 'S-wave'])
    axs[1].set_title('Seisbench')
    axs[1].set_ylabel('Probability')
    # Slice to the stored array length instead of a hard-coded 6000 samples.
    n_samp = len(d_prob)
    axs[2].plot(sb[0].times()[:n_samp], d_prob)
    axs[2].plot(sb[1].times()[:n_samp], p_prob)
    axs[2].plot(sb[2].times()[:n_samp], s_prob)
    axs[2].set_title('EQTransformer')
    axs[2].set_ylabel('Probability')
    axs[2].set_xlabel('Time (s)')

# The six examples below were previously six near-identical copy-pasted
# cells (with inconsistently applied titles/axis labels); they now share
# the helper above, which applies the full set of labels to every figure.
_plot_eqt_vs_seisbench('KS11', '2019-05-27 00:00:00.009996', duration=60)
# #### The following example shows that both models detect an earthquake, but only the EQT model finds both a P and S wave peak > 0.3 probability.
_plot_eqt_vs_seisbench('LA32', '2019-05-27 13:11:41.999600', duration=61)
# #### And the following examples just show more of the same. Yikes! The Seisbench model is certainly not the same as the original EQT model, and from these first glance examples, performs not nearly as well!
_plot_eqt_vs_seisbench('LA32', '2019-05-27 15:09:17.999600', duration=61)
_plot_eqt_vs_seisbench('WD63', '2019-05-27 01:01:36.003000', duration=61)
_plot_eqt_vs_seisbench('WS71', '2019-05-27 05:05:54.009800', duration=61)
_plot_eqt_vs_seisbench('WS71', '2019-05-27 18:07:48.009800', duration=61)
# -
| compare_eqt_seisbench.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tutorial 2: Bird migration analysis
#
# <img align="right" src="https://anitagraser.github.io/movingpandas/pics/movingpandas.png">
#
# This tutorial uses data published on Movebank, specifically: [Navigation experiments in lesser black-backed gulls (data from Wikelski et al. 2015)-gps.csv](https://www.datarepository.movebank.org/handle/10255/move.494)
#
# This tutorial covers:
# 1. Trajectory data preprocessing
# 1. Loading movement data from common geospatial file formats
# 1. Exploring spatial & non-spatial data distributions
# 1. Converting GeoDataFrames into Trajectories describing continuous tracks of moving objects
# 1. Trajectory data analysis
# 1. Investigating individual trajectories
# 1. Comparing different years
# 1. Investigating trajectories of multiple individuals
# %matplotlib inline
# NOTE(review): `IPython.core.display` is deprecated in favour of
# `IPython.display` in recent IPython releases.
from IPython.core.display import display, HTML
# Widen the notebook cells to the full browser width.
display(HTML("<style>.container { width:100% !important; }</style>"))
# +
import urllib
import os
import pandas as pd
from geopandas import GeoDataFrame, read_file
from shapely.geometry import Point, LineString, Polygon
from datetime import datetime, timedelta
import matplotlib.pyplot as plt
import sys
sys.path.append("..")
import movingpandas as mpd
import warnings
warnings.simplefilter("ignore")
# -
# ## Loading the bird movement data
#
# %%time
# Read the gull GPS fixes from a GeoPackage into a GeoDataFrame.
df = read_file('data/demodata_gulls.gpkg')
wgs84 = df.crs  # remember the source CRS for the area-of-interest plot later
# Use the fix timestamp as the DataFrame index, as MovingPandas expects.
df['t'] = pd.to_datetime(df['timestamp'])
df = df.set_index('t')
print("Finished reading {}".format(len(df)))
# This is what the data looks like:
df.head()
df.plot()
# Let's see how many individuals we have in the dataset:
df['individual-local-identifier'].unique()
# The records per individual are not evenly distributed:
df['individual-local-identifier'].value_counts().plot(kind='bar', figsize=(17,3))
# Finally, let's create trajectories:
# +
MIN_LENGTH = 100 # meters
# One trajectory per individual, discarding tracks shorter than MIN_LENGTH.
traj_collection = mpd.TrajectoryCollection(df, 'individual-local-identifier', min_length=MIN_LENGTH)
all_trajectories = traj_collection.trajectories
print("Finished creating {} trajectories".format(len(all_trajectories)))
# -
# ## Investigating individual trajectories
# Let's pick out a specific individual. For example, '91916A' is the individual with most records in our dataset:
# Keep only the individual with the most records.
filtered = traj_collection.filter('individual-local-identifier', '91916A')
# Copy so later modifications cannot alter the stored trajectory.
my_traj = filtered.trajectories[0].copy()
my_traj.df.head()
my_traj.hvplot(line_width=2)
# This individual has been travelling back and forth for quite a few years!
#
# One way to take a closer look at this individual's travels is to split the overall track into yearly trips:
# Split the multi-year track at calendar-year boundaries.
trips_by_year = mpd.TemporalSplitter(filtered).split(mode='year')
for trip in trips_by_year:
    print(trip.id)
# Now we can explore individual years:
one_year = trips_by_year.get_trajectory('91916A_2010-12-31 00:00:00')
print(one_year)
# Colour the 2010 trip by speed, clipping the colour scale to 0-15.
one_year.hvplot(line_width=5.0, c='speed', cmap='RdYlGn', colorbar=True, clim=(0,15))
# Let's see where this individual was on a specific day:
def plot_location_at_timestamp(traj, t, fig_size=300):
    """Show where *traj* was at time *t*: a red dot on OSM tiles, overlaid
    with the full track drawn as a thin black line."""
    position = GeoDataFrame([traj.get_row_at(t)])
    point_layer = position.hvplot(
        geo=True, tiles='OSM', size=200, color='red',
        width=fig_size, height=fig_size)
    track_layer = traj.hvplot(
        line_width=1.0, color='black', tiles=False,
        width=fig_size, height=fig_size)
    # Overlay order matters for rendering: point layer first, track on top.
    return point_layer * track_layer
(plot_location_at_timestamp(one_year, datetime(2010, 9, 1))
 + plot_location_at_timestamp(one_year, datetime(2010, 10, 1))
 + plot_location_at_timestamp(one_year, datetime(2010, 11, 1)))
# Of course, it might also be of interest to see the different locations on a certain day each year:
# +
def plot_location_at_day_of_year(traj, month, day, ax=None):
    # Build one timestamp per calendar year present in the trajectory index,
    # then plot the position on that same month/day of every year.
    ts = [datetime(year, month, day) for year in traj.df.index.year.unique()]
    return plot_locations_at_timestamps(traj, ts, ax=ax)
def plot_locations_at_timestamps(traj, ts, ax=None):
    # NOTE(review): the `ax` parameter is accepted but never used -- hvplot
    # manages its own axes. Kept for interface compatibility.
    loc = GeoDataFrame([traj.get_row_at(t) for t in ts])
    # Label each point with its date so the colour mapping is by date.
    loc['date_label'] = loc.index.strftime('%Y-%m-%d')
    return (loc.hvplot(c='date_label', size=200, geo=True, tiles='OSM') *
            traj.hvplot(line_width=1.0, color='black', geo=True, tiles=False) )
# -
plot_location_at_day_of_year(my_traj, month=10, day=1)
# It's pretty clear that this individual does not follow the same schedule and route every year. However, it seems to always be heading to the same area Red Sea coast to spend the winter there.
#
# Let's find its arrival times in this area:
# Rough bounding box (lon/lat degrees) around the Red Sea wintering area.
area_of_interest = Polygon([(30, 25), (50, 25), (50, 15), (30, 15), (30, 25)])
plotted_area_of_interest = GeoDataFrame(pd.DataFrame([{'geometry': area_of_interest, 'id': 1}]), crs=wgs84).hvplot(geo=True, color='yellow', alpha=0.5)
# +
# clip() already yields the trajectory segments inside the polygon; the
# original wrapped it in a pointless identity comprehension.
arrivals = list(my_traj.clip(area_of_interest))
print("Found {} arrivals".format(len(arrivals)))
# Each clipped segment starts when the bird enters the area -> arrival time.
for traj in arrivals:
    print("Individual '{}' arrived at {}".format(traj.df['individual-local-identifier'].iloc[0], traj.get_start_time()))
# -
( plot_locations_at_timestamps(my_traj, [traj.get_start_time() for traj in arrivals]) * plotted_area_of_interest )
# ## Investigating trajectories of multiple individuals
#
# Multiple individuals travel to this area every year. Let's have a closer look:
def get_trajectories_by_year(trajs, year):
    """Return the trajectories from *trajs* that overlap the given *year*.

    A trajectory qualifies if it starts in or before *year* and ends in or
    after it. BUGFIX: the original compared against the global
    ``year_of_interest`` instead of the ``year`` parameter, so the argument
    was silently ignored (it only worked because the sole call site passed
    the global).
    """
    return [
        traj for traj in trajs
        if traj.get_start_time().year <= year <= traj.get_end_time().year
    ]
# +
year_of_interest = 2010
# Clip every individual's trajectory to the area of interest; clip() returns
# one list of segments per individual, which is then flattened.
arrivals = [ traj.clip(area_of_interest) for traj in all_trajectories ] # list of lists
arrivals = [ item for sublist in arrivals for item in sublist ] # flat list
relevant = get_trajectories_by_year(arrivals, year_of_interest)
print("Found {} arrivals".format(len(relevant)))
# -
# Segment duration distinguishes pass-throughs from wintering birds.
for traj in relevant:
    print("Individual '{}' arrived at {} (duration: {})".format(
        traj.df['individual-local-identifier'].iloc[0], traj.get_start_time().date(),
        traj.get_end_time()-traj.get_start_time()))
# Based on the duration of the individuals' trajectory segments within our area of interest, it looks like some individuals spend the winter here while others only pass through.
#
# For example, Individual '91761A' passed through twice? What has it been up to?
# +
def get_individual_traj_for_year(trajs, id, year):
    # NOTE(review): the `trajs` argument is ignored -- the lookup goes through
    # the global `traj_collection` instead. Also `id` shadows the builtin.
    # Both kept as-is here to avoid changing existing call sites; consider
    # refactoring to search `trajs` directly.
    individual = traj_collection.get_trajectory(id)
    # Restrict the full multi-year track to the requested calendar year.
    return individual.get_segment_between(datetime(year,1,1), datetime(year,12,31))
plotted_trajectory = get_individual_traj_for_year(all_trajectories, '91761A', year_of_interest).hvplot(color='black', line_width=1.0)
( plotted_trajectory * plotted_area_of_interest )
# -
# Turns out that this individual does not stay at the Red Sea but continues its journey into Africa.
# ## Continue exploring MovingPandas
#
# * [Tutorial 3: Horse collar data](3_horse_collar.ipynb)
| tutorials/2_bird_migration_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/waelCh/Q-Learning_Parking-Space-Search/blob/master/Parking_exercice_(Q_learning).ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="ESt-lv4gbP6v" colab_type="code" colab={}
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
from tqdm import trange
from random import randint, uniform
# BUGFIX: the original did `from random import ... random`, which rebinds the
# name `random` to a *function*, so `random.seed(42)` here (and the later
# `random.choice(...)` calls) raised AttributeError. Import the module itself.
import random
matplotlib.use('Agg')  # headless backend: render plots without a display
# Seed the global RNG so parking layouts / action choices are reproducible.
random.seed(42)
# + id="TLEUsnDFpiNK" colab_type="code" colab={}
# Actions definition
GARER = 'g'      # "garer": park in the current space
CONTINUER = 'c'  # "continuer": drive on to the next space
ACTIONS = [GARER, CONTINUER]
# + id="S8tR53YKsvo_" colab_type="code" colab={}
class Parking:
    """A row of parking spaces, each either free (0) or reserved (1)."""

    def __init__(self, car_position, parking_size=100):
        # BUGFIX: the original assigned `self.epsilon = epsilon` here, but
        # `epsilon` is not a parameter of this class (copy-paste from Car)
        # and raised NameError on every instantiation.
        self.car_position = car_position  # starting index of the car
        self.parking_size = parking_size
        # Generate random positions' state in the parking (0=free, 1=reserved).
        self.parking_space = [randint(0, 1) for _ in range(self.parking_size)]

    def reset(self):
        """Re-randomise the occupancy of every space."""
        self.parking_space = [randint(0, 1) for _ in range(self.parking_size)]

    def place_availability(self, position):
        """Return the state of the space at `position` (0=free, 1=reserved)."""
        return self.parking_space[position]
# + id="T9-rm6oavFZt" colab_type="code" colab={}
class Car:
    """Epsilon-greedy Q-learning agent searching for a parking space."""

    def __init__(self, epsilon=0):
        # Probability of taking a random (exploratory) action.
        self.epsilon = epsilon
        # Q-table mapping state -> np.array with one value per ACTIONS entry.
        # BUGFIX: the original stored this in a *local* variable, so the
        # methods below raised NameError the first time they were called.
        self.q_table = dict()
        print("Creating new Car")

    def q(self, state, action=None):
        """Return Q(state, action); with action=None, the whole value row.

        Unknown states are lazily initialised to all-zero action values.
        """
        if state not in self.q_table:
            self.q_table[state] = np.zeros(len(ACTIONS))
        if action is None:
            return self.q_table[state]
        return self.q_table[state][action]

    def choose_action(self, state):
        """Pick an action epsilon-greedily for the given state.

        BUGFIX: the original was missing `self` and called `q(state)`
        (unknown name, wrong arity) and `random.choice` (broken by the
        shadowed `random` import at the top of this notebook).
        """
        # Local import: the module-level name `random` is shadowed by
        # `from random import ...` at the top of the notebook.
        from random import choice
        if uniform(0, 1) < self.epsilon:
            return choice(ACTIONS)
        # NOTE(review): this branch returns an action *index* while the
        # branch above returns an action *symbol*; consider unifying to
        # `return ACTIONS[int(np.argmax(self.q(state)))]`.
        return np.argmax(self.q(state))

    def act(self, state, action):
        # NOTE(review): `State` is not defined anywhere in this notebook, so
        # calling act() raises NameError -- the transition logic is a stub.
        return State()
# + id="FthhvetO7AdZ" colab_type="code" colab={}
# Scratch cell: an empty dict displayed to sanity-check the kernel state.
tt = dict()
# + id="d7cwhn2s7DeF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="741ebbb8-2def-45c0-ccf8-ee3b8df26b83"
tt
| Parking_exercice_(Q_learning).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <small><small><i>
# All of these python notebooks are available at [https://gitlab.erc.monash.edu.au/andrease/Python4Maths.git]
# </i></small></small>
# # Functions
# Functions can represent mathematical functions. More importantly, in programming, functions are a mechanism to allow code to be re-used so that complex programs can be built up out of simpler parts.
# This is the basic syntax of a function
#
# ```python
# def funcname(arg1, arg2,... argN):
# ''' Document String'''
# statements
# return <value>```
# Read the above syntax as: a function named "funcname" is defined, which accepts the arguments "arg1, arg2, ..., argN". The function is documented by the '''Document String'''. After executing its statements, the function returns a "value".
#
# Return values are optional (by default every function returns **None** if no return statement is executed)
# + jupyter={"outputs_hidden": false}
print("Hello Jack.")
print("Jack, how are you?")
# -
# Instead of writing the above two statements every single time it can be replaced by defining a function which would do the job in just one line.
#
# Defining a function firstfunc().
# + jupyter={"outputs_hidden": false}
def firstfunc():
    """Greet Jack with a fixed two-line message."""
    for line in ("Hello Jack.", "Jack, how are you?"):
        print(line)
firstfunc()  # execute the function
# -
# **firstfunc()** every time just prints the message to a single person. We can make our function **firstfunc()** to accept arguements which will store the name and then prints respective to that accepted name. To do so, add a argument within the function as shown.
def firstfunc(username):
    """Greet the given user by name with the same two-line message."""
    print(f"Hello {username}.")
    print(f"{username}, how are you?")
# + jupyter={"outputs_hidden": false}
name1 = 'sally'  # or use input('Please enter your name : ')
# -
# So we pass this variable to the function **firstfunc()** as the variable username because that is the variable that is defined for this function. i.e name1 is passed as username.
# + jupyter={"outputs_hidden": false}
firstfunc(name1)
# -
# ## Return Statement
# When the function results in some value and that value has to be stored in a variable or needs to be sent back or returned for further operation to the main algorithm, a return statement is used.
def times(x, y):
    """Return the product of the two arguments."""
    return x * y
# The **times( )** function accepts two arguments and returns their product.
# + jupyter={"outputs_hidden": false}
c = times(4, 5)
print(c)
# -
# The z value is stored in variable c and can be used for further operations.
# Instead of declaring another variable the entire statement itself can be used in the return statement as shown.
# + jupyter={"outputs_hidden": false}
def times(x, y):
    '''This multiplies the two input arguments'''
    product = x * y
    return product
# + jupyter={"outputs_hidden": false}
c = times(4, 5)
print(c)
# -
# Since the **times( )** is now defined, we can document it as shown above. This document is returned whenever **times( )** function is called under **help( )** function.
# + jupyter={"outputs_hidden": false}
help(times)
# -
# Multiple variable can also be returned as a tuple. However this tends not to be very readable when returning many value, and can easily introduce errors when the order of return values is interpreted incorrectly.
eglist = [10,50,30,12,6,8,100]
def egfunc(eglist):
    """Return a 4-tuple (highest, lowest, first, last) for the given list."""
    return max(eglist), min(eglist), eglist[0], eglist[-1]
# If the function is just called without any variable for it to be assigned to, the result is returned inside a tuple. But if the variables are mentioned then the result is assigned to the variable in a particular order which is declared in the return statement.
# + jupyter={"outputs_hidden": false}
egfunc(eglist)
# + jupyter={"outputs_hidden": false}
a,b,c,d = egfunc(eglist)
print(' a =',a,' b =',b,' c =',c,' d =',d)
# -
# ## Default arguments
# When an argument of a function is common in majority of the cases this can be specified with a default value. This is also called an implicit argument.
def implicitadd(x, y=3, z=0):
    """Add up to three numbers, print the sum as an equation, return it.

    `y` defaults to 3 and `z` to 0, so implicitadd(4) computes 4 + 3 + 0.
    """
    total = x + y + z
    print("%d + %d + %d = %d"%(x, y, z, total))
    return total
# **implicitadd( )** is a function accepts up to three arguments but most of the times the first argument needs to be added just by 3. Hence the second argument is assigned the value 3 and the third argument is zero. Here the last two arguments are default arguments.
# Now if the second argument is not defined when calling the **implicitadd( )** function then it considered as 3.
# + jupyter={"outputs_hidden": false}
implicitadd(4)
# -
# However we can call the same function with two or three arguments. A useful feature is to explicitly name the argument values being passed into the function. This gives great flexibility in how to call a function with optional arguments. All off the following are valid:
# + jupyter={"outputs_hidden": false}
implicitadd(4,4)
implicitadd(4,5,6)
implicitadd(4,z=7)
implicitadd(2,y=1,z=9)
implicitadd(x=1)
# -
# ## Any number of arguments
# If the number of arguments that is to be accepted by a function is not known then a asterisk symbol is used before the name of the argument to hold the remainder of the arguments. The following function requires at least one argument but can have many more.
def add_n(first, *args):
    """Return the sum of one or more numbers, printing them as a list first."""
    values = [first, *args]
    print(values)
    return sum(values)
# The above function defines a list of all of the arguments, prints the list and returns the sum of all of the arguments.
# + jupyter={"outputs_hidden": false}
add_n(1,2,3,4,5)
# + jupyter={"outputs_hidden": false}
add_n(6.5)
# -
# Arbitrary numbers of named arguments can also be accepted using `**`. When the function is called all of the additional named arguments are provided in a dictionary
# + jupyter={"outputs_hidden": false}
def namedArgs(**names):
    """Print every keyword argument as space-separated name=value pairs."""
    # `names` is a dict of keyword : value, in call order.
    pairs = [key + "=" + str(value) for key, value in names.items()]
    print(" ".join(pairs))
namedArgs(x=3*4,animal='mouse',z=(1+2j))
# -
# ## Global and Local Variables
# Whatever variable is declared inside a function is local variable and outside the function in global variable.
# + jupyter={"outputs_hidden": false}
# NOTE(review): `eg1` is never used by the examples below; apparently left
# over from an earlier version of this tutorial.
eg1 = [1,2,3,4,5]
# -
# In the function below, the nested function assigns to `x`, which creates a new local variable inside `thirdfunc` that shadows the `x` defined in the enclosing function.
def egfunc1():
    """Show that assigning x inside a nested function creates a new local x
    that shadows (and never modifies) the enclosing function's x."""
    x = 1
    def shadower():
        x = 2  # local to shadower(); the enclosing x is untouched
        print("Inside thirdfunc x =", x)
    shadower()
    print("Outside x =", x)
# + jupyter={"outputs_hidden": false}
egfunc1()
# -
# If a **global** variable is defined as shown in the example below then that variable can be called from anywhere. Global values should be used sparingly as they make functions harder to re-use.
# + jupyter={"outputs_hidden": false}
eg3 = [1,2,3,4,5]
# -
def egfunc1():
    """Show that `global x` inside a nested function writes the module-level
    x, while the enclosing function's local x is left untouched."""
    x = 1.0  # local to egfunc1
    def escaper():
        global x  # rebinds the *module-level* name, not egfunc1's local
        x = 2.0
        print("Inside thirdfunc x =", x)
    escaper()
    # Still prints 1.0: the global assignment did not touch this local.
    print("Outside x =", x)
# + jupyter={"outputs_hidden": false}
egfunc1()
print("Globally defined x =",x)
# -
# ## Lambda Functions
# These are small functions which are not defined with any name and carry a single expression whose result is returned. Lambda functions comes very handy when operating with lists. These function are defined by the keyword **lambda** followed by the variables, a colon and the respective expression.
# Bind an anonymous squaring function to the name z (tutorial illustration;
# PEP 8 would prefer a `def` for a named function).
z = lambda x: x * x
# + jupyter={"outputs_hidden": false}
z(8)
# -
# ### Composing functions
# Lambda functions can also be used to compose functions
# + jupyter={"outputs_hidden": false}
def double(x):
    """Return twice the argument."""
    return 2 * x
def square(x):
    """Return the argument multiplied by itself."""
    return x * x
def f_of_g(f, g):
    "Compose two functions of a single variable"
    def composed(x):
        return f(g(x))
    return composed
doublesquare = f_of_g(double, square)
print("doublesquare is a",type(doublesquare))
doublesquare(3)
| 001-Jupyter/001-Tutorials/005-Python4Maths/06.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fermi-LAT data with Gammapy
#
# ## Introduction
#
# This tutorial will show you how to work with Fermi-LAT data with Gammapy. As an example, we will look at the Galactic center region using the high-energy dataset that was used for the 3FHL catalog, in the energy range 10 GeV to 2 TeV.
#
# We note that support for Fermi-LAT data analysis in Gammapy is very limited. For most tasks, we recommend you use
# [Fermipy](http://fermipy.readthedocs.io/), which is based on the [Fermi Science Tools](https://fermi.gsfc.nasa.gov/ssc/data/analysis/software/) (Fermi ST).
#
# Using Gammapy with Fermi-LAT data could be an option for you if you want to do an analysis that is not easily possible with Fermipy and the Fermi Science Tools. For example a joint likelihood fit of Fermi-LAT data with data e.g. from H.E.S.S., MAGIC, VERITAS or some other instrument, or analysis of Fermi-LAT data with a complex spatial or spectral model that is not available in Fermipy or the Fermi ST.
#
# Besides Gammapy, you might want to look at are [Sherpa](http://cxc.harvard.edu/sherpa/) or [3ML](https://threeml.readthedocs.io/). Or just using Python to roll your own analyis using several existing analysis packages. E.g. it it possible to use Fermipy and the Fermi ST to evaluate the likelihood on Fermi-LAT data, and Gammapy to evaluate it e.g. for IACT data, and to do a joint likelihood fit using e.g. [iminuit](http://iminuit.readthedocs.io/) or [emcee](http://dfm.io/emcee).
#
# To use Fermi-LAT data with Gammapy, you first have to use the Fermi ST to prepare an event list (using ``gtselect`` and ``gtmktime``, exposure cube (using ``gtexpcube2`` and PSF (using ``gtpsf``). You can then use `~gammapy.data.EventList`, `~gammapy.maps` and the `~gammapy.irf.EnergyDependentTablePSF` to read the Fermi-LAT maps and PSF, i.e. support for these high-level analysis products from the Fermi ST is built in. To do a 3D map analyis, you can use Fit for Fermi-LAT data in the same way that it's use for IACT data. This is illustrated in this notebook. A 1D region-based spectral analysis is also possible, this will be illustrated in a future tutorial.
#
# ## Setup
#
# **IMPORTANT**: For this notebook you have to get the prepared ``3fhl`` dataset provided in your $GAMMAPY_DATA.
#
# Note that the ``3fhl`` dataset is high-energy only, ranging from 10 GeV to 2 TeV.
# Check that you have the prepared Fermi-LAT dataset
# We will use diffuse models from here
# !ls -1 $GAMMAPY_DATA/fermi_3fhl
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
from astropy import units as u
from astropy.coordinates import SkyCoord
from gammapy.data import EventList
from gammapy.datasets import MapDataset
from gammapy.datasets.map import MapEvaluator
from gammapy.irf import EnergyDependentTablePSF, PSFMap, EDispMap
from gammapy.maps import Map, MapAxis, WcsNDMap, WcsGeom
from gammapy.modeling.models import (
PowerLawSpectralModel,
PointSpatialModel,
SkyModel,
SkyDiffuseCube,
Models,
create_fermi_isotropic_diffuse_model,
BackgroundModel,
)
from gammapy.modeling import Fit
# ## Events
#
# To load up the Fermi-LAT event list, use the `~gammapy.data.EventList` class:
# Read the preselected 3FHL event list shipped in $GAMMAPY_DATA
events = EventList.read(
    "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_events_selected.fits.gz"
)
print(events)
# The event data is stored in a [astropy.table.Table](http://docs.astropy.org/en/stable/api/astropy.table.Table.html) object. In case of the Fermi-LAT event list this contains all the additional information on position, zenith angle, earth azimuth angle, event class, event type etc.
events.table.colnames
events.table[:5][["ENERGY", "RA", "DEC"]]
# First and last event times, in ISO format
print(events.time[0].iso)
print(events.time[-1].iso)
energy = events.energy
energy.info("stats")  # summary statistics of the event energies
# As a short analysis example we will count the number of events above a certain minimum energy:
for e_min in [10, 100, 1000] * u.GeV:
    n = (events.energy > e_min).sum()
    print(f"Events above {e_min:4.0f}: {n:5.0f}")
# ## Counts
#
# Let us start to prepare things for a 3D map analysis of the Galactic center region with Gammapy. The first thing we do is to define the map geometry. We chose a TAN projection centered on position ``(glon, glat) = (0, 0)`` with pixel size 0.1 deg, and four energy bins.
gc_pos = SkyCoord(0, 0, unit="deg", frame="galactic")
# Four log-spaced energy bins from 10 GeV to 2 TeV (edges given in MeV)
energy_axis = MapAxis.from_edges(
    [1e4, 3e4, 1e5, 3e5, 2e6], name="energy", unit="MeV", interp="log"
)
counts = Map.create(
    skydir=gc_pos,
    npix=(100, 80),
    proj="TAN",
    frame="galactic",
    binsz=0.1,
    axes=[energy_axis],
    dtype=float,
)
# We put this call into the same Jupyter cell as the Map.create
# because otherwise we could accidentally fill the counts
# multiple times when executing the ``fill_by_coord`` multiple times.
counts.fill_by_coord({"skycoord": events.radec, "energy": events.energy})
counts.geom.axes[0]
# Counts image summed over the energy axis, smoothed for display
counts.sum_over_axes().smooth(2).plot(stretch="sqrt", vmax=30);
# ## Exposure
#
# The Fermi-LAT dataset contains the energy-dependent exposure for the whole sky as a HEALPix map computed with ``gtexpcube2``. This format is supported by `~gammapy.maps` directly.
#
# Interpolating the exposure cube from the Fermi ST to get an exposure cube matching the spatial geometry and energy axis defined above with Gammapy is easy. The only point to watch out for is how exactly you want the energy axis and binning handled.
#
# Below we just use the default behaviour, which is linear interpolation in energy on the original exposure cube. Probably log interpolation would be better, but it doesn't matter much here, because the energy binning is fine. Finally, we just copy the counts map geometry, which contains an energy axis with `node_type="edges"`. This is non-ideal for exposure cubes, but again, acceptable because exposure doesn't vary much from bin to bin, so the exact way interpolation occurs in later use of that exposure cube doesn't matter a lot. Of course you could define any energy axis for your exposure cube that you like.
# Read the all-sky exposure cube (HEALPix format, produced with ``gtexpcube2``)
exposure_hpx = Map.read(
    "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_exposure_cube_hpx.fits.gz"
)
# Unit is not stored in the file, set it manually
exposure_hpx.unit = "cm2 s"
print(exposure_hpx.geom)
print(exposure_hpx.geom.axes[0])
exposure_hpx.plot();
# +
# For exposure, we choose a geometry with node_type='center',
# whereas for counts it was node_type='edge'
axis = MapAxis.from_nodes(
    counts.geom.axes[0].center, name="energy_true", unit="MeV", interp="log"
)
# Reuse the WCS and pixelization of the counts map, but with the true-energy axis
geom = WcsGeom(wcs=counts.geom.wcs, npix=counts.geom.npix, axes=[axis])
coord = geom.get_coord()
# Interpolate the HEALPix exposure onto the WCS geometry defined above
data = exposure_hpx.interp_by_coord(coord)
# -
exposure = WcsNDMap(geom, data, unit=exposure_hpx.unit, dtype=float)
print(exposure.geom)
print(exposure.geom.axes[0])
# Exposure is almost constant across the field of view
exposure.slice_by_idx({"energy_true": 0}).plot(add_cbar=True);
# Exposure varies very little with energy at these high energies
energy = [10, 100, 1000] * u.GeV
exposure.get_by_coord({"skycoord": gc_pos, "energy_true": energy})
# ## Galactic diffuse background
# The Fermi-LAT collaboration provides a galactic diffuse emission model, that can be used as a background model for
# Fermi-LAT source analysis.
#
# Diffuse model maps are very large (100s of MB), so as an example here, we just load one that represents a small cutout for the Galactic center region.
# +
# Read a cutout of the Fermi-LAT galactic diffuse emission model (gll_iem_v06)
diffuse_galactic_fermi = Map.read(
    "$GAMMAPY_DATA/fermi_3fhl/gll_iem_v06_cutout.fits"
)
# Unit is not stored in the file, set it manually
diffuse_galactic_fermi.unit = "cm-2 s-1 MeV-1 sr-1"
print(diffuse_galactic_fermi.geom)
print(diffuse_galactic_fermi.geom.axes[0])
# +
# Interpolate the diffuse emission model onto the counts geometry
# The resolution of `diffuse_galactic_fermi` is low: bin size = 0.5 deg
# We use ``interp=3`` which means cubic spline interpolation
coord = counts.geom.get_coord()
data = diffuse_galactic_fermi.interp_by_coord(
    {"skycoord": coord.skycoord, "energy": coord["energy"]}, interp=3
)
diffuse_galactic = WcsNDMap(
    exposure.geom, data, unit=diffuse_galactic_fermi.unit
)
print(diffuse_galactic.geom)
print(diffuse_galactic.geom.axes[0])
# -
diffuse_gal = SkyDiffuseCube(diffuse_galactic, name="diffuse-gal")
# Let's look at the map of first energy band of the cube:
diffuse_gal.map.slice_by_idx({"energy_true": 0}).plot(add_cbar=True);
# Here is the spectrum at the Galactic center:
# Sample the diffuse model on a log-spaced energy grid at the GC position
energy = np.logspace(1, 3, 10) * u.GeV
dnde = diffuse_gal.map.interp_by_coord(
    {"skycoord": gc_pos, "energy_true": energy},
    interp="linear",
    fill_value=None,
)
plt.plot(energy.value, dnde, "+")
plt.loglog()
plt.xlabel("Energy (GeV)")
plt.ylabel("Flux (cm-2 s-1 MeV-1 sr-1)")
# +
# TODO: show how one can fix the extrapolate to high energy
# by computing and padding an extra plane e.g. at 1e3 TeV
# that corresponds to a linear extrapolation
# -
# ## Isotropic diffuse background
#
# To load the isotropic diffuse model with Gammapy, use the `~gammapy.modeling.models.TemplateSpectralModel`. We are using `'fill_value': 'extrapolate'` to extrapolate the model above 500 GeV:
# +
filename = "$GAMMAPY_DATA/fermi_3fhl/iso_P8R2_SOURCE_V6_v06.txt"
# NOTE(review): the text above mentions 'fill_value': 'extrapolate', but the
# code passes fill_value=None — confirm this indeed extrapolates above 500 GeV.
diffuse_iso = create_fermi_isotropic_diffuse_model(
    filename=filename, interp_kwargs={"fill_value": None}
)
# -
# We can plot the model in the energy range between 50 GeV and 2000 GeV:
erange = [50, 2000] * u.GeV
diffuse_iso.spectral_model.plot(erange, flux_unit="1 / (cm2 MeV s)");
# ## PSF
#
# Next we will take a look at the PSF. It was computed using ``gtpsf``, in this case for the Galactic center position. Note that generally for Fermi-LAT, the PSF only varies little within a given region of the sky, especially at high energies like what we have here. We use the `~gammapy.irf.EnergyDependentTablePSF` class to load the PSF and use some of its methods to get some information about it.
# Read the PSF computed with ``gtpsf`` for the Galactic center position
psf_table = EnergyDependentTablePSF.read(
    "$GAMMAPY_DATA/fermi_3fhl/fermi_3fhl_psf_gc.fits.gz"
)
print(psf_table)
# To get an idea of the size of the PSF we check how the containment radii of the Fermi-LAT PSF vary with energy and different containment fractions:
plt.figure(figsize=(8, 5))
psf_table.plot_containment_vs_energy(linewidth=2, fractions=[0.68, 0.95])
plt.xlim(50, 2000)
plt.show()
# In addition we can check how the actual shape of the PSF varies with energy and compare it against the mean PSF between 50 GeV and 2000 GeV:
# +
plt.figure(figsize=(8, 5))
for energy in [100, 300, 1000] * u.GeV:
    psf_at_energy = psf_table.table_psf_at_energy(energy)
    psf_at_energy.plot_psf_vs_rad(label=f"PSF @ {energy:.0f}", lw=2)
erange = [50, 2000] * u.GeV
# Energy-band-averaged PSF, weighted by a power-law spectrum with index 2.3
spectrum = PowerLawSpectralModel(index=2.3)
psf_mean = psf_table.table_psf_in_energy_band(
    energy_band=erange, spectrum=spectrum
)
psf_mean.plot_psf_vs_rad(label="PSF Mean", lw=4, c="k", ls="--")
plt.xlim(1e-3, 0.3)
plt.ylim(1e3, 1e6)
plt.legend();
# -
# Let's compute a PSF kernel matching the pixel size of our map
psf = PSFMap.from_energy_dependent_table_psf(psf_table)
psf_kernel = psf.get_psf_kernel(
    position=geom.center_skydir, geom=geom, max_radius="1 deg"
)
psf_kernel.psf_kernel_map.sum_over_axes().plot(stretch="log", add_cbar=True);
# ### Energy Dispersion
# For simplicity we assume a diagonal energy dispersion:
e_true = exposure.geom.get_axis_by_name("energy_true")
edisp = EDispMap.from_diagonal_response(energy_axis_true=e_true)
# ### Pre-processing
#
# The model components for which only a norm is fitted can be pre-processed so we don't have to apply the IRF at each iteration of the fit and then save computation time. This can be done using the `MapEvaluator`.
#
# +
# pre-compute iso model
evaluator = MapEvaluator(diffuse_iso)
evaluator.update(exposure=exposure, psf=psf, edisp=edisp, geom=counts.geom)
# Wrap the predicted-counts cube in a BackgroundModel so only its
# normalization stays free in the fit; norm=3.3 is the starting value
# (presumably from a previous fit — confirm).
diffuse_iso = BackgroundModel(
    evaluator.compute_npred(), name="bkg-iso", norm=3.3
)
# pre-compute diffuse model
evaluator = MapEvaluator(diffuse_gal)
evaluator.update(exposure=exposure, psf=psf, edisp=edisp, geom=counts.geom)
diffuse_gal = BackgroundModel(evaluator.compute_npred(), name="bkg-iem")
# -
# ## Fit
# Finally, the big finale: let’s do a 3D map fit for the source at the Galactic center, to measure its position and spectrum. We keep the background normalization free.
# +
# Point source at the Galactic center, with power-law starting values
spatial_model = PointSpatialModel(
    lon_0="0 deg", lat_0="0 deg", frame="galactic"
)
spectral_model = PowerLawSpectralModel(
    index=2.7, amplitude="5.8e-10 cm-2 s-1 TeV-1", reference="100 GeV"
)
source = SkyModel(
    spectral_model=spectral_model,
    spatial_model=spatial_model,
    name="source-gc",
)
# Source plus the two pre-computed background components
models = Models([source, diffuse_gal, diffuse_iso])
dataset = MapDataset(
    models=models, counts=counts, exposure=exposure, psf=psf, edisp=edisp,
)
# -
# %%time
fit = Fit([dataset])
result = fit.run()
print(result)
print(models)
# Residual map: observed counts minus model-predicted counts
residual = counts - dataset.npred()
residual.sum_over_axes().smooth("0.1 deg").plot(
    cmap="coolwarm", vmin=-3, vmax=3, add_cbar=True
);
# ## Exercises
#
# - Fit the position and spectrum of the source [SNR G0.9+0.1](http://gamma-sky.net/#/cat/tev/110).
# - Make maps and fit the position and spectrum of the [Crab nebula](http://gamma-sky.net/#/cat/tev/25).
# ## Summary
#
# In this tutorial you have seen how to work with Fermi-LAT data with Gammapy. You have to use the Fermi ST to prepare the exposure cube and PSF, and then you can use Gammapy for any event or map analysis using the same methods that are used to analyse IACT data.
#
# This works very well at high energies (here above 10 GeV), where the exposure and PSF is almost constant spatially and only varies a little with energy. It is not expected to give good results for low-energy data, where the Fermi-LAT PSF is very large. If you are interested to help us validate down to what energy Fermi-LAT analysis with Gammapy works well (e.g. by re-computing results from 3FHL or other published analysis results), or to extend the Gammapy capabilities (e.g. to work with energy-dependent multi-resolution maps and PSF), that would be very welcome!
| docs/tutorials/fermi_lat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yeyomuri/probabilidad/blob/main/DistribucionesDeProbabilidadDiscreta.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Tq1c5_KzUaCG"
# # Distribuciones discretas (e.j. binomial)
#
# ---
#
#
#
# Recordando que la distribución binomial está dada por:
#
# $$
# P(k, n; p) = {n \choose k} p^k (1-p)^{n-k} = \frac{n!}{k!(n-k)!}p^k (1-p)^{n-k}
# $$
#
# donde $P(k, n; p)$ representa la probabilidad de obtener $k$ éxitos de $n$ intentos con posibilidad **binaria** (por ejemplo, lanzamientos de moneda).
#
# `Ejemplo`: la probabilidad de obtener 4 caras a partir de 10 lanzamientos consecutivos de moneda, está dada por (tomando $p=0.5$, por lo tanto $1-p=0.5$):
#
# $$
# P(k=4, n=10; p=0.5) = {10 \choose 4} \left( \frac{1}{2}\right)^{10} = \frac{10!}{4!6!} \left( \frac{1}{2}\right)^{10}
# $$
#
# ---
#
# Ahora, la probabilidad de obtener $k$ o menos éxitos a partir de $n$ intentos está dada por la distribución acumulada:
#
# $$
# C(k, n; p) = \sum_{i=0}^k P(i, n;p) = \sum_{i=0}^k {n \choose i} p^i (1-p)^{n-i}
# $$
#
# Por convención entendemos que:
#
# $$C(k=3,n=6;p=0.5) = P(k \leq 3, n=6, p=0.5 )$$
#
#
# `Ejemplo`: la probabilidad de obtener 3 o menos caras a partir de 6 lanzamientos consecutivos está dada por (tomando $p=0.5$, por lo tanto $1-p=0.5$):
#
# $$P(k \leq 3, n=6;p=0.5) = \sum_{i=0}^3 {6 \choose i} \left(\frac{1}{2} \right)^6 $$
#
# $$P(k \leq 3, n=6;p=0.5) = \left(\frac{1}{2} \right)^6 \sum_{i=0}^3 {6 \choose i} $$
#
# $$P(k \leq 3, n=6;p=0.5) = \left(\frac{1}{2} \right)^6 \left\{ {6 \choose 0} + {6 \choose 1} + {6 \choose 2} + {6 \choose 3} \right\}$$
#
# + [markdown] id="_03BrIqTkkH1"
# # Ejercicios (bloque 1)
#
# Calcula a mano las siguientes probabilidades (tomando $p=0.5$, por lo tanto $1-p=0.5$):
#
# 1. Probabilidad de obtener 3 caras a partir de 12 lanzamientos de moneda.
# R = 0,054
# 2. Probabilidad de obtener 5 o menos caras a partir de 10 lanzamientos de moneda. R = 0.62
#
# 3. Probabilidad de obtener menos de 6 caras a partir de 10 lanzamientos de moneda. R = 0.62
#
# Calcula a mano las mismas probabilidades anteriores pero considerando ahora $p=0.3$.
#
# 1. R = 0.24
# 2. R = 0.95
# + [markdown] id="JW8Nt8avmVuB"
# # Bonus en Python
# + id="8cphK5HBUTaj"
from math import comb, factorial
# Binomial probability mass function
def my_binomial(k, n, p):
    """Return P(X = k) for X ~ Binomial(n, p): k successes in n trials.

    Uses ``math.comb`` for the binomial coefficient: unlike the factorial
    ratio ``factorial(n) / (factorial(k) * factorial(n - k))``, the integer
    ``comb(n, k)`` is exact, so no precision is lost to float division of
    huge factorials before multiplying by the probability terms.
    """
    return comb(n, k) * p**k * (1 - p)**(n - k)
# + [markdown] id="F-UASz0RmZVA"
# Usando la función `my_binomial()`, definida previamente, verifica el cálculo de todas las probabilidades del punto anterior.
#
# Ejemplo:
#
# $$P(k \leq 3, n=6, p=0.5)$$
#
# Se traduce en :
#
# ```python
# total = 0
# for i in range(4):
#     total += my_binomial(i,6,0.5)
#
# print(total)
# ```
# + id="6p9n3hA-mYoP" colab={"base_uri": "https://localhost:8080/"} outputId="ddf22fa8-d194-4927-819c-17fc46f32dac"
# write your code here:
def c_binomial(k, n, p):
    """Cumulative binomial: probability of at most k successes in n trials."""
    return sum(my_binomial(successes, n, p) for successes in range(k + 1))
print(c_binomial(5, 10, 0.5))
| retorsYapuntes/DistribucionesDeProbabilidadDiscreta.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .sh
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Bash
# language: bash
# name: bash
# ---
# # Scientific documents with $\LaTeX$
# ## Introduction
#
# In your research, you will produce papers, reports and—very importantly—your thesis. These documents can be written using a WYSIWYG (What You See Is What You Get) editor (e.g., Word). However, an alternative especially suited for scientific publications is LaTeX. In LaTeX, the document is written in a text file (`.tex`) with certain typesetting (tex) syntax. Text formatting is done using markups (like HTML). The file is then "compiled" (like source code of a programming language) into a file – typically in PDF.
#
# ### Why $\LaTeX$?
#
# A number of reasons:
#
# * The input is a small, portable text file
# * LaTeX compilers are freely available for all OS'
# * Exactly the same result on any computer (not true for Word, for example)
# * LaTeX produces beautiful, professional looking docs
# * Images are easy to embed and annotate
# * Mathematical formulas (esp complex ones) are easy to write
# * LaTeX is very stable – current version basically same since 1994! (9 major versions of MS Word since 1994 – with compatibility issues)
# * LaTeX is free!
# * You can focus on content, and not worry so much about formatting while writing
# * An increasing number of Biology journals provide $\LaTeX$ templates, making formatting quicker.
# * Referencing (bibliography) is easy (and can also be version controlled) and works with tools like Mendeley and Zotero
# * Plenty of online support available – your question has probably already been answered
# * You can integrate LaTeX into a workflow to auto-generate lengthy and complex documents (like your thesis).
#
# ---
#
# :::{figure-md} Word-vs-Latex
#
# <img src="./graphics/WordVTex.svg" alt="Word vs Latex" width="300px">
#
# **LaTeX documents scale up better then WYSIWYG editors.** Latex files for really large and complex documents (such as PhD Theses) are much easier to edit, manage and publish in specific formats (like pdf) than Word or Open Office documents.
#
# :::
#
# ---
#
#
# ### Limitations of $\LaTeX$
#
# * It has a steeper learning curve.
# * Can be difficult to manage revisions with multiple authors – especially if they don't use LaTeX! (Cue: Windows on a virtual machine!)
# * Tracking changes are not available out of the box (but can be enabled using a suitable package)
# * Typesetting tables can be a bit complex.
# * Images and floats are easy to embed, and won't jump around like Word, but if you don't use the right package, they can be difficult to place where you want!
# ### Installing LaTeX
#
# Type this in terminal:
#
# ```bash
# sudo apt-get install texlive-full texlive-fonts-recommended texlive-pictures texlive-latex-extra imagemagick
# ```
# It's a large installation, and will take some time.
# We will use a text editor in this lecture, but you can use one of a number of dedicated editors (e.g., texmaker,
# Gummi, TeXShop, etc.) There are also WYSIWYG frontends (e.g., Lyx, TeXmacs).
#
# [Overleaf](https://www.overleaf.com/) is also very good (and works with git), especially for collaborating with non LaTeX-ers (your university may have a blanket license for the pro version).
# ## Key $\LaTeX$ features
#
# ### Environments
#
# Environments are used to format blocks of text or graphics in a Latex document. They are delimited by an opening `\begin` and a closing `\end` tags (except for certain math environments). Everything inside will be formatted in a specific manner depending on the type of environment. For example, the code
#
# ```latex
# \begin{center}
# Here is some text
# \end{center}
# ```
# Will produce "Here is some text" centered in the middle of the page.
#
# The most commonly used Latex environments are:
#
# |Environment| Purpose|
# |:-|:-|
# |`\begin{center} ... \end{center}`| Center the elements (works for text as well as graphics) |
# |`\begin{itemize} ... \end{itemize}`| An itemized list (default is bullet points)|
# |`\begin{enumerate} ... \end{enumerate}`| An enumerated list (default is Arabic numerals) |
# |`\begin{figure} ... \end{figure}`| For displaying a Figure |
# |`\begin{table} ... \end{table}`| For displaying a table |
# |`\begin{figure} ... \end{figure}`| For displaying a Figure |
# |`\( ... \)`, `$ ... $`, or `\begin{math}...\end{math}`| For displaying an equation inline (as part of a sentence) |
# |`\[ ... \]`, `$$ ... $$`, or `\begin{displaymath}...\end{displaymath}`| For producing an equation as a separate, display item (separate from the text)|
# |`\begin{equation} ... \end{equation}`| For displaying a centered, numbered equation as a separate, display item|
#
# In all of these environments, you can use modifier directives to the environment to tailor them. For example, in the `itemize` environment, the default is to create a list with bullets, but you can pick any symbol.
#
# Below in your first Latex document example, you will see some examples of environments.
#
# ### Special characters
#
# Some characters are "special" in Latex. These characters have a specific purpose, either inside a particular environment (e.g., table or equation), or both outside and inside an environment.
#
# |Character| What it does|
# |:-|:-|
# |`#`| Used to reference arguments for a latex command; similar to the way `$` is an argument reference in [shell scripts](02-ShellScripting-Variables) |
# |`$`| Used for opening or closing a mathematical equation or symbol; e.g.,`$y = mx + c$` gives $$y = mx + c$$|
# |`%` | Comment character; everything from this symbol up to the end of line is ignored and will not appear in the final document |
# |`&`| Alignment character; used to align columns in tables, and also equations in math environments|
# |`_`| Subscript in math environments|
# |`^`| Superscript in math environments|
# |`{` and `}`| Use to group characters in math environments, and to enclose arguments in Latex commands|
# |`~`| (equivalent to the command `\nobreakspace{}`) An "unbreakable" space, which can be used to add one or more "hard" spaces inside as well as outside math environments; for example, `$x y$` gives $$x y$$, but `$x~~y$` gives $$x~~y$$|
# |`\`| Indicates a LaTeX command, as in `\LaTeX` or `\maketitle` (or can be used to escape as special character - see below)|
#
# #### Rendering special characters
#
# If you want to actually reproduce these special characters in your document, you have to "escape" them by adding a *backslash* (`\`) in front of them. For example, writing `$\%$` produces the actual percentage symbol, $\%$.
#
# ### Latex commands
#
# Every LaTeX command starts with a `\` . There are two types of commands:
#
# * **Commands without arguments**: These commands are standalone, and do not take any additional arguments.
# * For example, in your first latex document above, the `\maketitle` command tells latex to render the title in the typeset document.
# * Another example: to render $\LaTeX$, you need the command `\LaTeX`
#
# * **Commands with arguments**: These commands can (and often must) take arguments with curly brackets, which can be modified by including additional directives in square brackets before the main argument.
# * For example, in your first latex example above, the `\documentclass[12pt]{article}`
# * Another example: `\date{}` inserts the current date.
#
# ### Spaces and new lines
#
# Note that:
#
# * Several spaces in your text editor are treated as one space in the typeset document
# * Several empty lines are treated as one empty line
# * One empty line defines a new paragraph
#
# ### Typesetting math
#
# There are two ways to display math
#
# 1. Inline (i.e., within the text).
#
# 2. Stand-alone, numbered equations and formulae.
#
# For inline math, the "dollar" sign flanks the math to be typeset. For example, the code:
#
# ```
# $\int_0^1 p^x (1-p)^y dp$
# ```
#
# becomes $\int_0^1 p^x (1-p)^y dp$
#
# For numbered equations, LaTeX provides the `equation` environment. For example,
#
# ```
# \begin{equation}
# \int_0^1 \left(\ln \left( \frac{1}{x} \right)
# \right)^y dx = y!
# \end{equation}
# ```
#
# becomes
#
# \begin{equation}
# \int_0^1 \left(\ln \left( \frac{1}{x} \right)
# \right)^y dx = y!
# \end{equation}
#
#
# ### Document structure
#
# Latex documents have a very specific structure in terms of the sequence in which certain elements must appear.
#
# #### The start of the document
#
# The first command is always `\documentclass[]{}` defining the type of document (e.g., `article, book, report, letter`).
#
# Here, you can set several options. For example, to set size of text to 10 points and the letter paper size:
#
# ```latex
# \documentclass[10pt,letterpaper]{article}
# ```
#
# #### Defining packages
#
# After having declared the type of document, you can specify special packages you want to use. Some particularly useful ones are:
#
# |||
# |:-|:-|
# |`\usepackage{color}`| Use colors for text in your document|
# |`\usepackage{amsmath,amssymb}`|Formats and commands for typesetting mathematical symbols and equations|
# |`\usepackage{fancyhdr}`| Fine tune the formatting of headers and footers|
# |`\usepackage{graphicx}`| Include figures in different formats: pdf, ps, eps, gif and jpeg|
# |`\usepackage{listings}`| Typeset source code for different programming languages|
# |`\usepackage{rotating}`| Allow rotation of tables and figures|
# |`\usepackage{hyperref}`| Allow formatting of hyperlinks.
# |`\usepackage{lineno}`| Allow line numbers|`_`| |
# |
#
# #### The main body
#
# * Once you select the packages, you must start the main body of your document with `\begin{document}` and end it with `\end{document}`.
#
# ## A first LaTeX example
#
# Let's try writing an example Latex document.
#
# ★ In your code editor, type the following and save it as a file called `FirstExample.tex` in a suitable location(e.g, in a `code` directory):
# ```latex
#
# \documentclass[12pt]{article}
#
# \title{A Simple Document}
#
# \author{<NAME>}
#
# \date{}
#
# \begin{document}
# \maketitle
#
# \begin{abstract}
# This paper analyzes a seminal equation in population biology.
# \end{abstract}
#
# \section{Introduction}
# Blah Blah
#
# \section{Materials \& Methods}
#
# A foundational equation of population biology is:
#
# \begin{equation}
# \frac{dN}{dt} = r N (1 - \frac{N}{K})
# \end{equation}
#
# It was first proposed by Verhulst in 1838 \cite{verhulst1838notice}.
#
# \bibliographystyle{plain}
#
# \bibliography{FirstBiblio}
#
# \end{document}
# ```
# ```{note}
# Look carefully at the way some of the elements such as special characters and environments are used in this first example document.
# ```
#
# ### Referencing and bibliography
#
# Now, let's get a citation for the paper.
#
# ★ In the search box in Google Scholar type "verhulst population 1838"
#
# The paper should be the only one (or the top one) to appear.
#
# Click on the "Cite" icon (looks like two hollow commas) below the paper's title etc., and a small Cite window will appear. Click on "BibTeX" in the list of format options at the bottom, which should lead to a page with just the following text:
#
# ```bash
# @article{verhulst1838notice,
# title={Notice sur la loi que la population suit dans son accroissement},
# author={<NAME>{c}}ois},
# journal={Corresp. Math. Phys.},
# volume={10},
# pages={113--126},
# year={1838}
# }
# ```
#
# Copy and paste this into a file called `FirstBiblio.bib`, saved in the same directory as `FirstExample.tex`
#
# ### Compiling the Latex document
#
# Now we can create a `.pdf` file of the document.
#
# ★ In a terminal type (making sure you are the same directory where `FirstExample.tex` and `FirstBiblio.bib` are):
#
# ``` bash
# pdflatex FirstExample.tex
# bibtex FirstExample
# pdflatex FirstExample.tex
# pdflatex FirstExample.tex
# ```
# This should produce the file `FirstExample.pdf`:
# <img style="border:1px solid black;" src="./graphics/FirstExample.svg" alt="Latex Example" />
# In the above bash script, we repeated the `pdflatex` command 3 times. Here's why:
#
# * The first `pdflatex` run generates two files:`FirstExample.log` and `FirstExample.aux` (and an incomplete `.pdf`).
# * At this step, all cite{...} arguments info that bibtex needs are written into the `.aux` file.
# * Then, the second `bibtex` command (followed by the filename without the `.tex` extension) results in bibtex reading the `.aux` file that was generated. It then produces two more files: `FirstExample.bbl` and `FirstExample.blg`
#    * At this step, bibtex takes the citation info in the aux file and puts the relevant bibliographic entries into the `.bbl` file (you can take a peek at all these files), formatted according to the instructions provided by the bibliography style that you have specified using `bibliographystyle{plain}`.
# * The second `pdflatex` run updates `FirstExample.log` and `FirstExample.aux` (and a still-incomplete `.pdf` - the citations are not correctly formatted yet)
#    * At this step, the reference list in the `.bbl` generated in the above step is included in the document, and the correct labels for the in-text `cite{...}` commands are written in the `.aux` file (but not yet in the actual pdf).
# * The third and final `pdflatex` run then updates `FirstExample.log` and `FirstExample.aux` one last time, and now produces the complete `.pdf` file, with citations correctly formatted.
# * At this step, latex knows what the correct in-text citation labels are, and includes them in the pdf document.
#
# Throughout all this, the `.log` file plays no role except to record info about how the commands are running.
#
# PHEW! Why go through this repetitive sequence of commands? Well, "it is what it is" – $\LaTeX$, with all its advantages does have its quirks. The reason why it is this way, is probably that back then (<NAME>'s PhD Thesis writing days – late 1950's to early 1960's), computers had *tiny* memories (RAMs), and writing files to disk and then reading them back in for the next step of the algorithm/program was the best (and only) way to go. Why has this not been fixed? I am not sure - keep an eye out, and it might well be (and then, raise an issue on TheMulQuaBio's [Github](https://github.com/mhasoba/TheMulQuaBio/issues)!)
#
# Anyway, as such, you don't have to run these commands literally step by step, because you can create a bash script that does it for you, as we will now learn.
# ### A bash script to compile $\LaTeX$
#
# Let's write a useful little bash script to compile latex with bibtex.
#
# ★ Type the following script and call it `CompileLaTeX.sh` (you know where to put it!):
#
# ```bash
# #!/bin/bash
# pdflatex $1.tex
# bibtex $1
# pdflatex $1.tex
# pdflatex $1.tex
# evince $1.pdf &
#
# ## Cleanup
# rm *.aux
# rm *.log
# rm *.bbl
# rm *.blg
# ```
# How do you run this script? The same as your previous bash scripts, so:
#
# ```latex
# bash CompileLaTeX.sh FirstExample
# ```
#
# ### Exercise
#
# Note that I have not written the `.tex` extension of `FirstExample` when feeding it to the in latex compilation bash script above. Make this bash script more convenient to use by allowing users to compile the script by using
#
# ```latex
# bash CompileLaTeX.sh FirstExample.tex
# ```
# ## Some more $\LaTeX$ features and tips
#
# Here are some more Latex features, and tips that might prove handy:
#
# * LaTeX can render pretty much every mathematical symbol and operator that you can think of (plenty of lists and cheat-sheets online)
# * Long documents can be split into separate `.tex` documents and combined using the `\input{}` command
# * You can use bibliography managers such as [Mendeley](https://www.mendeley.com/?interaction_required=true) or [Zotero](https://www.zotero.org/) to export and maintain/update `.bib` files that are then ready to be used in a Latex document
# * You can create new environments and commands, and create new ones in the preamble (which can also be kept as a separate document and inserted using the `\input{}` command)
# ## Practicals
#
# ### First $\LaTeX$ example
#
# Test `CompileLaTeX.sh` with `FirstExample.tex` and bring it under version control under `week1` in your repository. Make sure that `CompileLaTeX.sh` will work if somebody else ran it from their computer using `FirstExample.tex` as an input.
#
# ### Practicals wrap-up
#
# Make sure you have your `Week 1` directory organized with `Data`, `Sandbox` and `Code` with the necessary files and this week's (functional!) scripts in there. Every script should run without errors on my computer. This includes the five solutions (single-line commands you came up with) in `UnixPrac1.txt`.
#
# *Commit and push every time you do some significant amount of coding work (after testing it), and then again before the given deadline (this will be announced in class).*
# ## Readings & Resources
#
# ### General
#
# * [http://en.wikibooks.org/wiki/LaTeX/Introduction](http://en.wikibooks.org/wiki/LaTeX/Introduction)
# * [The not so Short Introduction to LaTeX](https://ctan.org/tex-archive/info/lshort/english/)
# * [The Visual LaTeX FAQ](https://mirror.ox.ac.uk/sites/ctan.org/info/visualfaq/visualFAQ.pdf): sometimes it is difficult to describe what you want to do!
# * [The Overleaf knowledge base](https://www.overleaf.com/learn), including
# * [Learn LaTeX in 30 minutes](https://www.overleaf.com/learn/latex/Learn_LaTeX_in_30_minutes)
# * [Presentations in LaTeX](https://www.overleaf.com/learn/latex/Beamer_Presentations:_A_Tutorial_for_Beginners_(Part_1)—Getting_Started)
# * [Bibliographies in LaTeX](https://www.overleaf.com/learn/latex/Bibliography_management_with_bibtex)
#
# ### $\LaTeX$ Templates
# * There are lots of LaTeX templates online, such for typesetting theses from particular institutions, or papers for a specific journal. There are some examples the `TheMulQuaBio` repo (under `code`).
# * The [Overleaf templates](https://www.overleaf.com/latex/templates) are extensive
# * These includes many [Imperial College Dissertation templates](https://www.overleaf.com/latex/templates?addsearch=imperial%20college)).
#
# ### $\LaTeX$ Tables
# * [$\LaTeX$ table generator](http://www.tablesgenerator.com/)
| content/_build/jupyter_execute/notebooks/04-LaTeX.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing Frame Dragging in Kerr Spacetime
#
# ### Importing required modules
# +
import numpy as np
from einsteinpy.geodesic import Nulllike
from einsteinpy.plotting import StaticGeodesicPlotter
# -
# ### Setting up the system
# - Initial position & momentum of the test particle
# - Spin of the Kerr Black Hole
# - Other solver parameters
#
# Note that, we are working in _M_-Units ($G = c = M = 1$). Also, setting momentum's $\phi$-component to negative, implies an initial retrograde trajectory.
# Initial conditions in M-units (G = c = M = 1).
# position/momentum are 3-vectors; presumably (r, theta, phi) components — TODO confirm against einsteinpy docs
position = [2.5, np.pi / 2, 0.]
momentum = [0., 0., -2.]  # negative phi-momentum: initially retrograde trajectory
a = 0.99                  # Kerr black hole spin parameter (near-extremal)
end_lambda = 150.         # affine-parameter value at which integration stops
step_size = 0.0005        # solver step size
# ### Calculating the geodesic, using the Julia back-end
geod = Nulllike(
    position=position,
    momentum=momentum,
    a=a,
    end_lambda=end_lambda,
    step_size=step_size,
    return_cartesian=True,  # return trajectory in Cartesian coordinates for plotting
    julia=True              # use the Julia solver back-end
)
# ### Plotting the geodesic in 2D
sgpl = StaticGeodesicPlotter(bh_colors=("red", "blue"))
sgpl.plot2D(geod, coordinates=(1, 2), figsize=(10, 10), color="indigo") # Plot X vs Y
sgpl.show()
# As can be seen in the plot above, the photon's trajectory is reversed, due to frame-dragging effects, so that, it moves in the direction of the black hole's spin, before eventually falling into the black hole.
| docs/source/examples/Visualizing Frame Dragging in Kerr Spacetime using EinsteinPy!.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''py385'': conda)'
# metadata:
# interpreter:
# hash: 9b3d8300e9bf6fadd6aa28c0ec09f1234eff4629f9d55be73fc80f5c66de2bf6
# name: 'Python 3.8.5 64-bit (''py385'': conda)'
# ---
import qiskit as qs
# Circuit 1: three qubits, a single Hadamard on qubit 1 (superposition demo).
q_circuit_1 = qs.QuantumCircuit(3)
q_circuit_1.h(1)
q_circuit_1.draw()
# Circuit 2: two qubits + one classical bit; H then CNOT prepares a Bell pair,
# and qubit 1 is measured into classical bit 0.
q_circuit_2 = qs.QuantumCircuit(2, 1)
q_circuit_2.h(0)
q_circuit_2.cx(0, 1)
q_circuit_2.measure(1, 0)
q_circuit_2.draw()
| projects/qiskit_tutorials/tutorial_1_3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Preprocessing data for Task 1
# ## Import section
import pandas as pd
import numpy as np
from copy import copy
# ## Load all tables onto dataframes
# Phase-1 training tables (KDD Cup highway-traffic data).
train_t_1 = pd.read_csv("data/phase1_training/20min_avg_travel_time_training_phase1.csv")
train_t_2 = pd.read_csv("data/phase1_training/20min_avg_volume_training_phase1.csv")
# Static road-network tables: links (table 3) and routes (table 4).
train_t_3 = pd.read_csv("data/road/links_table3.csv")
train_t_4 = pd.read_csv("data/road/routes_table 4.csv")
train_t_5 = pd.read_csv("data/phase1_training/trajectories_training_phase1_table5.csv")
train_t_6 = pd.read_csv("data/phase1_training/volume_training_phase1_table6.csv")
train_t_7 = pd.read_csv("data/weather/weather_July_01_Oct_17_table7.csv")
# Submission template (defines which route/time windows must be predicted).
sample_data_final = pd.read_csv("data/submission_sample/submission_sample_travelTime.csv")
# Phase-1 test tables (note: no tables 3/4 — road network is shared).
test_t_1 = pd.read_csv("data/phase1_test/20min_avg_travel_time_test_phase1.csv")
test_t_2 = pd.read_csv("data/phase1_test/20min_avg_volume_test_phase1.csv")
test_t_5 = pd.read_csv("data/phase1_test/trajectories_test_phase1_table5.csv")
test_t_6 = pd.read_csv("data/phase1_test/volume_test_phase1_table6.csv")
test_t_7 = pd.read_csv("data/weather/weather_Oct_18_Oct_24_table7.csv")
# ## Removing outliers from table 5 -- trajectory data
def _smooth_travel_time_outliers(df, threshold=600):
    """Replace travel times above *threshold* seconds, mutating *df* in place.

    An outlier row is replaced by the mean of its neighbours when the
    previous value is itself below the threshold, otherwise by the previous
    value alone (same rule the original two copy-pasted loops applied).
    """
    for k, row in df.iterrows():
        if row['travel_time'] > threshold:
            # assumes a default 0..n-1 RangeIndex; an outlier in the very
            # first/last row would raise KeyError — TODO confirm upstream data
            last_value = df.loc[k-1, 'travel_time']
            next_value = df.loc[k+1, 'travel_time']
            if last_value < threshold:
                df.loc[k, 'travel_time'] = (last_value + next_value)/2.0
            else:
                df.loc[k, 'travel_time'] = last_value


# Apply the same smoothing to training and test trajectories
# (previously duplicated verbatim for each table).
_smooth_travel_time_outliers(train_t_5)
_smooth_travel_time_outliers(test_t_5)
# ## Grouping the travel time of individual into average travel time for every 20 mins time window
# +
# processing training data -- table 5
def _avg_travel_time_per_20min(df):
    """Aggregate per-vehicle travel times to 20-minute route averages.

    Returns one row per (20-min window, intersection, tollgate) with the
    mean travel time in the column 'average_travl_time' (misspelling kept —
    the rest of the pipeline keys on that exact name).
    """
    df['starting_time'] = pd.to_datetime(df['starting_time'], format='%Y-%m-%d %H:%M:%S')
    df = df.set_index(['starting_time'])
    return df.groupby([pd.Grouper(freq='20Min'),
                       'intersection_id',
                       'tollgate_id']).travel_time.mean().reset_index().rename(
                           columns={'travel_time': 'average_travl_time'})


# Same aggregation for training and test trajectories (table 5); previously
# two copy-pasted cells.
train_t_5 = _avg_travel_time_per_20min(train_t_5)
test_t_5 = _avg_travel_time_per_20min(test_t_5)
print(train_t_5.shape, test_t_5.shape)
# -
# ## create unique pairs of all intersection and toll
# +
# Order-preserving list of the unique (intersection, tollgate) route pairs
# that appear in the submission template.
all_toll_intersections = []
for intersection, tollgate in zip(sample_data_final['intersection_id'],
                                  sample_data_final['tollgate_id']):
    pair = (intersection, tollgate)
    if pair not in all_toll_intersections:
        all_toll_intersections.append(pair)

# Window start times to fill in, taken from one representative route (B, 1)
# and shifted back 2 hours (features come from the 2h before each window).
sample_times = sample_data_final[(sample_data_final['tollgate_id']==1)&
                                 (sample_data_final['intersection_id']=='B')]['time_window']
sample_time = [pd.to_datetime(tw.split(',')[0][1:], format="%Y-%m-%d %H:%M:%S") - pd.DateOffset(hours=2)
               for tw in sample_times]
sample_time = pd.Series(sample_time).values
# -
# ## method to replace missing time
def replace_missing_time(test, tollgate, intersection, iteration, sample_time):
    """Backfill a missing 20-min average travel time for one route.

    Walks backwards from ``sample_time[iteration - 1]`` until a window with a
    recorded 'average_travl_time' for (intersection, tollgate) is found.

    Returns the first value found, or None when no earlier window has data.
    """
    while iteration > 0:
        if iteration - 1 >= len(sample_time):
            # Mirror the old broad-except behaviour: skip unusable indices.
            iteration = iteration - 1
            continue
        match = test[(test['tollgate_id'] == tollgate) &
                     (test['starting_time'] == sample_time[iteration - 1]) &
                     (test['intersection_id'] == intersection)]['average_travl_time']
        if len(match):
            return match.values[0]
        # No record for this window: step one window further back.
        iteration = iteration - 1
    return None
# ## replace the missing time in table 5
# +
# For every route, make sure every expected 20-min window exists in the test
# trajectories: any missing window gets a synthetic row whose travel time is
# backfilled from the nearest earlier window (replace_missing_time).
# NOTE(review): DataFrame.append was removed in pandas 2.0 — this cell needs
# pd.concat under modern pandas; behaviour kept as-is here.
for intersection, tollgate in all_toll_intersections:
    # Rows for this route only; the dropped 'index' column is the artifact of
    # reset_index() on the filtered frame.
    test_toll_intersections = copy(test_t_5[(test_t_5['tollgate_id']==tollgate) &
                               (test_t_5['intersection_id']==intersection)
                              ].reset_index())
    test_time= test_t_5[(test_t_5['tollgate_id']==tollgate) &
                        (test_t_5['intersection_id']==intersection)
                       ]['starting_time'].values
    test_toll_intersections.drop('index',axis=1,inplace=True)
    # Use the route's first row as a template for the synthetic rows.
    test_toll_intersections = test_toll_intersections.loc[0]
    for k in range(len(sample_time)):
        if sample_time[k] not in test_time:
            test_toll_intersections['starting_time'] = sample_time[k]
            test_toll_intersections['average_travl_time'] = replace_missing_time(test_t_5,
                                                                                tollgate,
                                                                                intersection,
                                                                                k,
                                                                                sample_time)
            test_t_5 = test_t_5.append(test_toll_intersections)
# Re-number rows and drop the leftover 'index' column.
test_t_5 = test_t_5.reset_index()
test_t_5.drop('index', axis=1, inplace=True)
# -
# Stack the (gap-filled) test windows under the training windows, then build
# lagged travel-time features for the previous 1..7 windows.
# pd.concat replaces DataFrame.append (removed in pandas 2.0); same result.
train_t_5 = pd.concat([train_t_5, test_t_5])
for _lag in range(1, 8):
    # lag1..lag7, created in the same column order as the old 7 pasted lines
    train_t_5['lag%d' % _lag] = train_t_5['average_travl_time'].shift(_lag)
# ## create a heat map of the table 5
import seaborn as sns
# Correlation heatmap of all numeric features (sanity check on the lags).
sns.heatmap(train_t_5.corr(), annot = True, fmt = ".2f")
# NOTE(review): display() is an IPython/notebook builtin with no import here;
# this raises NameError when run as a plain script — confirm intended.
display()
# ## create another dataframe from table 5 with a date offset of 2 hours
# Shift test windows forward 2h: the features observed in [t-2h, t) are used
# to predict window t.
test_t_5['starting_time'] = test_t_5['starting_time'] + pd.DateOffset(hours=2)
# NOTE(review): this is an alias, not a copy — the inplace drop below also
# mutates test_t_5. Harmless as written (test_t_5 is not reused after this),
# but use .copy() if that ever changes.
test_t_5_dup = test_t_5
test_t_5_dup.drop('average_travl_time', axis=1, inplace=True)
test_t_5_dup.head()
print(test_t_5_dup.shape)
print(train_t_5.shape)
# ## Additional info -- adding festival day's data
# +
# Chinese festival days
from datetime import datetime
start_date = datetime(2016, 9, 15)
end_date = datetime(2016, 9, 17)
holiday_range = pd.date_range(start_date, end_date)
start_date2 = datetime(2016, 10, 1)
end_date2 = datetime(2016, 10, 7)
holiday_range= holiday_range.append(pd.date_range(start_date2, end_date2))
# +
# Adding Extra column with the name ch_holidays. If the date exists in between Holiday range,the value of
#the column will be 1 or else 0
def identify_holiday_dates(data, start_time):
day_of_the_week = pd.get_dummies(data[start_time].dt.weekday_name)
hr_of_the_day = pd.get_dummies(data[start_time].dt.hour, prefix='hour_')
minute= pd.get_dummies(data[start_time].dt.minute)
data = pd.concat([data, day_of_the_week, hr_of_the_day, minute], axis=1)
data['date'] = data[start_time].dt.date
data['date'] = data['date'].astype(str)
data['date'] = pd.to_datetime(data['date'], format='%Y-%m-%d')
data['hour'] = data[start_time].dt.hour.astype(int)
start_time_date = data[start_time].dt.date
for k, row in data.iterrows():
data.loc[k,"ch_holidays"] = 0
if start_time_date.loc[k] in holiday_range: data.loc[k, "ch_holidays"] = 1
return data
# -
# Add calendar/holiday features to both the training and test frames.
train_t_5 = identify_holiday_dates(train_t_5, "starting_time")
test_t_5_dup = identify_holiday_dates(test_t_5_dup, "starting_time")
print(test_t_5_dup.shape)
train_t_5.head()
# ## Adding table 7 (weather data)
# +
# loading and appending weather test data to weather train data
# loading and appending weather test data to weather train data
# NOTE(review): DataFrame.append was removed in pandas 2.0; pd.concat is the
# modern equivalent.
train_t_7 = train_t_7.append(test_t_7).reset_index()
train_t_7['date'] = pd.to_datetime(train_t_7['date'], format='%Y-%m-%d')
# -
# ## remove outlier from weather table
# +
# Replace the sentinel value 999017.0 in wind_direction by the average of the
# previous and next readings (or just the previous one when the next reading
# is also the sentinel). 999017.0 is the only outlier value in this column.
for i, row in train_t_7.iterrows():
    if row['wind_direction']== 999017.0:
        previous_value = train_t_7.loc[i-1,'wind_direction']
        next_value = train_t_7.loc[i+1,'wind_direction']
        if next_value != 999017.0:
            train_t_7.loc[i, 'wind_direction'] = (previous_value + next_value)/2.0
        else:
            train_t_7.loc[i, 'wind_direction'] = previous_value
# -
train_t_7.head()
train_t_5.shape
# ## Combining table 5 with table 7 (trajectory data + weather data)
# +
# Turn hour into 3 hour intervals and then combine with weather data
def addWeatherData(df):
    """Snap each row's 'hour' to its 3-hour weather bin and left-join weather.

    Weather observations (train_t_7) are 3-hourly at 0, 3, ..., 21; every
    hour is mapped to the nearest bin (23/0/1 -> 0, 2/3/4 -> 3, ...).  The
    'hour' column is overwritten in place; the merged frame is returned.
    """
    # Explicit hour -> bin lookup; encodes the same mapping the old
    # if/elif ladder expressed.
    hour_bin = {23: 0, 0: 0, 1: 0,
                2: 3, 3: 3, 4: 3,
                5: 6, 6: 6, 7: 6,
                8: 9, 9: 9, 10: 9,
                11: 12, 12: 12, 13: 12,
                14: 15, 15: 15, 16: 15,
                17: 18, 18: 18, 19: 18,
                20: 21, 21: 21, 22: 21}
    for idx, rec in df.iterrows():
        df.loc[idx, "hour"] = hour_bin[rec['hour']]
    return pd.merge(df, train_t_7, on=['date', 'hour'], how='left')
# -
train_t_5 = addWeatherData(train_t_5)
test_t_5_dup = addWeatherData(test_t_5_dup)
train_t_5.head()
train_t_5 = train_t_5.drop(['hour','date'],axis=1)
test_t_5_dup = test_t_5_dup.drop(['hour','date'],axis=1)
train_t_5.shape
# ## Processing table 4
# +
divison_row = []  # accumulator for exploded (intersection, tollgate, link) rows


def divide(seq):
    """Split a comma-separated link-sequence string into a list of link ids."""
    pieces = seq.split(',')
    return pieces
# Turn each route's "link1,link2,..." string into a list of link ids.
train_t_4.link_seq = train_t_4.link_seq.apply(divide)
# NOTE(review): this apply is used purely for its side effect — the lambda
# appends one [intersection, tollgate, link] row per link to divison_row;
# X itself (a column of lists of None) is never used again.
X = train_t_4.apply(lambda row: [divison_row.append([row['intersection_id'], row['tollgate_id'], link])
                          for link in row.link_seq], axis=1)
table_headers = ['intersection_id', 'tollgate_id', 'link_id']
# One row per (route, link) pair.
train_t_4_new= pd.DataFrame(divison_row, columns=table_headers)
train_t_4_new['link_id'] = train_t_4_new['link_id'].astype(str)
# -
# ## Processing table 3
# +
# Flag links whose in/out neighbour lists name several links (a ',' in the
# list means traffic merges into / splits out of this link).  Vectorized
# string test replaces the old row-by-row iterrows loop; astype(str) first so
# NaN neighbour lists become 'nan' (no comma), matching str(row[...]).
train_t_3['cross_in'] = train_t_3['in_top'].astype(str).str.contains(',').astype(int)
train_t_3['cross_out'] = train_t_3['out_top'].astype(str).str.contains(',').astype(int)
train_t_3['link_id'] = train_t_3['link_id'].astype(str)
# Attach link attributes to every (intersection, tollgate, link) row, then
# drop the raw neighbour lists.
train_t_4_new = pd.merge(train_t_4_new, train_t_3, on='link_id', how='left')
train_t_4_new.drop(['in_top', 'out_top'], axis=1, inplace=True)
# -
# ## Merging table 4 with table 5
# +
# Route-level static features derived from the link table: counts of
# merging/splitting links, total route length, link count, and per-lane-count
# (1-4 lanes) length/count breakdowns.
def _group_sum(df, value_col, out_name):
    """Per-(intersection, tollgate) sum of *value_col*, renamed *out_name*."""
    g = df[['intersection_id', 'tollgate_id', value_col]].groupby(
        ['intersection_id', 'tollgate_id'])[value_col].sum().reset_index()
    return g.rename(columns={value_col: out_name})


def _group_size(df, out_name):
    """Per-(intersection, tollgate) row count, renamed *out_name*."""
    g = df[['intersection_id', 'tollgate_id']].groupby(
        ['intersection_id', 'tollgate_id']).size().reset_index()
    return g.rename(columns={0: out_name})


route_keys = ['intersection_id', 'tollgate_id']
final = pd.merge(_group_sum(train_t_4_new, 'cross_in', 'inlink_cross_count'),
                 _group_sum(train_t_4_new, 'cross_out', 'outlink_cross_count'),
                 on=route_keys, how='left')
# Total route length (column keeps its original name 'length').
final = pd.merge(final, _group_sum(train_t_4_new, 'length', 'length'),
                 on=route_keys, how='left')
final = pd.merge(final, _group_size(train_t_4_new, 'link_count'),
                 on=route_keys, how='left')
# Length and count of links broken down by number of lanes (1-4); the old
# code repeated this pair of merges verbatim for each lane count.
for lanes in range(1, 5):
    lane_links = train_t_4_new[train_t_4_new.lanes == lanes]
    final = pd.merge(final, _group_sum(lane_links, 'length', 'lane%d_length' % lanes),
                     on=route_keys, how='left')
    final = pd.merge(final, _group_size(lane_links, 'lane%d_count' % lanes),
                     on=route_keys, how='left')
final.fillna(0, inplace=True)  # routes with no N-lane links get 0, not NaN
train_t_5 = pd.merge(train_t_5, final, on=route_keys, how='left')
test_t_5_dup = pd.merge(test_t_5_dup, final, on=route_keys, how='left')
# -
# ## Formatting starting and end time
def time_period(data, start_time, end_time):
    """Collapse two datetime columns into one '[start,end)' time_window string.

    Adds a 'time_window' column formatted like the submission template and
    drops the two source columns; returns the resulting frame.
    """
    fmt = "%Y-%m-%d %H:%M:%S"
    starts = data[start_time].apply(lambda ts: ts.strftime(fmt))
    ends = data[end_time].apply(lambda ts: ts.strftime(fmt))
    data['time_window'] = '[' + starts + ',' + ends + ')'
    return data.drop([start_time, end_time], axis=1)
# Each window is 20 minutes long; build the '[start,end)' label and index by
# route + window (the submission key).
test_t_5_dup['end'] = test_t_5_dup['starting_time'] + pd.DateOffset(minutes=20)
train_t_5['end'] = train_t_5['starting_time'] + pd.DateOffset(minutes=20)
test_t_5_dup = time_period(test_t_5_dup, 'starting_time', 'end')
train_t_5 = time_period(train_t_5, 'starting_time', 'end')
test_t_5_dup = test_t_5_dup.set_index(['intersection_id', 'tollgate_id', 'time_window'])
train_t_5 = train_t_5.set_index(['intersection_id','tollgate_id','time_window'])
train_t_5.shape
# Align test columns with training: add any training-only columns as 0 and
# reorder to the training column order.
test_t_5_columns, train_t_5_columns = list(test_t_5_dup.columns.values), list(train_t_5.columns.values)
m_t5 = [data for data in train_t_5_columns if data not in test_t_5_columns]
for label in m_t5:
    test_t_5_dup[label] = 0
test_t_5_dup = test_t_5_dup[train_t_5_columns]
# ## Handling the missing values with mean value in table 5
# +
def fill_nullvalues(data):
    """Impute remaining missing values with each column's mean."""
    column_means = data.mean()
    return data.fillna(column_means)
# Mean-impute both frames and persist the task-1 preprocessed datasets.
test_t_5_dup = fill_nullvalues(test_t_5_dup)
train_t_5 =fill_nullvalues(train_t_5)
# -
print(test_t_5_dup.shape, train_t_5.shape)
test_t_5_dup.to_csv("data/preprocessing_data/task1_preprocess_test_data.csv")
train_t_5.to_csv("data/preprocessing_data/task1_preprocess_training_data.csv")
train_t_5.columns.values
test_t_5_dup.shape
| task1_data_preprocessing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" id="zl5g0USZZMvI" outputId="1a4892f1-2fb9-4a1b-b75b-1592008361f6"
import numpy as np
A = np.array([[1, 4, 5],
              [-5, 8, 9],
              [-6, 7, 11]]) # create a 3x3 matrix
B = np.linalg.eig(A) # find the eigenvectors and eigenvalues with np.linalg.eig
B
# Note that B is a tuple. Its components have type np.ndarray, which you should already be familiar with.
# Component B[0] is the vector of eigenvalues, and component B[1] is a matrix whose columns are the
# eigenvectors corresponding to each eigenvalue; in this case the eigenvalue 13.59373746
# corresponds to the eigenvector [0.45145779, 0.62348353, 0.63832135]
# + colab={} colab_type="code" id="fTnKUWS0nhA5"
| unit_3/Module_1_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](https://colab.research.google.com/github/AyanSinhaMahapatra/scancode-results-analyzer/blob/master/src/notebooks/load_results_files.ipynb)
# ## The Following 6 Cells are Only to be run in Google Colab
#
# [Link to Installing Conda in Colab Docs, which is used below](https://towardsdatascience.com/conda-google-colab-75f7c867a522)
# %env PYTHONPATH=
# + language="bash"
#
# MINICONDA_INSTALLER_SCRIPT=Miniconda3-4.5.4-Linux-x86_64.sh
# MINICONDA_PREFIX=/usr/local
# wget https://repo.continuum.io/miniconda/$MINICONDA_INSTALLER_SCRIPT
# chmod +x $MINICONDA_INSTALLER_SCRIPT
# ./$MINICONDA_INSTALLER_SCRIPT -b -f -p $MINICONDA_PREFIX
# conda install --channel defaults conda python=3.6 --yes
# conda update --channel defaults --all --yes
# -
import sys
# Make the freshly installed Miniconda site-packages importable in Colab.
_ = (sys.path.append("/usr/local/lib/python3.6/site-packages"))
# !conda install -c conda-forge pandas numpy matplotlib seaborn -y
# !git clone -l -s git://github.com/AyanSinhaMahapatra/scancode-results-analyzer.git scancode-results-analyzer
# %cd scancode-results-analyzer
# !ls
# Put the cloned analyzer's src/ on the import path (Colab checkout location).
sys.path.append('/content/scancode-results-analyzer/src')
# # `load_results_file.py`
# +
import sys
import numpy as np
import pandas as pd
import os
# Path To Local Folder
sys.path.append('/home/ayan/Desktop/nexB/gsoc20/scancode-results-analyzer/src')
# -
from results_analyze.load_results_package import ResultsDataFramePackage

results_package = ResultsDataFramePackage()
# ## Import Data From JSON instead of Database, on Google Colab
# Build paths relative to the package's configured JSON input directory.
json_filename = "lic_scancode_before.json"
json_filepath = os.path.join(results_package.json_input_dir, json_filename)
mock_metadata_filepath = os.path.join(results_package.json_input_dir, results_package.mock_metadata_filename)
path_json_dataframe = results_package.mock_db_data_from_json(json_filepath, mock_metadata_filepath)
path_json_dataframe
# ## Importing Data From Postgres DataBase
# Simulating Data going into `ResultsDataFrameFile.create_file_level_dataframe` function, which is called by `ResultsDataFramePackage.create_package_level_dataframe`.
# Using code snippets from `ResultsDataFramePackage.create_package_level_dataframe`.
path_json_dataframe = results_package.convert_records_to_json(20)
# Creates `files_dataframe` and breaks at a good example, `file_list` is passed into `ResultsDataFrameFile.create_file_level_dataframe`.
# +
files_dataframe, metadata_dataframe = results_package.modify_package_level_dataframe(path_json_dataframe)
# Grab the per-file scan list of the first package row (Index == 0) only.
for package_scan_result in files_dataframe.itertuples():
    file_list = package_scan_result[2]
    if package_scan_result[0] == 0:
        break
# -
type(file_list)
np.shape(file_list)
# One of the entries inside the list of dicts.
# One of the entries inside the list of dicts (one dict per scanned file).
file_list[3]
# ## Loads List of Dicts into DataFrame
from results_analyze.load_results_file import ResultsDataFrameFile
results_file = ResultsDataFrameFile()
file_level_dataframe = pd.DataFrame(file_list)
# Viewing DataFrame Columns and their types by calling `DataFrame.dtypes`
file_level_dataframe.dtypes
file_level_dataframe.shape
# +
# results_file.modify_file_level_dataframe?
# -
# Cleans/normalizes the file-level frame in place (see package docs).
results_file.modify_file_level_dataframe(file_level_dataframe)
file_level_dataframe.shape
file_level_dataframe.dtypes
# Some entries inside `file_level_dataframe`, here `licenses` column contains list of dicts, where list length is number of license detections per file.
file_level_dataframe.head(5)
# Explode each file's license-detection list into its own rows, keyed by the
# file sha1 plus a per-file detection counter.
lic_level_dataframe = file_level_dataframe.groupby('sha1').licenses.apply(lambda x: pd.DataFrame(x.values[0])).reset_index()
lic_level_dataframe.rename(columns={'level_1': 'lic_det_num'}, inplace=True)
# These are only license level information in the columns.
lic_level_dataframe.dtypes
# +
# results_file.modify_lic_level_dataframe?
# -
lic_level_dataframe = results_file.modify_lic_level_dataframe(lic_level_dataframe)
lic_level_dataframe.dtypes
lic_level_dataframe.set_index('sha1', inplace=True)
# Joins License level and File level Dataframes using Join operations, by the primary key `sha1`.
merged_df = file_level_dataframe.join(lic_level_dataframe, lsuffix='_file', rsuffix='_lic')
merged_df.reset_index(inplace=True)
merged_df.set_index(['sha1', 'lic_det_num'], inplace=True)
# ## Notice how under one file, there can be many license rows, and there are 2 Primary key columns on the left, where there is a one-to-many relationship.
merged_df.head(5)
# This is returned to the `create_package_level_dataframe` function at the package level, where this happens for every row, i.e. every package. They all get merged into one main dataframe.
| etc/notebooks/load_results_files.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
# NOTE(review): hard-coded developer path; only valid on the original laptop.
os.chdir("/Users/kailong/Desktop/rtEnv/rtSynth_rt/")
# !pwd
# +
# This code should be run in console room computer to display the feedback morphings
from __future__ import print_function, division
import os
# Pick the project root from the host: the 'watts' cluster machine vs. the
# developer's laptop.
if 'watts' in os.getcwd():
    main_dir = "/home/watts/Desktop/ntblab/kailong/rtSynth_rt/"
else:
    main_dir="/Users/kailong/Desktop/rtEnv/rtSynth_rt/"
import sys
sys.path.append(main_dir)
sys.path.append(main_dir+"expScripts/feedback/")
from psychopy import visual, event, core, logging, gui, data, monitors
from psychopy.hardware.emulator import launchScan, SyncGenerator
from PIL import Image
import string
import fmrisim as sim
import numpy as np
import pandas as pd
import pylink
from tqdm import tqdm
import time
import re
import logging
import threading
import argparse
alpha = string.ascii_uppercase
from rtCommon.subjectInterface import SubjectInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
from rtCommon.cfg_loading import mkdir,cfg_loading
# Hard-coded toggle between scanner and bench-test display settings; flip the
# condition to True when running at the scanner.
if False:
    scanmode = 'Scan' # 'Scan' or 'Test' or None
    screenmode = True # fullscr True or False
    monitor_name = "scanner"
else:
    scanmode = 'Test' # 'Scan' or 'Test' or None
    screenmode = False # fullscr True or False
    monitor_name = "testMonitor" #"testMonitor"
class SubjectService:
    """Bridge between a remote projectServer and a local SubjectInterface.

    Connects over the 'wsSubject' websocket channel and forwards incoming
    subject-feedback requests to a locally instantiated SubjectInterface.
    """

    def __init__(self, args, webSocketChannelName='wsSubject'):
        """
        Uses the WsRemoteService framework to parse connection-related args and establish
        a connection to a remote projectServer. Instantiates a local version of
        SubjectInterface to handle client requests coming from the projectServer connection.
        Args:
            args: Argparse args related to connecting to the remote server. These include
                "-s <server>", "-u <username>", "-p <password>", "--test",
                "-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
                to the remote projectServer, 'wsSubject' will connect to 'ws://server:port/wsSubject'
        """
        self.subjectInterface = SubjectInterface(subjectRemote=False)
        self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
        self.wsRemoteService.addHandlerClass(SubjectInterface, self.subjectInterface)

    def runDetached(self):
        """Starts the receiver in its own daemon thread."""
        self.recvThread = threading.Thread(name='recvThread',
                                           target=self.wsRemoteService.runForever)
        # Thread.setDaemon() has been deprecated since Python 3.10; assigning
        # the daemon attribute is the supported equivalent.
        self.recvThread.daemon = True
        self.recvThread.start()
# Command-line options for the feedback display; parse_args("") below ignores
# the real CLI so the defaults are used when run inside a notebook.
argParser = argparse.ArgumentParser()
argParser.add_argument('-c', '--config', action="store", dest="config", default='sub001.ses2.toml', type=str, help='experiment file (.json or .toml)')
argParser.add_argument('-r', '--run', action="store", dest="run", default='1', type=str, help='current run')
argParser.add_argument('-e', '--sess', action="store", dest="sess", default='1', type=str, help='current session')
argParser.add_argument('-s', action="store", dest="server", default="localhost:7777",
                       help="Server Address with Port [server:port]")
argParser.add_argument('-i', action="store", dest="interval", type=int, default=5,
                       help="Retry connection interval (seconds)")
argParser.add_argument('-u', '--username', action="store", dest="username", default='kp578',
                       help="rtcloud website username")
argParser.add_argument('-p', '--password', action="store", dest="password", default='<PASSWORD>',
                       help="rtcloud website password")
argParser.add_argument('--test', default=False, action='store_true',
                       help='Use unsecure non-encrypted connection')
args = argParser.parse_args("")
# Server address must look like host:port.
if not re.match(r'.*:\d+', args.server):
    print("Error: Expecting server address in the form <servername:port>")
    argParser.print_help()
    sys.exit()
# Check if the ssl certificate is valid for this server address
from rtCommon.projectUtils import login, certFile, checkSSLCertAltName, makeSSLCertFile
addr, _ = args.server.split(':')
if checkSSLCertAltName(certFile, addr) is False:
    # Addr not listed in sslCert, recreate ssl Cert
    makeSSLCertFile(addr)
# Load the experiment config and derive session parameters from it.
cfg = cfg_loading(args.config)
sub = cfg.subjectName
run = int(args.run) # 1
sess = int(args.sess)
TR=int(cfg.TR)  # repetition time in seconds, from the config
cfg.feedback_expScripts_dir = f"{cfg.projectDir}expScripts/feedback/"
gui = True if screenmode == False else False
scnWidth, scnHeight = monitors.Monitor(monitor_name).getSizePix()
frameTolerance = 0.001 # how close to onset before 'same' frame
TRduration=2.0
# mywin = visual.Window(
#     size=[1280, 800], fullscr=screenmode, screen=0,
#     winType='pyglet', allowGUI=False, allowStencil=False,
#     monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
#     blendMode='avg', useFBO=True,
#     units='height')
# PsychoPy window, slightly smaller than the screen so it stays movable.
mywin = visual.Window(
    size=[scnWidth - 100, scnHeight - 100], fullscr=screenmode, screen=1,
    winType='pyglet', allowGUI=False, allowStencil=False,
    monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
    blendMode='avg', useFBO=True,
    units='height')
# simulation specific
step=3 #in simulation, how quickly the morph changes ramp up. Note this is only for simulation, has nothing to do with real experiment
# trial_list designing parameters
parameterRange=np.arange(1,11) #for saving time for now. np.arange(1,20) #define the range for possible parameters for preloading images. Preloading images is to make the morphing smooth during feedback
tune=4 # this parameter controls how much to morph (how strong the morphing is) (used in preloading function), tune can range from (1,6.15] when parameterRange is np.arange(1,20)
TrialNumber=180 # how many trials are required #test trial ,each trial is 14s, 10 trials are 140s.
## - design the trial list: the sequence of the different types of components:
## - e.g: ITI + waiting for fMRI signal + feedback (receive model output from feedbackReceiver.py)
# Run schedule: one row per TR with the trial number, onset time, TR index,
# phase name, and whether the morph parameter updates on that TR.
trial_list = pd.DataFrame(columns=['Trial','time','TR','state','newWobble'])
curTime=0   # accumulated onset time (seconds)
curTR=0     # accumulated TR index
state=''    # current phase label
# (A stray `trial_list.append({...}, ignore_index=True)` call used to sit
# here; DataFrame.append returns a new frame and its result was discarded,
# so the call had no effect and has been removed.)
def _append_phase(df, trial, phase, wobble, n_trs, cur_time, cur_tr):
    """Append *n_trs* one-TR rows for one phase; returns (df, time, tr).

    NOTE(review): keeps the original DataFrame.append call (removed in
    pandas 2.0) for behavioural parity with the rest of this script.
    """
    for _ in range(n_trs):
        df = df.append({'Trial': trial,
                        'time': cur_time,
                        'TR': cur_tr,
                        'state': phase,
                        'newWobble': wobble},
                       ignore_index=True)
        cur_time = cur_time + TR
        cur_tr = cur_tr + 1
    return df, cur_time, cur_tr


# Each trial is 6 TR ITI + 3 TR metric wait + 5 TR feedback + 6 TR ITI
# (the four near-identical append loops were folded into _append_phase).
for currTrial in range(1, 1 + TrialNumber):
    trial_list, curTime, curTR = _append_phase(trial_list, currTrial, 'ITI', 0, 6, curTime, curTR)
    trial_list, curTime, curTR = _append_phase(trial_list, currTrial, 'waiting', 0, 3, curTime, curTR)
    trial_list, curTime, curTR = _append_phase(trial_list, currTrial, 'feedback', 1, 5, curTime, curTR)
    trial_list, curTime, curTR = _append_phase(trial_list, currTrial, 'ITI', 0, 6, curTime, curTR)
# for currTrial in range(1,1+TrialNumber):
# for i in range(1): # should be 6TR=12s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'ITI',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(1): # should be 3TR=6s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'waiting',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(5): #5TR=10s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'feedback',
# 'newWobble':1},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(1): # should be 6TR=12s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'ITI',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# parameters = np.arange(1,step*(sum((trial_list['newWobble']==1)*1)),step) #[1,2,3,4,5,6,7,8]
# Summary of the generated schedule / preloading configuration.
print('total trial number=',TrialNumber)
# print('neighboring morph difference=',tune)
print('preloaded parameter range=',parameterRange)
# print('used parameters=',parameters)
def sample(L, num=10):
    """Uniformly down-sample list *L* to *num* points (last element kept).

    e.g. L = 0..99, num=10 -> [9, 19, 29, 39, 49, 59, 69, 79, 89, 99]
         L = 0..95, num=10 -> [8, 18, 27, 37, 47, 56, 66, 75, 85, 95]
         L = 0..5,  num=10 -> [0, 0, 0, 1, 2, 2, 3, 3, 4, 5]
    """
    if not L:
        return []  # previously raised IndexError on L[-1] for an empty list
    sampleStep = len(L) / num
    newList = []
    for i in range(1, num):
        newList.append(L[int(i * sampleStep - 1)])
    newList.append(L[-1])
    return newList
# preload image list for parameter from 1 to 19.
def preloadimages(parameterRange=np.arange(1,20),tune=1):
    '''
    purpose:
        preload images into image object sequences corresponding to each parameter
        each parameter corresponds to 40 image objects
    steps:
        for every parameter, build a forward and a backward sweep of morph
        images along two morph axes ('bedTable' and 'benchBed'), down-sample
        each sweep with sample(), and store the resulting ImageStim list in a
        dict keyed by the parameter.
    '''
    # tune is supplied 1-based; convert to a 0-based strength multiplier
    tune=tune-1
    start = time.time()
    imageLists={}  # parameter -> list of preloaded visual.ImageStim objects
    numberOfUpdates=16 # corresponds to 66 updates
    last_image=''  # path of the most recently built stimulus, to skip consecutive duplicates
    for currParameter in tqdm(parameterRange): #49
        images=[]
        print('maximum morph=',round((tune*currParameter*numberOfUpdates+2)/numberOfUpdates+1))
        for axis in ['bedTable', 'benchBed']:
            # forward sweep along the morph axis ('benchBed' counts down from 100)
            tmp_images=[]
            for currImg in range(1,int(round(tune*currParameter*numberOfUpdates+2)),int((currParameter*numberOfUpdates+2)/numberOfUpdates)):
                currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
                if currMorph<1 or currMorph>99:
                    raise Exception('morphing outside limit')
                curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
                if curr_image!=last_image:
                    currImage=visual.ImageStim(win=mywin,
                        name='image',
                        image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
                        ori=0, pos=(0, 0), size=(0.5, 0.5),
                        color=[1,1,1], colorSpace='rgb', opacity=1,
                        flipHoriz=False, flipVert=False,
                        texRes=128, interpolate=True, depth=-4.0)
                    tmp_images.append(currImage)
                last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
            images=images+sample(tmp_images)
            # backward sweep (reversed indices) so the wobble returns smoothly.
            # NOTE(review): unlike the forward sweep, this loop has no
            # out-of-range check on currMorph — confirm that is intentional.
            tmp_images=[]
            for currImg in reversed(range(1,int(round(tune*currParameter*numberOfUpdates+1)),int((currParameter*numberOfUpdates+2)/numberOfUpdates))):
                currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
                curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
                if curr_image!=last_image:
                    currImage=visual.ImageStim(win=mywin,
                        name='image',
                        image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
                        ori=0, pos=(0, 0), size=(0.5, 0.5),
                        color=[1,1,1], colorSpace='rgb', opacity=1,
                        flipHoriz=False, flipVert=False,
                        texRes=128, interpolate=True, depth=-4.0)
                    tmp_images.append(currImage)
                last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
            images=images+sample(tmp_images)
        imageLists.update( {currParameter : images} )
    end = time.time()
    print("preload image duration=", end - start)
    return imageLists
imageLists=preloadimages(parameterRange=parameterRange,tune=tune)
# Open data file for eye tracking
# datadir = "./data/feedback/"
datadir = main_dir + f"subjects/{sub}/ses{sess}/feedback/"
maxTR=int(trial_list['TR'].iloc[-1])+6
# Settings for MRI sequence
MR_settings = {'TR': TRduration, 'volumes': maxTR, 'sync': 5, 'skip': 0, 'sound': True} #{'TR': 2.000, 'volumes': maxTR, 'sync': 5, 'skip': 0, 'sound': True}
# check if there is a data directory and if there isn't, make one.
if not os.path.exists('./data'):
os.mkdir('./data')
if not os.path.exists('./data/feedback/'):
os.mkdir('./data/feedback/')
# check if data for this subject and run already exist, and raise an error if they do (prevent overwriting)
newfile = datadir+"{}_{}.csv".format(str(sub), str(run))
if os.path.exists(newfile):
raise Exception(f'{newfile} exists')
# create empty dataframe to accumulate data
data = pd.DataFrame(columns=['Sub', 'Run', 'TR', 'time'])
# Create the fixation dot, and initialize as white fill.
fix = visual.Circle(mywin, units='deg', radius=0.05, pos=(0, 0), fillColor='white',
lineColor='black', lineWidth=0.5, opacity=0.5, edges=128)
# start global clock and fMRI pulses (start simulated or wait for real)
print('Starting sub {} in run #{}'.format(sub, run))
vol = launchScan(mywin, MR_settings, simResponses=None, mode=scanmode,
esc_key='escape', instr='select Scan or Test, press enter',
wait_msg='waiting for scanner...', wait_timeout=300, log=True)
image = visual.ImageStim(
win=mywin,
name='image',
image=cfg.feedback_expScripts_dir + './carchair_exp_feedback/bedChair_1_5.png', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
backgroundImage = visual.ImageStim(
win=mywin,
name='image',
image=cfg.feedback_expScripts_dir+'./carchair_exp_feedback/greyBackground.png', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
# trialClock is reset in each trial to change image every TR (2s), time for each image is 2/numOfImages
trialClock = core.Clock()
# trialClock.add(10) # initialize as a big enough number to avoid text being shown at the first time.
TR=list(trial_list['TR'])
states=list(trial_list['state'])
newWobble=list(trial_list['newWobble'])
# parameters=np.round(np.random.uniform(0,10,sum((trial_list['newWobble']==1)*1)))
# parameters = np.arange(1,1+sum((trial_list['newWobble']==1)*1)) #[1,2,3,4,5,6,7,8]
ParameterUpdateDuration=np.diff(np.where(trial_list['newWobble']==1))[0][0]*TRduration
curr_parameter=0
remainImageNumber=[]
# feedbackParameterFileName=main_dir+f"subjects/{sub}/ses{sess}_feedbackParameter/run_{run}.csv"
# # While the running clock is less than the total time, monitor for 5s, which is what the scanner sends for each TR
# _=1
# while not os.path.exists(feedbackParameterFileName):
# keys = event.getKeys(["5","0"])
# if '0' in keys: # whenever you want to quite, type 0
# mywin.close()
# core.quit()
# time.sleep(0.01)
# if _ % 100==0:
# print(f'waiting {feedbackParameterFileName}')
# _+=1
# parameters=pd.read_csv(feedbackParameterFileName)
# while np.isnan(parameters['value'].iloc[-1]):
# keys = event.getKeys(["5","0"])
# if '0' in keys: # whenever you want to quite, type 0
# mywin.close()
# core.quit()
# time.sleep(0.01)
# if _ % 100==0:
# print(f'waiting parameters nan')
# _+=1
# parameters=pd.read_csv(feedbackParameterFileName)
# from rtCommon.feedbackReceiver import WsFeedbackReceiver
# WsFeedbackReceiver.startReceiverThread(args.server,
# retryInterval=5,
# username="kp578",
# password="<PASSWORD>",
# testMode=True)
installLoggers(logging.INFO, logging.INFO, filename=f'{cfg.feedback_dir}SubjectService_{run}_{sess}.log')
# parse connection args
# These include: "-s <server>", "-u <username>", "-p <password>", "--test",
# "-i <retry-connection-interval>"
connectionArgs = parseConnectionArgs()
subjectService = SubjectService(connectionArgs)
subjectService.runDetached()
default_parameter=19
# -
TR
cfg.TR
# + active=""
#
# -
subjectService = SubjectService(args)
subjectService.runDetached()
# +
# curr_parameter=len(parameters['value'])-1
# Main presentation loop: one pass per screen refresh. Scanner triggers ('5')
# advance the TR schedule; pressing '0' aborts the run.
while len(TR)>1: #globalClock.getTime() <= (MR_settings['volumes'] * MR_settings['TR']) + 3:
    trialTime = trialClock.getTime()
    keys = event.getKeys(["5","0"]) # check for triggers
    if '0' in keys: # whenever you want to quit, type 0
        mywin.close()
        core.quit()
    if len(keys):
        # a trigger arrived: step to the next scheduled TR
        TR.pop(0)
        states.pop(0)
        newWobble.pop(0)
        print(states[0])
        if states[0] == 'feedback' and newWobble[0]==1:
            # fetch parameter from preprocessing process on Milgram
            # feedbackMsg = WsFeedbackReceiver.msgQueue.get(block=True, timeout=None)
            # NOTE(review): this .get() blocks until the projectServer pushes a message.
            feedbackMsg = subjectService.subjectInterface.msgQueue.get(block=True, timeout=None)
            runId,trID,value,timestamp=feedbackMsg.get('runId'),feedbackMsg.get('trId'),feedbackMsg.get('value'),feedbackMsg.get('timestamp')
            # fall back to the default wobble strength when no value was sent
            if value==None:
                parameter = default_parameter
            else:
                parameter = value
            # print('feedbackParameterFileName=',feedbackParameterFileName)
            # parameters=pd.read_csv(feedbackParameterFileName)
            # if curr_parameter>(len(parameters['value'])-1):
            #     curr_parameter=curr_parameter-1
            #     curr_parameter=(len(parameters['value'])-1)
            # parameter=parameters['value'].iloc[curr_parameter]
            # print('curr_parameter=',curr_parameter)
            # print('parameter=',parameter)
            print(f'TR[0]={TR[0]},trID={trID},parameter={parameter},timestamp={timestamp},runId={runId}')
            # curr_parameter=curr_parameter+1
            # start new clock for current updating duration (the duration in which only a single parameter is used, which can be 1 TR or a few TRs, the beginning of the updateDuration is indicated by the table['newWobble'])
            trialClock=core.Clock()
            trialTime=trialClock.getTime()
            # update the image list to be shown based on the fetched parameter
            # NOTE(review): raises KeyError if parameter is outside the preloaded
            # parameterRange — confirm the sender only produces preloaded values.
            imagePaths=imageLists[parameter] #list(imageLists[parameter])
            # calculated how long each image should last.
            eachTime=ParameterUpdateDuration/len(imagePaths)
            # update the image
            # image.image=imagePaths[0]
            image.setAutoDraw(False)
            imagePaths[0].setAutoDraw(True)
            # currImage*eachTime is used in the calculation of the start time of next image in the list.
            # save when the image is presented and which image is presented.
            # NOTE(review): 'imageTime' stores the image file path, not a time.
            data = data.append({'Sub': sub,
                                'Run': run,
                                'TR': TR[0],
                                'time': trialTime,
                                'imageTime':imagePaths[0].image,
                                'eachTime':eachTime},
                               ignore_index=True)
            oldMorphParameter=re.findall(r"_\w+_",imagePaths[0].image)[1]
            # print('curr morph=',oldMorphParameter)
            remainImageNumber.append(0)
            currImage=1
            # # discard the first image since it has been used.
            # imagePaths.pop(0)
    # every refresh during feedback: advance to the next image once the
    # current image's display time has elapsed
    if (states[0] == 'feedback') and (trialTime>currImage*eachTime):
        try: # sometimes the trialTime accidentally surpasses the maximum time, in this case just do nothing, pass
            imagePaths[currImage-1].setAutoDraw(False)
            imagePaths[currImage].setAutoDraw(True)
            # print('currImage=',imagePaths[currImage],end='\n\n')
            remainImageNumber.append(currImage)
            # write the data!
            data = data.append({'Sub': sub,
                                'Run': run,
                                'TR': TR[0],
                                'time': trialTime,
                                'imageTime':imagePaths[currImage].image,
                                'eachTime':eachTime},
                               ignore_index=True)
            currMorphParameter=re.findall(r"_\w+_",imagePaths[currImage].image)[1]
            if currMorphParameter!=oldMorphParameter:
                pass
                # print('curr morph=',currMorphParameter)
            oldMorphParameter=currMorphParameter
            currImage=currImage+1
        except: # NOTE(review): bare except also hides real errors (e.g. NameError) — consider narrowing to IndexError
            pass
    elif states[0] == 'ITI':
        backgroundImage.setAutoDraw(True)
        fix.draw()
    elif states[0] == 'waiting':
        backgroundImage.setAutoDraw(False)
        image.setAutoDraw(True)
    # refresh the screen
    mywin.flip()
# write data out!
data.to_csv(newfile)
mywin.close()
core.quit()
# ##############################################################################
# ##############################################################################
# ##############################################################################
# ####################### simulated data for parameters ########################
# ##############################################################################
# ##############################################################################
# ##############################################################################
# parameters = pd.DataFrame(columns=['runId','trId','value','timestamp'])
# main_dir="/Volumes/GoogleDrive/My Drive/Turk_Browne_Lab/rtcloud_kp/"
# parameterWriteFolder=main_dir+'./subjects/pilot_sub001/ses1_feedbackParameter/'
# if not os.path.isdir(parameterWriteFolder):
# os.mkdir(parameterWriteFolder)
# for i in range(20):
# runId=1
# trId=int(np.random.uniform(1,20,1))
# value=int(np.random.uniform(1,20,1))
# timestamp=int(np.random.uniform(0,10,1))
# parameterFileName=f"{parameterWriteFolder}run_{runId}.csv"
# print("Dequeue run: {}, tr: {}, value: {}, timestamp: {}".
# format(runId,trId,value,timestamp))
# parameters = parameters.append({'runId':runId,
# 'trId':trId,
# 'value':value,
# 'timestamp':timestamp},
# ignore_index=True)
# print('parameters=',parameters)
# parameters.to_csv(parameterFileName)
# +
# This code should be run in console room computer to display the feedback morphings
from __future__ import print_function, division
import os
if 'watts' in os.getcwd():
main_dir = "/home/watts/Desktop/ntblab/kailong/rtSynth_rt/"
else:
main_dir="/Users/kailong/Desktop/rtEnv/rtSynth_rt/"
import sys
sys.path.append(main_dir)
sys.path.append(main_dir+"expScripts/feedback/")
from psychopy import visual, event, core, logging, gui, data, monitors
from psychopy.hardware.emulator import launchScan, SyncGenerator
from PIL import Image
import string
import fmrisim as sim
import numpy as np
import pandas as pd
import pylink
from tqdm import tqdm
import time
import re
import logging
import threading
import argparse
alpha = string.ascii_uppercase
from rtCommon.subjectInterface import SubjectInterface
from rtCommon.wsRemoteService import WsRemoteService, parseConnectionArgs
from rtCommon.utils import installLoggers
from rtCommon.cfg_loading import mkdir,cfg_loading
# Toggle between real-scanner and desktop-test display settings.
# NOTE(review): hard-coded to the test branch; flip to True for scanner runs.
if False:
    scanmode = 'Scan' # 'Scan' or 'Test' or None
    screenmode = True # fullscr True or False
    monitor_name = "scanner"
else:
    scanmode = 'Test' # 'Scan' or 'Test' or None
    screenmode = False # fullscr True or False
    monitor_name = "testMonitor" #"testMonitor"
class SubjectService:
    """Connects to a remote projectServer and services subject-feedback requests.

    Wraps a local SubjectInterface behind a WsRemoteService websocket
    connection so the projectServer can push feedback values to this client.
    """

    def __init__(self, args, webSocketChannelName='wsSubject'):
        """
        Uses the WsRemoteService framework to parse connection-related args and establish
        a connection to a remote projectServer. Instantiates a local version of
        SubjectInterface to handle client requests coming from the projectServer connection.
        Args:
            args: Argparse args related to connecting to the remote server. These include
                "-s <server>", "-u <username>", "-p <password>", "--test",
                "-i <retry-connection-interval>"
            webSocketChannelName: The websocket url extension used to connect and communicate
                to the remote projectServer, 'wsSubject' will connect to 'ws://server:port/wsSubject'
        """
        self.subjectInterface = SubjectInterface(subjectRemote=False)
        self.wsRemoteService = WsRemoteService(args, webSocketChannelName)
        self.wsRemoteService.addHandlerClass(SubjectInterface, self.subjectInterface)

    def runDetached(self):
        """Start the websocket receiver loop in its own daemon thread.

        The thread is marked as a daemon so it does not block interpreter
        shutdown when the experiment script exits.
        """
        # Thread.setDaemon() is deprecated since Python 3.10; passing
        # daemon=True to the constructor is the equivalent modern form.
        self.recvThread = threading.Thread(name='recvThread',
                                           target=self.wsRemoteService.runForever,
                                           daemon=True)
        self.recvThread.start()
argParser = argparse.ArgumentParser()
argParser.add_argument('-c', '--config', action="store", dest="config", default='sub001.ses2.toml', type=str, help='experiment file (.json or .toml)')
argParser.add_argument('-r', '--run', action="store", dest="run", default='1', type=str, help='current run')
argParser.add_argument('-e', '--sess', action="store", dest="sess", default='1', type=str, help='current session')
argParser.add_argument('-s', action="store", dest="server", default="localhost:7777",
help="Server Address with Port [server:port]")
argParser.add_argument('-i', action="store", dest="interval", type=int, default=5,
help="Retry connection interval (seconds)")
argParser.add_argument('-u', '--username', action="store", dest="username", default='kp578',
help="rtcloud website username")
argParser.add_argument('-p', '--password', action="store", dest="password", default='<PASSWORD>',
help="rtcloud website password")
argParser.add_argument('--test', default=False, action='store_true',
help='Use unsecure non-encrypted connection')
args = argParser.parse_args("")
if not re.match(r'.*:\d+', args.server):
print("Error: Expecting server address in the form <servername:port>")
argParser.print_help()
sys.exit()
# Check if the ssl certificate is valid for this server address
from rtCommon.projectUtils import login, certFile, checkSSLCertAltName, makeSSLCertFile
addr, _ = args.server.split(':')
if checkSSLCertAltName(certFile, addr) is False:
# Addr not listed in sslCert, recreate ssl Cert
makeSSLCertFile(addr)
cfg = cfg_loading(args.config)
sub = cfg.subjectName
run = int(args.run) # 1
sess = int(args.sess)
cfg.feedback_expScripts_dir = f"{cfg.projectDir}expScripts/feedback/"
gui = True if screenmode == False else False
scnWidth, scnHeight = monitors.Monitor(monitor_name).getSizePix()
frameTolerance = 0.001 # how close to onset before 'same' frame
TRduration=int(cfg.TR)
# mywin = visual.Window(
# size=[1280, 800], fullscr=screenmode, screen=0,
# winType='pyglet', allowGUI=False, allowStencil=False,
# monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
# blendMode='avg', useFBO=True,
# units='height')
mywin = visual.Window(
size=[scnWidth - 100, scnHeight - 100], fullscr=screenmode, screen=1,
winType='pyglet', allowGUI=False, allowStencil=False,
monitor=monitor_name, color=[0,0,0], colorSpace='rgb', #color=[0,0,0]
blendMode='avg', useFBO=True,
units='height')
# similation specific
step=3 #in simulation, how quickly the morph changes ramp up. Note this is only for simulation, has nothing to do with real experiment
# trial_list designing parameters
parameterRange=np.arange(1,11) #for saving time for now. np.arange(1,20) #define the range for possible parameters for preloading images. Preloading images is to make the morphing smooth during feedback
tune=4 # this parameter controls how much to morph (how strong the morphing is) (used in preloading function), tune can range from (1,6.15] when paremeterrange is np.arange(1,20)
TrialNumber=180 # how many trials are required #test trial ,each trial is 14s, 10 trials are 140s.
## - design the trial list: the sequence of the different types of components:
## - e.g: ITI + waiting for fMRI signal + feedback (receive model output from feedbackReceiver.py)
# Empty schedule table; one row per TR is appended by the loop below.
trial_list = pd.DataFrame(columns=['Trial','time','TR','state','newWobble'])
curTime=0   # accumulated experiment time (seconds)
curTR=0     # accumulated TR count
state=''
# NOTE(review): the return value of DataFrame.append is discarded here, so
# this call is a no-op (pandas append is not in-place) — presumably a
# leftover; confirm before removing.
trial_list.append({'Trial':None,
                   'time':None,
                   'TR':None,
                   'state':None,
                   'newWobble':None},
                  ignore_index=True)
# Build the per-TR schedule for every trial:
# 6 TR ITI -> 3 TR waiting (metric calculation) -> 5 TR feedback -> 6 TR ITI.
# newWobble==1 marks the first TR of a feedback period (new parameter fetch).
for currTrial in range(1,1+TrialNumber):
    # ITI
    for i in range(6): # should be 6TR=12s
        trial_list=trial_list.append({'Trial':currTrial,
                                      'time':curTime,
                                      'TR':curTR,
                                      'state':'ITI',
                                      'newWobble':0},
                                     ignore_index=True)
        curTime=curTime+TRduration
        curTR=curTR+1
    # waiting for metric calculation
    for i in range(3): # should be 3TR=6s
        trial_list=trial_list.append({'Trial':currTrial,
                                      'time':curTime,
                                      'TR':curTR,
                                      'state':'waiting',
                                      'newWobble':0},
                                     ignore_index=True)
        curTime=curTime+TRduration
        curTR=curTR+1
    # feedback trial: try to minimize the wobbling
    for i in range(5): #5TR=10s
        trial_list=trial_list.append({'Trial':currTrial,
                                      'time':curTime,
                                      'TR':curTR,
                                      'state':'feedback',
                                      'newWobble':1},
                                     ignore_index=True)
        curTime=curTime+TRduration
        curTR=curTR+1
    # ITI
    for i in range(6): # should be 6TR=12s
        trial_list=trial_list.append({'Trial':currTrial,
                                      'time':curTime,
                                      'TR':curTR,
                                      'state':'ITI',
                                      'newWobble':0},
                                     ignore_index=True)
        curTime=curTime+TRduration
        curTR=curTR+1
# for currTrial in range(1,1+TrialNumber):
# for i in range(1): # should be 6TR=12s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'ITI',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(1): # should be 3TR=6s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'waiting',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(5): #5TR=10s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'feedback',
# 'newWobble':1},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# for i in range(1): # should be 6TR=12s
# trial_list=trial_list.append({'Trial':currTrial,
# 'time':curTime,
# 'TR':curTR,
# 'state':'ITI',
# 'newWobble':0},
# ignore_index=True)
# curTime=curTime+TR
# curTR=curTR+1
# parameters = np.arange(1,step*(sum((trial_list['newWobble']==1)*1)),step) #[1,2,3,4,5,6,7,8]
print('total trial number=',TrialNumber)
# print('neighboring morph difference=',tune)
print('preloaded parameter range=',parameterRange)
# print('used parameters=',parameters)
def sample(L,num=10):
    """Uniformly re-sample list ``L`` to exactly ``num`` elements.

    The final element of ``L`` is always included. When ``L`` is shorter
    than ``num``, elements are repeated.

    e.g, if L is 0-99, num is 10, result is [9, 19, 29, 39, 49, 59, 69, 79, 89, 99]
    e.g, if L is 0-95, num is 10, result is [8, 18, 27, 37, 47, 56, 66, 75, 85, 95]
    e.g, if L is 0-5,  num is 10, result is [0, 0, 0, 1, 2, 2, 3, 3, 4, 5]
    """
    stride = len(L) / num
    # pick num-1 evenly spaced elements, then force-include the last one
    chosen = [L[int(k * stride - 1)] for k in range(1, num)]
    chosen.append(L[-1])
    return chosen
# preload image list for parameter from 1 to 19.
def preloadimages(parameterRange=np.arange(1,20),tune=1):
    '''
    purpose:
        preload images into image object sequences corresponding to each parameter
        each parameter corresponds to 40 image objects
    steps:
        for every parameter, build a forward and a backward sweep of morph
        images along two morph axes ('bedTable' and 'benchBed'), down-sample
        each sweep with sample(), and store the resulting ImageStim list in a
        dict keyed by the parameter.
    '''
    # tune is supplied 1-based; convert to a 0-based strength multiplier
    tune=tune-1
    start = time.time()
    imageLists={}  # parameter -> list of preloaded visual.ImageStim objects
    numberOfUpdates=16 # corresponds to 66 updates
    last_image=''  # path of the most recently built stimulus, to skip consecutive duplicates
    for currParameter in tqdm(parameterRange): #49
        images=[]
        print('maximum morph=',round((tune*currParameter*numberOfUpdates+2)/numberOfUpdates+1))
        for axis in ['bedTable', 'benchBed']:
            # forward sweep along the morph axis ('benchBed' counts down from 100)
            tmp_images=[]
            for currImg in range(1,int(round(tune*currParameter*numberOfUpdates+2)),int((currParameter*numberOfUpdates+2)/numberOfUpdates)):
                currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
                if currMorph<1 or currMorph>99:
                    raise Exception('morphing outside limit')
                curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
                if curr_image!=last_image:
                    currImage=visual.ImageStim(win=mywin,
                        name='image',
                        image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
                        ori=0, pos=(0, 0), size=(0.5, 0.5),
                        color=[1,1,1], colorSpace='rgb', opacity=1,
                        flipHoriz=False, flipVert=False,
                        texRes=128, interpolate=True, depth=-4.0)
                    tmp_images.append(currImage)
                last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
            images=images+sample(tmp_images)
            # backward sweep (reversed indices) so the wobble returns smoothly.
            # NOTE(review): unlike the forward sweep, this loop has no
            # out-of-range check on currMorph — confirm that is intentional.
            tmp_images=[]
            for currImg in reversed(range(1,int(round(tune*currParameter*numberOfUpdates+1)),int((currParameter*numberOfUpdates+2)/numberOfUpdates))):
                currMorph=100-round(currImg/numberOfUpdates+1) if axis=='benchBed' else round(currImg/numberOfUpdates+1)
                curr_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
                if curr_image!=last_image:
                    currImage=visual.ImageStim(win=mywin,
                        name='image',
                        image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5), mask=None,
                        ori=0, pos=(0, 0), size=(0.5, 0.5),
                        color=[1,1,1], colorSpace='rgb', opacity=1,
                        flipHoriz=False, flipVert=False,
                        texRes=128, interpolate=True, depth=-4.0)
                    tmp_images.append(currImage)
                last_image=cfg.feedback_expScripts_dir+'carchair_exp_feedback/{}_{}_{}.png'.format(axis,currMorph,5)
            images=images+sample(tmp_images)
        imageLists.update( {currParameter : images} )
    end = time.time()
    print("preload image duration=", end - start)
    return imageLists
imageLists=preloadimages(parameterRange=parameterRange,tune=tune)
# Open data file for eye tracking
# datadir = "./data/feedback/"
datadir = main_dir + f"subjects/{sub}/ses{sess}/feedback/"
maxTR=int(trial_list['TR'].iloc[-1])+6
# Settings for MRI sequence
MR_settings = {'TR': TRduration, 'volumes': maxTR, 'sync': 5, 'skip': 0, 'sound': True} #{'TR': 2.000, 'volumes': maxTR, 'sync': 5, 'skip': 0, 'sound': True}
# check if there is a data directory and if there isn't, make one.
if not os.path.exists('./data'):
os.mkdir('./data')
if not os.path.exists('./data/feedback/'):
os.mkdir('./data/feedback/')
# check if data for this subject and run already exist, and raise an error if they do (prevent overwriting)
newfile = datadir+"{}_{}.csv".format(str(sub), str(run))
if os.path.exists(newfile):
raise Exception(f'{newfile} exists')
# create empty dataframe to accumulate data
data = pd.DataFrame(columns=['Sub', 'Run', 'TR', 'time'])
# Create the fixation dot, and initialize as white fill.
fix = visual.Circle(mywin, units='deg', radius=0.05, pos=(0, 0), fillColor='white',
lineColor='black', lineWidth=0.5, opacity=0.5, edges=128)
# start global clock and fMRI pulses (start simulated or wait for real)
print('Starting sub {} in run #{}'.format(sub, run))
vol = launchScan(mywin, MR_settings, simResponses=None, mode=scanmode,
esc_key='escape', instr='select Scan or Test, press enter',
wait_msg='waiting for scanner...', wait_timeout=300, log=True)
image = visual.ImageStim(
win=mywin,
name='image',
image=cfg.feedback_expScripts_dir + './carchair_exp_feedback/bedChair_1_5.png', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
backgroundImage = visual.ImageStim(
win=mywin,
name='image',
image=cfg.feedback_expScripts_dir+'./carchair_exp_feedback/greyBackground.png', mask=None,
ori=0, pos=(0, 0), size=(0.5, 0.5),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=-4.0)
# trialClock is reset in each trial to change image every TR (2s), time for each image is 2/numOfImages
trialClock = core.Clock()
# trialClock.add(10) # initialize as a big enough number to avoid text being shown at the first time.
TR=list(trial_list['TR'])
states=list(trial_list['state'])
newWobble=list(trial_list['newWobble'])
# parameters=np.round(np.random.uniform(0,10,sum((trial_list['newWobble']==1)*1)))
# parameters = np.arange(1,1+sum((trial_list['newWobble']==1)*1)) #[1,2,3,4,5,6,7,8]
ParameterUpdateDuration=np.diff(np.where(trial_list['newWobble']==1))[0][0]*TRduration
curr_parameter=0
remainImageNumber=[]
# feedbackParameterFileName=main_dir+f"subjects/{sub}/ses{sess}_feedbackParameter/run_{run}.csv"
# # While the running clock is less than the total time, monitor for 5s, which is what the scanner sends for each TR
# _=1
# while not os.path.exists(feedbackParameterFileName):
# keys = event.getKeys(["5","0"])
# if '0' in keys: # whenever you want to quite, type 0
# mywin.close()
# core.quit()
# time.sleep(0.01)
# if _ % 100==0:
# print(f'waiting {feedbackParameterFileName}')
# _+=1
# parameters=pd.read_csv(feedbackParameterFileName)
# while np.isnan(parameters['value'].iloc[-1]):
# keys = event.getKeys(["5","0"])
# if '0' in keys: # whenever you want to quite, type 0
# mywin.close()
# core.quit()
# time.sleep(0.01)
# if _ % 100==0:
# print(f'waiting parameters nan')
# _+=1
# parameters=pd.read_csv(feedbackParameterFileName)
# from rtCommon.feedbackReceiver import WsFeedbackReceiver
# WsFeedbackReceiver.startReceiverThread(args.server,
# retryInterval=5,
# username="kp578",
# password="<PASSWORD>",
# testMode=True)
installLoggers(logging.INFO, logging.INFO, filename=f'{cfg.feedback_dir}SubjectService_{run}_{sess}.log')
# parse connection args
# These include: "-s <server>", "-u <username>", "-p <password>", "--test",
# "-i <retry-connection-interval>"
# connectionArgs = parseConnectionArgs()
subjectService = SubjectService(args)
subjectService.runDetached()
# +
default_parameter=19
# curr_parameter=len(parameters['value'])-1
# Main presentation loop: one pass per screen refresh. Scanner triggers ('5')
# advance the TR schedule; pressing '0' aborts the run.
while len(TR)>1: #globalClock.getTime() <= (MR_settings['volumes'] * MR_settings['TR']) + 3:
    trialTime = trialClock.getTime()
    keys = event.getKeys(["5","0"]) # check for triggers
    if '0' in keys: # whenever you want to quit, type 0
        mywin.close()
        core.quit()
    if len(keys):
        # a trigger arrived: step to the next scheduled TR
        TR.pop(0)
        states.pop(0)
        newWobble.pop(0)
        print(states[0])
        if states[0] == 'feedback' and newWobble[0]==1:
            # fetch parameter from preprocessing process on Milgram
            # feedbackMsg = WsFeedbackReceiver.msgQueue.get(block=True, timeout=None)
            # NOTE(review): this .get() blocks until the projectServer pushes a message.
            feedbackMsg = subjectService.subjectInterface.msgQueue.get(block=True, timeout=None)
            runId,trID,value,timestamp=feedbackMsg.get('runId'),feedbackMsg.get('trId'),feedbackMsg.get('value'),feedbackMsg.get('timestamp')
            # fall back to the default wobble strength when no value was sent
            if value==None:
                parameter = default_parameter
            else:
                parameter = value
            # print('feedbackParameterFileName=',feedbackParameterFileName)
            # parameters=pd.read_csv(feedbackParameterFileName)
            # if curr_parameter>(len(parameters['value'])-1):
            #     curr_parameter=curr_parameter-1
            #     curr_parameter=(len(parameters['value'])-1)
            # parameter=parameters['value'].iloc[curr_parameter]
            # print('curr_parameter=',curr_parameter)
            # print('parameter=',parameter)
            print(f'TR[0]={TR[0]},trID={trID},parameter={parameter},timestamp={timestamp},runId={runId}')
            # curr_parameter=curr_parameter+1
            # start new clock for current updating duration (the duration in which only a single parameter is used, which can be 1 TR or a few TRs, the beginning of the updateDuration is indicated by the table['newWobble'])
            trialClock=core.Clock()
            trialTime=trialClock.getTime()
            # update the image list to be shown based on the fetched parameter
            # NOTE(review): the -10 offset maps the incoming value into the
            # preloaded parameterRange (np.arange(1,11)); default_parameter=19
            # maps to key 9. Confirm against the range the server sends —
            # values outside 11..20 would raise KeyError here.
            imagePaths=imageLists[parameter-10] #list(imageLists[parameter])
            # calculated how long each image should last.
            eachTime=ParameterUpdateDuration/len(imagePaths)
            # update the image
            # image.image=imagePaths[0]
            image.setAutoDraw(False)
            imagePaths[0].setAutoDraw(True)
            # currImage*eachTime is used in the calculation of the start time of next image in the list.
            # save when the image is presented and which image is presented.
            # NOTE(review): 'imageTime' stores the image file path, not a time.
            data = data.append({'Sub': sub,
                                'Run': run,
                                'TR': TR[0],
                                'time': trialTime,
                                'imageTime':imagePaths[0].image,
                                'eachTime':eachTime},
                               ignore_index=True)
            oldMorphParameter=re.findall(r"_\w+_",imagePaths[0].image)[1]
            # print('curr morph=',oldMorphParameter)
            remainImageNumber.append(0)
            currImage=1
            # # discard the first image since it has been used.
            # imagePaths.pop(0)
    # every refresh during feedback: advance to the next image once the
    # current image's display time has elapsed
    if (states[0] == 'feedback') and (trialTime>currImage*eachTime):
        try: # sometimes the trialTime accidentally surpasses the maximum time, in this case just do nothing, pass
            imagePaths[currImage-1].setAutoDraw(False)
            imagePaths[currImage].setAutoDraw(True)
            # print('currImage=',imagePaths[currImage],end='\n\n')
            remainImageNumber.append(currImage)
            # write the data!
            data = data.append({'Sub': sub,
                                'Run': run,
                                'TR': TR[0],
                                'time': trialTime,
                                'imageTime':imagePaths[currImage].image,
                                'eachTime':eachTime},
                               ignore_index=True)
            currMorphParameter=re.findall(r"_\w+_",imagePaths[currImage].image)[1]
            if currMorphParameter!=oldMorphParameter:
                pass
                # print('curr morph=',currMorphParameter)
            oldMorphParameter=currMorphParameter
            currImage=currImage+1
        except: # NOTE(review): bare except also hides real errors (e.g. NameError) — consider narrowing to IndexError
            pass
    elif states[0] == 'ITI':
        backgroundImage.setAutoDraw(True)
        fix.draw()
    elif states[0] == 'waiting':
        backgroundImage.setAutoDraw(False)
        image.setAutoDraw(True)
    # refresh the screen
    mywin.flip()
# write data out!
data.to_csv(newfile)
mywin.close()
core.quit()
# ##############################################################################
# ##############################################################################
# ##############################################################################
# ####################### simulated data for parameters ########################
# ##############################################################################
# ##############################################################################
# ##############################################################################
# parameters = pd.DataFrame(columns=['runId','trId','value','timestamp'])
# main_dir="/Volumes/GoogleDrive/My Drive/Turk_Browne_Lab/rtcloud_kp/"
# parameterWriteFolder=main_dir+'./subjects/pilot_sub001/ses1_feedbackParameter/'
# if not os.path.isdir(parameterWriteFolder):
# os.mkdir(parameterWriteFolder)
# for i in range(20):
# runId=1
# trId=int(np.random.uniform(1,20,1))
# value=int(np.random.uniform(1,20,1))
# timestamp=int(np.random.uniform(0,10,1))
# parameterFileName=f"{parameterWriteFolder}run_{runId}.csv"
# print("Dequeue run: {}, tr: {}, value: {}, timestamp: {}".
# format(runId,trId,value,timestamp))
# parameters = parameters.append({'runId':runId,
# 'trId':trId,
# 'value':value,
# 'timestamp':timestamp},
# ignore_index=True)
# print('parameters=',parameters)
# parameters.to_csv(parameterFileName)
# -
| archive/test feedback1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-python-tutorial]
# language: python
# name: conda-env-miniconda3-python-tutorial-python3_myenv
# ---
# ## Figure 6 (Journal of Climate submission; Molina et al.)
# Fig. 6. Annual cycle of area weighted averages of monthly SSTs ($^{\circ}$C) across the same regions indicated in Fig. 5. The CESM1 control (800-year mean), OISST (1982-2020 mean), Global and Pacific experiments (300-year mean), Pacific Salt experiment (150-year mean), and present day observations (OISST; 40-year mean) are indicated in the legend.
# **Figure by: <NAME>, NCAR**
# +
# imports
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
import cftime
import cartopy
import cartopy.crs as ccrs
from cartopy.util import add_cyclic_point
from climatico.util import weighted_mean, pacific_lon
from datetime import timedelta
# -
from config import directory_figs, directory_data
# +
#list of filenames to do this for
# CESM1 freshwater-hosing experiment output (POP ocean model, monthly SST).
# Per the Fig. 6 caption/legend: G* = Global hosing, P* = Pacific hosing,
# FWPaSal* = Pacific Salt experiment; 02Sv/04Sv is the freshwater flux rate.
file_g02sv = 'b1d.e11.B1850LENS.f09_g16.FWAtSalG02Sv.pop.h.SST.*.nc'
file_g04sv = 'b1d.e11.B1850LENS.f09_g16.FWAtSalG04Sv.pop.h.SST.*.nc'
file_p02sv = 'b1d.e11.B1850LENS.f09_g16.FWAtSalP02Sv.pop.h.SST.*.nc'
file_p04sv = 'b1d.e11.B1850LENS.f09_g16.FWAtSalP04Sv.pop.h.SST.*.nc'
file_psalt = 'b1d.e11.B1850LENS.f09_g16.FWPaSalP04Sv.pop.h.SST.*.nc'
file_cntrl = 'b1d.e11.B1850C5CN.f09_g16.005.pop.h.SST.*.nc'
# Observations: NOAA OISST (AVHRR v2.1) daily files on the RDA glade archive.
obs_oissts = '/gpfs/fs1/collections/rda/data/ds277.7/avhrr_v2.1/*/oisst-avhrr-v02r01.*.nc'
# -
def grab_weighted_avg(ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst = None, obs = False,
                      lon1 = 170.5, lon2 = -150.5, lat1 = 30.5, lat2 = 40.5):
    """
    Area-weighted mean SST over a lon/lat box for every experiment.

    Args:
        ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt:
            Monthly SST climatologies (xarray objects with lat/lon coords).
        ds_oisst: Optional observational climatology; required when obs=True.
        obs: Whether to also average (and return) the OISST data.
        lon1, lon2: Western/eastern box edges (converted to 0-360 by pacific_lon).
        lat1, lat2: Southern/northern box edges in degrees.

    Returns:
        Tuple of numpy arrays (control, G02Sv, G04Sv, P02Sv, P04Sv, PSalt),
        plus the OISST average as a seventh element when obs=True.

    Raises:
        ValueError: if obs=True but no ds_oisst was provided.
    """
    # The original assert-based checks could never fire: `assert` raises
    # AssertionError (not the ValueError that was caught), and `ds == None`
    # on xarray objects is element-wise rather than an identity test.
    # Use explicit `is None` checks instead.
    if not obs and ds_oisst is not None:
        print("ds_oisst not None, set obs to True")
    if obs and ds_oisst is None:
        raise ValueError("obs=True requires ds_oisst; please insert ds_oisst")

    def _box_mean(ds):
        # Slice the region (longitudes mapped to 0-360) and take the
        # latitude-weighted spatial mean.
        return weighted_mean(ds.sel(
            lon=slice(pacific_lon(lon1, to180=False),
                      pacific_lon(lon2, to180=False)),
            lat=slice(lat1, lat2)),
            lat_name='lat')

    model_runs = (ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt)
    averages = tuple(_box_mean(ds).values for ds in model_runs)
    if obs:
        averages += (_box_mean(ds_oisst).values,)
    return averages
# +
# %%capture
def _monthly_sst_clim(filename, yr0, yr1):
    """Open a set of monthly POP SST files and return the monthly climatology.

    The 17-day offset shifts the timestamps back into the month they average
    over (presumably POP stamps the end of each month — TODO confirm) before
    slicing model years [yr0, yr1) and grouping by calendar month.
    """
    ds = xr.open_mfdataset(f'{directory_data}{filename}', combine='by_coords')
    ds = ds.assign_coords(time=ds.coords['time'] - timedelta(days=17))
    return ds.isel(z_t=0)['SST'].sel(time=slice(
        cftime.DatetimeNoLeap(yr0, 1, 1, 0, 0),
        cftime.DatetimeNoLeap(yr1, 1, 1, 0, 0))).groupby('time.month').mean(skipna=True)

# Keep the grid coordinates before collapsing to climatologies (the open is
# lazy, so re-opening one file set for metadata is cheap).
_ds_grid = xr.open_mfdataset(f'{directory_data}{file_g02sv}', combine='by_coords')
lat = _ds_grid['lat'].values
lon = _ds_grid['lon'].values

# Hosing experiments average model years 201-500 (300-year mean); the Pacific
# Salt run averages years 101-250 (150-year mean), as quoted in the caption.
ds_g02sv = _monthly_sst_clim(file_g02sv, 201, 501)
ds_g04sv = _monthly_sst_clim(file_g04sv, 201, 501)
ds_p02sv = _monthly_sst_clim(file_p02sv, 201, 501)
ds_p04sv = _monthly_sst_clim(file_p04sv, 201, 501)
ds_psalt = _monthly_sst_clim(file_psalt, 101, 251)
# +
# %%capture
# CESM1 pre-industrial control: monthly SST climatology over model years
# 800-1600 (the 800-year mean referenced in the Fig. 6 caption). The 17-day
# shift moves the timestamps back into the month they average over
# (presumably end-of-month POP stamps — TODO confirm).
ds_cntrl = xr.open_mfdataset(f'{directory_data}{file_cntrl}', combine='by_coords')
ds_cntrl = ds_cntrl.assign_coords(time=ds_cntrl.coords['time'] - timedelta(days=17))
ds_cntrl = ds_cntrl.isel(z_t=0)['SST'].sel(time=slice(cftime.DatetimeNoLeap(
    800, 1, 1, 0, 0),cftime.DatetimeNoLeap(1600, 1, 1, 0, 0))).groupby('time.month').mean(skipna=True)
# +
# %%capture
# OISST observations: daily files resampled to month-start ('MS') monthly
# means over 1982-2020, then averaged into a 12-month climatology.
ds_oisst = xr.open_mfdataset(f'{obs_oissts}', combine='by_coords')
ds_oisst = ds_oisst.isel(zlev=0)['sst'].sel(time=slice('1982-01-01','2020-12-31')).resample(time='MS').mean(
    skipna=True).groupby('time.month').mean(skipna=True)
# +
# %%capture
# Area-weighted monthly-mean SST for the six analysis regions (same boxes as
# Fig. 5). Each call returns seven 12-element arrays: control, the four
# hosing experiments, Pacific Salt, and OISST (obs = True).
# north pacific
ds_cntrl_box1_, ds_g02sv_box1_, ds_g04sv_box1_, ds_p02sv_box1_, ds_p04sv_box1_, ds_psalt_box1_, ds_oisst_box1_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = 170.5, lon2 = -150.5, lat1 = 30.5, lat2 = 40.5)
# north atlantic
ds_cntrl_box2_, ds_g02sv_box2_, ds_g04sv_box2_, ds_p02sv_box2_, ds_p04sv_box2_, ds_psalt_box2_, ds_oisst_box2_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = -50.5, lon2 = -20.5, lat1 = 30.5, lat2 = 40.5)
# equatorial pacific
ds_cntrl_box3_, ds_g02sv_box3_, ds_g04sv_box3_, ds_p02sv_box3_, ds_p04sv_box3_, ds_psalt_box3_, ds_oisst_box3_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = -170.5, lon2 = -120.5, lat1 = -10.5, lat2 = 10.5)
# south pacific
ds_cntrl_box4_, ds_g02sv_box4_, ds_g04sv_box4_, ds_p02sv_box4_, ds_p04sv_box4_, ds_psalt_box4_, ds_oisst_box4_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = -160.5, lon2 = -110.5, lat1 = -40.5, lat2 = -30.5)
# south atlantic
ds_cntrl_box5_, ds_g02sv_box5_, ds_g04sv_box5_, ds_p02sv_box5_, ds_p04sv_box5_, ds_psalt_box5_, ds_oisst_box5_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = -30.5, lon2 = -0.5, lat1 = -40.5, lat2 = -30.5)
# equatorial atlantic
ds_cntrl_box6_, ds_g02sv_box6_, ds_g04sv_box6_, ds_p02sv_box6_, ds_p04sv_box6_, ds_psalt_box6_, ds_oisst_box6_ = grab_weighted_avg(
    ds_cntrl, ds_g02sv, ds_g04sv, ds_p02sv, ds_p04sv, ds_psalt, ds_oisst, obs = True, lon1 = -20.5, lon2 = -0.5, lat1 = -3.5, lat2 = 3.5)
# +
# -------------- create figure
a = u"\u00b0"  # degree symbol for titles and axis labels
vanom1=-10  # NOTE(review): unused in this cell; kept for consistency with sibling notebooks
vanom2=10

fig = plt.figure(figsize=(8,7))
fig.suptitle('SST Annual Cycle', fontsize=12, x=0.475, y=1.0)

# Line styles for the seven curves, in plot/legend order:
# control, 0.2 Sv Global, 0.4 Sv Global, 0.2 Sv Pacific, 0.4 Sv Pacific,
# Pacific Salt, OISST.
_LINE_STYLES = [dict(c='k', lw=3.5),
                dict(c='b', lw=2),
                dict(c='navy', ls='--', lw=2),
                dict(c='red', ls='-', lw=1.75),
                dict(c='darkred', ls='--', lw=2),
                dict(c='goldenrod', ls='-', lw=1.75),
                dict(c='limegreen', ls='-', lw=1.75)]

def _draw_panel(ax, series, title, ylim, yticks, right=False, bottom_labels=False):
    """Draw one annual-cycle panel.

    Args:
        ax: matplotlib Axes to draw on.
        series: seven 12-element arrays in _LINE_STYLES order.
        title: panel title string (already formatted).
        ylim: (ymin, ymax) for the y axis.
        yticks: tick positions (also used as the tick labels).
        right: place the y label/ticks on the right side of the panel.
        bottom_labels: show month tick labels and the 'Month' axis label.

    Returns:
        The Line2D handles in plot order, for building the shared legend.
    """
    months = np.arange(0, 12)
    handles = [ax.plot(months, y, **style)[0]
               for y, style in zip(series, _LINE_STYLES)]
    ax.set_title(title, fontsize=12)
    ax.margins(x=0)
    ax.set_ylim(ylim)
    ax.tick_params(axis='both', labelleft=True, direction='in', labelbottom=bottom_labels)
    ax.set_xticks([1,3,5,7,9,11])
    if bottom_labels:
        ax.set_xticklabels(['Feb','Apr','Jun','Aug','Oct','Dec'], fontsize=12)
        ax.set_xlabel('Month', fontsize=12)
    ax.set_yticks(yticks)
    ax.set_yticklabels(yticks, fontsize=12)
    ax.grid(alpha=0.4, ls='--')
    ax.set_ylabel('Temperature ('+a+'C)', fontsize=12)
    if right:
        ax.yaxis.set_label_position("right")
        ax.yaxis.tick_right()
    return handles

# -------------- a) subtropical North Pacific
ax1 = plt.axes([0., 0.66, 0.475, 0.25])
_draw_panel(ax1,
            [ds_cntrl_box1_, ds_g02sv_box1_, ds_g04sv_box1_, ds_p02sv_box1_,
             ds_p04sv_box1_, ds_psalt_box1_, ds_oisst_box1_],
            'a) Subtropical North Pacific \n(30.5'+a+'N, 170.5'+a+'E; 40.5'+a+'N, 150.5'+a+'W)',
            [9., 25], [10,14,18,22])
# -------------- b) subtropical North Atlantic (labels on the right)
ax2 = plt.axes([0.5, 0.66, 0.475, 0.25])
_draw_panel(ax2,
            [ds_cntrl_box2_, ds_g02sv_box2_, ds_g04sv_box2_, ds_p02sv_box2_,
             ds_p04sv_box2_, ds_psalt_box2_, ds_oisst_box2_],
            'b) Subtropical North Atlantic \n(30.5'+a+'N, 50.5'+a+'W; 40.5'+a+'N, 20.5'+a+'W)',
            [9., 25], [10,14,18,22], right=True)
# -------------- c) tropical Pacific
ax3 = plt.axes([0., 0.33, 0.475, 0.25])
_draw_panel(ax3,
            [ds_cntrl_box3_, ds_g02sv_box3_, ds_g04sv_box3_, ds_p02sv_box3_,
             ds_p04sv_box3_, ds_psalt_box3_, ds_oisst_box3_],
            'c) Tropical Pacific \n (10.5'+a+'S, 170.5'+a+'W; 10.5'+a+'N, 120.5'+a+'W)',
            [25.5, 28.5], [26,27,28])
# -------------- d) tropical Atlantic (uses box 6 data; labels on the right)
ax4 = plt.axes([0.5, 0.33, 0.475, 0.25])
_draw_panel(ax4,
            [ds_cntrl_box6_, ds_g02sv_box6_, ds_g04sv_box6_, ds_p02sv_box6_,
             ds_p04sv_box6_, ds_psalt_box6_, ds_oisst_box6_],
            'd) Tropical Atlantic \n (3.5'+a+'S, 20.5'+a+'W; 3.5'+a+'N, 0.5'+a+'W)',
            [23., 29.], [24,26,28], right=True)
# -------------- e) subtropical South Pacific (uses box 4 data; month labels shown)
ax5 = plt.axes([0., 0., 0.475, 0.25])
_draw_panel(ax5,
            [ds_cntrl_box4_, ds_g02sv_box4_, ds_g04sv_box4_, ds_p02sv_box4_,
             ds_p04sv_box4_, ds_psalt_box4_, ds_oisst_box4_],
            'e) Subtropical South Pacific \n(40.5'+a+'S, 160.5'+a+'W; 30.5'+a+'S, 110.5'+a+'W)',
            [14., 22.], [15,17,19,21], bottom_labels=True)
# -------------- f) subtropical South Atlantic (uses box 5 data); keep the
# handles from this panel for the shared legend below
ax6 = plt.axes([0.5, 0., 0.475, 0.25])
legend_handles = _draw_panel(ax6,
            [ds_cntrl_box5_, ds_g02sv_box5_, ds_g04sv_box5_, ds_p02sv_box5_,
             ds_p04sv_box5_, ds_psalt_box5_, ds_oisst_box5_],
            'f) Subtropical South Atlantic \n(40.5'+a+'S, 30.5'+a+'W; 30.5'+a+'S, 0.5'+a+'W)',
            [14., 22.], [15,17,19,21], right=True, bottom_labels=True)
# -------------- shared legend, anchored below the bottom row
ax5.legend(legend_handles,
           ['CESM1 Control',
            '0.2 Sv Global','0.4 Sv Global','0.2 Sv Pacific','0.4 Sv Pacific','Pacific Salt','OISST'],
           bbox_to_anchor=(1.95, -0.675), ncol=4, loc='lower right', fontsize=12)

plt.savefig(f'{directory_figs}amocenso_fig6.png', bbox_inches='tight', dpi=200)
plt.savefig(f'{directory_figs}amocenso_fig6.pdf', bbox_inches='tight', dpi=500)

plt.show()
# --------------
| notebooks/figures/fig6_annualcyclebymonth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <img src="data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAPwAAADICAMAAAD7nnzuAAABUFBMVEVi3FP///9e2077+/v19fXm5uaysbKq6qPT09OLwoTAwMBh2lJf3k/y8fLq5+qK2YHa4Nmx26xe1FBd0U9bzU1VwEhY2kfX5NXE8L9NrkE7iDGqxqbo+eZ<KEY>2M8jzFVnkxHoTyXu5Pd6txGij5DmTdYnFDw++42kSl5226a5pLNzc0xgCc0iCni+ODY9dTG2cTo8Oe37bGk6Zxq3lzN8smU5Yue3peH4H1RzEG61rd+0XSItYO+47qv0qtmzFlTkEw7my5z3Ge2zrRcvFCqzKao26MwiiO97ribzJV0uGwcfgdfk1kkehdwqGlyvWrI2sePuItxzWaN0oWm1KJzyWpuqWZEtTVcrFNWsEs1pSWIroQIbgB/xHeMx4Z8yHR6pnac1ZaExH3F0MS3yLW6tLuqt6iiup+9b4owAAAZE0lEQVR4nO2d6UPazNbAIbG1bezja0jAgEkQogiIigUERCWIS0DcnmpRW5f2ttXe2/b///bOTGaygS2bWCznQyvDZJjfLGdmknNyXO6/WFyPXYHHlN/BT81M5kcHUfKHayuLXcAra6uVUMHlogZRXK5CqFLMZzqCX5xZ93pRIQMroPZj3sLolNIm/FQ+6R1kbot4XctrdBvw9GFyoLvcIVRhfaVl+JWk6wmhQ6EKq82UXyO8kn8qA94q3lAT1dcAP7X8BNFdsPMPGxSfE34q+TTZAb1r1Kn37PD01FOb7VYZW6d/AU9nKk+YHUz8VeVeeHqq+KTZgeSV++BH1p86u8tl2+9Y4OlV72NX7eGFytDN4OmZsceuWR+ESip0Izy9+OQnPBIq3ww+/9jV6o9QFXPgE3g6E/orOh6s9qvGwMfw9MjoX6DtdCmsOOFzhceuU99kbHWEtsLTdP6v6Xggih3+deWxK9RH8R7irnfpM37lsSvUT6GSr63wr/+SdQ5LIa53PYb/C3b1Vpl4bcKP+J7sLYymQq2a8PTIVuix69NXoYrPRgz41xO9LNnx4OQPFCrpQ5Neh9/v3SpPhZKmVCoVlqEYppXr+thaVCX7msCPPNvo3WnWu2ael93KYmYtP86pDPPbBlhfXl5eXV5Nsi01VZcS2iLw9Mj/XfQQftLtEGXlTvKwv0EK4YcKhxL3u6w9kIIVfrx3o60RHt4cPFc97K+uogj8hBiWHr7vmYl/+gYPfuMozP1qQBvwtaUg//Bd3194SC9ZoHTVZraGAR9IR3mPBR5n7Fn1sEz8A9U9hH/dB3i3cgzoYQaK8npDyWIxWWG9FFaElJch8P9qV6qRCjImScae1RDKA8PT2Wo1G7fQxzUR0lOF5ZlFRZeVDUmFk2Esr5CHqDRMPwQaggEZ10jGzMYvJ0378sDwytuT9MHS0mXWeEwQ08IeJjRjHw93MsDyHtpHydYVz4XWbEn0vtRDfOah4Wf9iaAgCsIRuWnkmxYAgPM5+URY9jjhS5ogeaYcGbdAM/Wqkg8PHwmGZU6SriZw7XOnQd7jXXMw0e9FWXXAV9NBXm3QHRNAE/ao7/sAL8gsw7KeHWITElsSJWrdyRQ/FhvhozxbdGbMvevZItgPeKTeGYagBVBzAIq4r5715QjUmXa1v0hmg5ID+i0Aet6jNmSsidygwbuoUdKhfsBE5fePpw8ODpYWSmRARMTz8QusF7Ozt6enC6CVPOr+e5hxaWmuhLVGLSj3qOv7CL++SODhBoblwoImCIJ4g7s0ANpE3SHr/AnQk0EB7ge5KyEI84k3eMUsJcKegYMnJgEAHtSe8px/uNi42DyX8aqX9QfDnAGfSghhXoa7QWYHZ+S39O/qc70a9/2DH8uTOZ8C8KFVov9W8GLmm4+GOXN7mwhzYIsD9zgzuNEy2JAqviBKgwbPkNUtloryFfsmR4dPiBZ4ODoAe6hxmxxfwCV2LX2Dp4q
4A5VbMLmdy3xzeHDZYWPGgYOnqMqiAekX9vHf9fLC6Vn8Hngw36lVclF54ZJkBPDygMC/PRAk1VsZNbapgVRExKorm0pFEtpRE/iqDk86vp5KJ4Lay4GDj5W/bBzOmEafuXl/Yg9/nE1FouLVVhN434F25QkxLNaKt2mg/MkGeXDg3TQQ64y9TUWE80WDSZQkQ9tDeNJK8e2tqbzK4e/KSwLPcYa2Hxh4h8RSYJXfwbred3m9t0mWPATPWjXhxBWPM+Yu93aK5KtBhacDYNCLEku+zGVXjHM+gmfyltw1TSBHQWXF9JQZUHjlk9+fAPt1armJy4cOv275JnAQ/Nwk40DCK9VdoNwBO+tiLMt33GeFZyzXgVNdeN/8mPMNJDytKDlfbP4kkgiKEtq7sMTqnc768bEursOHJkln5z6BI628nyMZ52sG/B++yaGWDye2A4EqlEDs7HYhnYboPL5xzahft0u+eL165k+XS9slkDMGNCE4sDDqxVap7stWA5dwinBX77ZL9Xg9G/MfvNyuwQJj0+IfDu+i1CtBWyKSiEbBqZQ3nlcxDMcL13NzkUgiKlx91GDOqIBu1bMcf3M9Nx1ZSiTgkdYji9HrhTnwMSiKGsyoBcN/+KkOlMzJYVN4cD7lWMtNCNYj8aIIWiQsSxLPwyz41iT8JiyCA3wY3tJl9E96PplHZfXqOd4DwrOsxyIs67jnzLAejpMkeHDFOUkG/RuO0xPgt3o+lpT4p9/DQ2XbpOFhE0yELQK/wlkariSf9HyMPb37Cj4gfA/lYZ7YDgj8w8gQfgg/hH/s6vRXhvBD+CH8Y1envzKEH8IP4R+7Ov2VIfwQfgj/2NXprwzhh/Adw7fqEVQIIfmD/LS7hae8hQq0hQ9Rv3uVViG/MgNkZbLSB9eh1qQ7eMq7vraSmZqaymRm8sVf4lMh/CB+6rxHJoTdS1fwY5U1yyNkJbPM3l+ACf9B7pnZdJfSFXyDndjU+L2uABTxL4gf98xwuFvpAt6bd7KDQpbvfZREej4HnzD/GfSdwxtWhVZR3kn3zehQfqaUBRKYewrwjRakbndZM56d4+XPWAUZLhyM+P3QMAXDY3cx6zppetKa/1h/E6dS9kTkdNaB/23H8FQSm4kp1dvbs2ocDYNqOiHK2HOssp6fXFs7HC3iN+8wxY2XZSCn5PH6GMyxMjOZXzfeQBda1iVEMcnRyZnJ0XXra3soV3I5D1Lzq8WCkUwViiDnytrhapKh2hxQncMTo6JqKpVOH/hvq4o7vgutK5ClbcV4z64ys6E/asamZFN7MpwZVMGwu3ZnRkP6UCDOJ0U2j42v1pLGM1kqOUksshbX1r16KrW+ZiQeJttcRTqHJ2axP6GpTTCaiJyWyinoGwEq4C1anaLoNeg2R0yNMnvQUYIq2CyWMqh9DPgP5pRa3MEKghq1emQpoypM9toSp8bVtvC7gMeGlUr5WuR5PiwEE6AVkGVJg/tQRuZYYnwXv0H21I63kE5BRuM665czKlKhlHNdPYQeh84F52tbRhudw68bbT41sbG5o3K8KAho0FPrDevAFs8ReN80WOepBku1LTAeGn2ugNCfoX40PHRMueCYVecP5fbuXW16Ce+qWLtncWVyeUeVoRENfOGYs55KWeA5A17gqGXyRb1OzPAvJLYpvHv7CjRW47oa/yYTnUv7ssT3ajvcxjraMTzj9IJTFjdUFf6w2Um5ahbNjfhbaGFMrKsBfIEY387Oz89i28ISWAQs8CZsVpAZl2GT66vq+asLCfEOZ/20O79bxU1y3YapVuc9z6gNb45e3IDwxHKajqVSqZOYmw6kUtCkzgKPp0zubcofSSxh5XEtswZ8IHXi9xHcaJglc0y5PQFlZt302Yk/IeKOvwXlR5ay+oeYJrc88LuA94RLDWPxEEzcJP67eoLQ3p+lwcYmaBv2WE9V/01rgvgRWxe/DHPj+NL6f/z+yCUeyr5r0YMvoD/BIhPTtVPYnB9wg/w
nBYr5+Flvw1Ki9aNDF/CsJLwkHoGGbKjGfJ6HFRRFuApEwZT3mPAS/jMbq73f39/HQ6ImSAT+UwpccoML980ZF/hgM4IiNVQk/qF4DBUzocPX51o3Tu3iYMN4eCFxWjPcPJFk9lTcS7n/AHZe4iSwCoi8udQB+B08g+1uCCWNJ/DI+5j40QF4YqBfPUFFyqDIsKwS3WIrJr4Q5Fsd990caRmPHA4uHfjL2Zwx/pUvV1gR+k4isBrQohC9+sQK3/Q981ktbMBDo3xjqMwZFwQOLEU2LPJ6m+8mWlZ5XZ3nQx6wuAejgP/UeDnA+4+4TspJRD/BgMqi7a0FvtmZyO2zwAehV50FnnhdLhGvZOhvuNqsFOVt65O+C3iqkDn0qMgyFkzrAP7x2scN/NeCXosxfHfLAi/jP+ufYha5TYgmPG+DJxfEp1GRlJey7CMVWymf5hMtG2V3caSlVuBr8D1wDPLiR+wgBpYarIRBN8GtnHd0LYROWxZ4CbsQ+OCZCIof/gPmOYE/dcBLd6RpYZHgXJBnKLCZwonzejEHEViMP4LPVg8JTxV0rTWVH08mK8kLvLgrZW2PzOjs8XmleAhbSLXPeWmT+A5Oa0FNCx4ppagWDIqyBZ61wZML6InzSqW4AmNOgKMS/iHfLjhUaNpxPH4MSgHFPLzCM9XNVGbFeINufCHKk3HvVjIZXRXsg2OLFV7Cc5jOfvl6cbcFLp75EOYlz73w3JbxYyt6M09teBii7n2xb1/vJkD7LH67QpbrDwxvrOYOgW/3aaLMt3Y8Vnh1nDSWQl6WMLUJ6kx2eA3wnvPGe2Z5j3mIWFzUv0evXWl5c98xfKXpcpXzw3dCnC86kun3onWHJ3nU/YZLJ6RfwXMXzvzKZ5kbbwhOEt9r48Z45wcbbj/n/GWwyPqhZ6C6af+KDhwkRMveHqzhV1uO2AqlqOVg0wDPeqQje9/n4O1C9cJB75sLiq0f6zqGZ7mr45KdUam+TUUEUG1W/Zq11jPmONhILMuJR9aLldg8yMHeD89y4ZfWFy35ZtMJOII++yyJdHU3BYt5+J6Hnj+XZ+abj5TAW3AaCaLXnrHS3kufmZ7y+6OihPe0cQAPcsjiXE0xssymkKdlU/g4utfNSuJ1jLRXPLabQksaK90YP+Su3qLjRB96HtBLYSERSS+clsEGZdcPz5XIHwr5xUhhbfr0rHZ2uwvSQZuIYNryWjqFTrdgHYZtp0UWyrFaDGUBBxWZBSxCBGRJoZfJgLMDuUBi9CKX4AVns/CCBBrf5IcCsfIsrkHLO/vuDjYsJ6M7dxG4Q4mAo6YgkmUGfMcLYN8PNh1+6D0WljzoHASzoVvXDGq7aCKNrxXg6gy7N4ryiAhexhdAZ0NID4pMoH2MXiRuZvOHYFa5jWdhXR1sID68cxeEIghwpSaaFhw+ZLjv1b+Q9bkA80IfMdRAqO1AjmgU5AB1hh0Gx4MA8/Bwk8ZyTS+ARepuZ66GYvh2buF1+YhaP2DJPBBZRl5itq/wN9ihDiZA4TzEg4zF1/IwTeeDXmQSLslyAdO8SHsqb6/BQ8Prv82wSBo8vnC68eC20THMzMG47HmafWpS5H3FtFj3Htjk3O/r9lsnuPbd5JpfoLvmtVGMftXQIGkIP4T/q2QIP4Qfwj92dforQ/gh/BD+savTXxnCD+GH8I9dnf7KEH4IP4R/7OoYQnmbe6yR9J54K3QF3xiUrCHsVodxywrro0iWHfdpC8t6erH159C/kK7gK+tIloukJlRlFYkRd4tK4iyV9u6tEoecjGQzMTHDO7X3dOIe6QbetBDfwY8ViM/RjIqfHhAjW/qivRf4EVcsn2gPZBTCFqfbQrgHr4LsCp5YRix+xlX0ksflV7pdjOGLkrnhuXb63oDXwk3ha209kLxPuhr2xhtca/pzYcPs1v1NNwczjFdKmthW1xvwSzajOgMexoB4ZHhqnMTfuEZVMaPwZDU93gp53fcpNNhoo+QW4HsQ2KK
7nid1URaQyWfBMBDK6WbvBTwNchEchAOvU40zwJF+D7zrT4JXiYVYGQ5zqmhYyCgvofmFMQ2gzSzLUF5vBSxh60lVteNTXmZ9dXS16DHCktngwXW6MIx12KsozYx45rV9fnB4F0PiC4Fh7mGsPkBZSGs4nC4kYAyOSp64lEyeW8KPUYV1YtmVIYuCDZ4QuydVCzxud+VCf4JNRt3KTuvKpUt4lVhGamCYhyzWafFjMCy9uKq5ExihYtViOaXsG6aClC24x8wmWiSt8EbgspmwvGPAa9+MiI7w7dnGqINGbw9thIjFcLQ5Bl1rdQKiy2ABIMaxgbTAq3m7KRkMU4a0QNHujoSMEW3wZPj45oSwCR8k4RF819CGhdif5y6DrZtjdWuZQcZ9TZNZm/dXKWh6A8wmBNVpsUkfici5MOQ0ZsxdAXoLPDFJzS1EgrwF/orYMSL/VGwJ7PYhK8j+9DxD4snlQA1soeVyc2EP6Zv5IF/ENoe0QkLe5y6RuSBxsFMUMjK2Ab0J/458v5CGQX4s2p7DF9SWgHJN6qOe/pRq3eK861Mdi8e9cime478w3Kn4gdQUdAbumFxg1nBMqcEeI+tB9vayXMcFgE2DAT+HLQ8VFP+CNeGjYZWUCZYSFTseKH5/tPXdT7fwDDEhrn3Ew7CKGUofidvbbUTc1CutQO8o7Ruu9LRgxOfz+dMJ7RqbbW6LkvGaBZxEx1BIBMa2zpMptxA0GqJ6Egn2wwgRX5/EYzur4T66LeP6asTedCFBrNDrJzAg1xUmuwSVxkN3Nh0E6d/0T3Uwtx2OKAGd3b7JYfFPw3GP871Ftr99gydxFHPYMDi+u4CBjnBrVA+CxE0k8Pby+MPmOR4jsSUBT5X47OW7D5ub2Ig6Phd2wPtOIuggQ1mHvYc42eQiGrbJjqfa2vN3fSeH+NHTcX38V/1REqdBT6BPI4YvmZLLLULRP5UOhK94lOjJZDN86XRBImcDx94enxmVUw1nj6Xa0PW9sMPjbFbfyqeIcGereC6dCN/jS3agNVjR6zAN/ldZPY6XA568jyCwpOtQZVYPDdE/eGN/jyS+kAh/sK15cF8faupLVj8Ifm4KX9b2HBfAINaN8GTU1cv6/9l5sBVo4/DY/Q1MY5+DBExwnrM1xy40AieKwWeVQDpIhr0tPXspOOHd8Ru5Yc4zLkZXeYquXehYO4t8b+BV6672FKCqG5aUHBqJePOZPUmll8woH0Dz48vepiOW9KDY6HYItz4N53mvbVOZ2/VH2zro2uAvOrp1bQ1Zo4AJ7mGtaiAA9TQZn3CrAo2jb0rfPiLjahWvVvV/YRBO4eNdCRldywY8HSN/3PDOpQ6e5azw2fbUHbh624R/tjHWATvDbJq/X0MHd2sY4Vs4EiniU6+cXe/t7EDPmDtoNc6pxA2t9G6P3zkHCizzgZc5Y4dHx05w6B53Hb6ExglvixU0m2rDfRxKqGSB3+8E3uWqmApuF0065txI8C2gkWi+TSG+gmN1bd3AU61xIKAzM/qfi58lM1C3LxW5JhtcqPOc8JRF4eRO2ryxR1VM+JF/JjpiN/b3gIxEITImfRWPRLZR37+HB3H2osFhbvFcMg826ah4hzfQ8WuZdSg88zUNbn2Rb+t+NpWsW+C3Qx3BUxfETSwQCepvACLNoZQjuv5ldpwucHGwu4VeUw0edrGg5WBzIMjGJuHIfqrTbweYKg+5tbUz6qnii2ew5gj+WT3ZkcZjyP17ZTahh4w1lr/4NO4hhpFmbJ509Xl0TGNY1d73ytkBOO2ZR1pB8mySIF/XlvM8CuxLmU8G3NV55NbWDvzFP69N+BedPak09jn1eTzpjOaA+3riCyPd+Qg+7UPxlmFmxiO9M/3zctnTNDzCkFsc8E4Oq5KQfVnRiFiMpnclRFEUucnV1kler/f2MwLvBvB3nbCDUjYntmuBQKBsLDXsOIxZFghcmm5uoI/3vtRKWV+9Gijvpk03NI8sHp+BdF+2Gju
FPmY8UAXjd0fQG74MPavYnSP0IXYm8Bd3R2fgr1N00JnJ5MdXcc/H213kwS//14QH6r7a2a1whpWFYAKIEU8MBiWDCVHLUyXobibeXE9PTyPXMNMNDYbwCkan56YjoAQB3ttDTocakKCAXhOhf9A0UVa5KwGmQ686OLcUQ92gRb6taidfPBuxwL/obNKjt2fY44mxeswyEomMZONkPiwCCfOSeexmUJwyPVAZieoGW0R3mGLRB+I+xUJvNvSnx2s9LUEv3pZfGqCLt2aFB+P+yNsZPOPxcBxnBh6DQCTFlk/3HYMRyaxf4MhkklmCEckMu9QZn/CfLGs+GITS5mEWSgGM+hE3hoeT/r+dvpK3MZxY8wBjllBljem2C6zXW5zSjD9tm7tqyt/6S1J0Gbt4o095HR6M+zdfOtvkPYJQxgMgtwIjvrbb8aEqHvUYHnT9/yp/jl3O70SVvr2Hi0QNrB1Is7a3yIOOt8GDru9wtXsMAbpPj1qZSLTxlhAslf+RjtfhUdf/t0OF/wiirxJ48WiTfezLmwb41y/eBB6oqg8hSPVzzrWjBaGSr15gdUfgUde/+jIwXe+6b035rYT+Z3a8CQ9m/avNgdH4nUrhyNLxBF7v+h+DM+07E+rbK0PVW+DRHhfQP+m+p759twx6K/wIoq88ZfqvdnYTHg38N69+nj9ZeubLKzThm8Fj+h9PVusdPYfslo63wsNpD1T+96M/KOxI76Ty47uT3Q4PlR6gj20+dk17LVToy49Xb5zsNnhAP/IMaL3vP86elN6jxr7+fN6E3Q6P5j3s/OfPy+CQ9yQWfQCxCdCbsTvh9RXvxatX35/HNiuFscHmp8ZcoeSXH9+/Q3Sbnm8Oj+hh54Pe/3H2ZXOnMDao4gqdf30Ze/4djnjU7U72Rng48Qn+9+c/fvz8eXb0cgClHPv58wckR+iNQ745vIEP+eEAGFwB1X9D0JuwN4UH9Agf8r9BLTCYAsEh+T3o98BjfMgPGmBwBdb/9ch96PfCY37cAAMqr18j8vvQfwWv88MWGFShfwX+W3izBQZRfkvWAvwTlr8a/v8BTv5WA11rqxEAAAAASUVORK5CYII=" alt="DSW LOGO" />
# + [markdown] colab_type="text" id="0eiKSLYG8XvO"
# # Challenge : predict conversions 🏆🏆
#
# This is the template that shows the different steps of the challenge. In this notebook, all the training/predictions steps are implemented for a very basic model (logistic regression with only one variable). Please use this template and feel free to change the preprocessing/training steps to get the model with the best f1-score ! May the force be with you 🧨🧨
#
# **For a detailed description of this project, please refer to *02-Conversion_rate_challenge.ipynb*.**
# -
# # Import libraries
# + colab={} colab_type="code" id="AGhdl7Bt2xZd"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, confusion_matrix
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
import plotly.io as pio
# setting Jedha color palette as default
# Register the palette as a named Plotly template, then make it the default
# so every figure created in this notebook picks up the colorway.
pio.templates["jedha"] = go.layout.Template(
    layout_colorway=["#4B9AC7", "#4BE8E0", "#9DD4F3", "#97FBF6", "#2A7FAF", "#23B1AB", "#0E3449", "#015955"]
)
pio.templates.default = "jedha"
from IPython.display import display
# + [markdown] colab_type="text" id="LHgro65rxKF7"
# # Read file with labels
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="W1AU8AH8u0qd" outputId="00698a97-027b-493b-a2e4-33fdcc295abb"
# Load the labelled dataset; it is split into our own train/test sets below.
data = pd.read_csv('conversion_data_train.csv')
print('Set with labels (our train+test) :', data.shape)
# -
data.head()
# + [markdown] colab_type="text" id="0XwjKBc63B1n"
# # Explore dataset
# + colab={} colab_type="code" id="NM0feCss5sLZ"
# The dataset is quite big : you must create a sample of the dataset before making any visualizations !
# NOTE(review): sample() without random_state draws a different sample each run.
data_sample = data.sample(10000)
# -
# + [markdown] colab_type="text" id="70MwsoCS3QD5"
# # Make your model
# + [markdown] colab_type="text" id="dPh1qPTf3wZU"
# ## Choose variables to use in the model, and create train and test sets
# **From the EDA, we know that the most useful feature is total_pages_visited. Let's create a baseline model by using at first only this feature : in the next cells, we'll make preprocessings and train a simple (univariate) logistic regression.**
# + colab={} colab_type="code" id="sjEHMGoY3kMB"
features_list = ['total_pages_visited']
numeric_indices = [0]  # positions of numeric columns within features_list
categorical_indices = []  # empty: this baseline uses no categorical features
target_variable = 'converted'
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="SV5E9KMs4xcq" outputId="9d1ed76e-e82e-45e7-f3e5-6d47962caa5a"
# Split the labelled data into explanatory variables X and target Y.
X = data.loc[:, features_list]
Y = data.loc[:, target_variable]
print('Explanatory variables : ', X.columns)
print()
# + colab={"base_uri": "https://localhost:8080/", "height": 67} colab_type="code" id="W8K5DQEvvQgl" outputId="d280ebc9-4d4b-4723-b9fe-32513f898abc"
# Divide dataset Train set & Test set
# 90/10 split; fixed random_state makes the split reproducible.
print("Dividing into train and test sets...")
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.1, random_state=0)
print("...Done.")
print()
# + colab={"base_uri": "https://localhost:8080/", "height": 218} colab_type="code" id="vVu0eXQD4xVc" outputId="83a5f553-f50d-44dc-d12a-6cb21e74e4d7"
# Convert pandas DataFrames to numpy arrays before using scikit-learn
print("Convert pandas DataFrames to numpy arrays...")
X_train = X_train.values
X_test = X_test.values
Y_train = Y_train.values
Y_test = Y_test.values
print("...Done")
print(X_train[0:5,:])
print(X_test[0:2,:])
print()
print(Y_train[0:5])
print(Y_test[0:2])
# + [markdown] colab_type="text" id="7b_aU7ij7K3Q"
# ## Training pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 235} colab_type="code" id="_9bEZ5bn7I5Z" outputId="ad5c8f97-2d25-4827-f1ee-43c665a97fa0"
# Put here all the preprocessings
print("Encoding categorical features and standardizing numerical features...")
featureencoder = StandardScaler()
X_train = featureencoder.fit_transform(X_train)
print("...Done")
print(X_train[0:5,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 104} colab_type="code" id="1qhidLbq7o-5" outputId="6bfb746c-1ff4-41c9-b0d6-a98fd09a444d"
# Train model
print("Train model...")
classifier = LogisticRegression() #
classifier.fit(X_train, Y_train)
print("...Done.")
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" id="Au2TK_vw7rD-" outputId="702789a8-4631-4c29-f297-e4b2901f3195"
# Predictions on training set
print("Predictions on training set...")
Y_train_pred = classifier.predict(X_train)
print("...Done.")
print(Y_train_pred)
print()
# + [markdown] colab_type="text" id="7TY_v9uH_CE7"
# ## Test pipeline
# + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="ngOSdG6-_Cvb" outputId="1e19e8ee-222f-413b-9bc0-e9f41dcca1c0"
# Use X_test, and the same preprocessings as in training pipeline,
# but call "transform()" instead of "fit_transform" methods (see example below)
# transform() applies the training-set mean/std to the test set, which is
# required for an unbiased performance estimate.
print("Encoding categorical features and standardizing numerical features...")
X_test = featureencoder.transform(X_test)
print("...Done")
print(X_test[0:5,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 84} colab_type="code" id="QS1XrzzE_jQI" outputId="866a96d2-4180-4bd1-ce54-ba052e75d485"
# Predictions on test set
print("Predictions on test set...")
Y_test_pred = classifier.predict(X_test)
print("...Done.")
print(Y_test_pred)
print()
# + [markdown] colab_type="text" id="zxJCTlz0_2it"
# ## Performance assessment
# + colab={"base_uri": "https://localhost:8080/", "height": 50} colab_type="code" id="6x7p1nyr_3UV" outputId="8e5b91ba-ca06-4486-d808-37a6aaaa8cf7"
# WARNING : Use the same score as the one that will be used by Kaggle !
# Here, the f1-score will be used to assess the performances on the leaderboard
# (f1 combines precision and recall into a single number).
print("f1-score on train set : ", f1_score(Y_train, Y_train_pred))
print("f1-score on test set : ", f1_score(Y_test, Y_test_pred))
# + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="KhDTCeBy__JK" outputId="72c82d66-d765-437e-e9ef-4ccc80e7183f"
# You can also check more performance metrics to better understand what your model is doing
# sklearn convention: rows are the true class, columns the predicted class.
print("Confusion matrix on train set : ")
print(confusion_matrix(Y_train, Y_train_pred))
print()
print("Confusion matrix on test set : ")
print(confusion_matrix(Y_test, Y_test_pred))
print()
# -
# **Our baseline model reaches a f1-score of almost 70%. Now, feel free to refine your model and try to beat this score ! 🚀🚀**
# + [markdown] colab_type="text" id="6tVVDRABv91O"
# # Train best classifier on all data and use it to make predictions on X_without_labels
# **Before making predictions on the file conversion_data_test.csv, let's train our model on ALL the data that was in conversion_data_train.csv. Sometimes, this allows to make tiny improvements in the score because we're using more examples to train the model.**
# + colab={"base_uri": "https://localhost:8080/", "height": 154} colab_type="code" id="M14RHUadzE2p" outputId="abcfcfec-9461-4579-adbd-f23270f984eb"
# Concatenate our train and test set to train your best classifier on all data with labels
# NOTE: X_train and X_test were both standardized above, so the classifier is
# refit on scaled features consistent with the transform used at prediction time.
X = np.append(X_train,X_test,axis=0)
Y = np.append(Y_train,Y_test)
classifier.fit(X,Y)
# + colab={"base_uri": "https://localhost:8080/", "height": 151} colab_type="code" id="Tr4CEaPzzbP-" outputId="f0d1c8ed-be4b-4974-d7b9-f23a49344d9d"
# Read data without labels
# This is the Kaggle-style hold-out set for which we must submit predictions.
data_without_labels = pd.read_csv('conversion_data_test.csv')
print('Prediction set (without labels) :', data_without_labels.shape)
# Warning : check consistency of features_list (must be the same than the features
# used by your best classifier)
features_list = ['total_pages_visited']
X_without_labels = data_without_labels.loc[:, features_list]
# Convert pandas DataFrames to numpy arrays before using scikit-learn
print("Convert pandas DataFrames to numpy arrays...")
X_without_labels = X_without_labels.values
print("...Done")
print(X_without_labels[0:5,:])
# + colab={"base_uri": "https://localhost:8080/", "height": 134} colab_type="code" id="LoUISfsT0HMR" outputId="e42dc389-5e77-4e13-ccbc-1fef4aa2c0ca"
# WARNING : PUT HERE THE SAME PREPROCESSING AS FOR YOUR TEST SET
# CHECK YOU ARE USING X_without_labels
# Reuse the scaler fit on the training data (transform, never fit_transform).
print("Encoding categorical features and standardizing numerical features...")
X_without_labels = featureencoder.transform(X_without_labels)
print("...Done")
print(X_without_labels[0:5,:])
# + colab={} colab_type="code" id="7DuWSEHuwEQJ"
# Make predictions and dump to file
# WARNING : MAKE SURE THE FILE IS A CSV WITH ONE COLUMN NAMED 'converted' AND NO INDEX !
# WARNING : FILE NAME MUST HAVE FORMAT 'conversion_data_test_predictions_[name].csv'
# where [name] is the name of your team/model separated by a '-'
# For example : [name] = AURELIE-model1
# Build the submission frame. Use a dedicated name for the dict instead of
# re-using `data`, which silently shadowed the training DataFrame loaded from
# conversion_data_train.csv at the top of the notebook.
predictions_dict = {
    'converted': classifier.predict(X_without_labels)
}
Y_predictions = pd.DataFrame(columns=['converted'], data=predictions_dict)
Y_predictions.to_csv('conversion_data_test_predictions_EXAMPLE.csv', index=False)
# -
# ## Analyzing the coefficients and interpreting the result
# **In this template, we just trained a model with only one feature (total_pages_visited), so there's no analysis to be done about the feature importance 🤔**
#
# **Once you've included more features in your model, please take some time to analyze the model's parameters and try to find some lever for action to improve the newsletter's conversion rate 😎😎**
| projects/05-conversion-rate/notebooks/99-eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Visualizing the gender gap in college degrees
# ## Data description
#
# The data set contains the percentage of bachelor's degrees granted to women from 1970 to 2012. The data set is broken up into 17 categories of degrees, with each column as a separate category.
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')

# Color-blind-friendly colors (RGB normalized to [0, 1]).
cb_dark_blue = (0/255,107/255,164/255)
cb_orange = (255/255, 128/255, 14/255)
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']

fig = plt.figure(figsize=(18, 3))

# One panel per STEM category: women (%) vs men (% = 100 - women).
for sp in range(0,6):
    ax = fig.add_subplot(1,6,sp+1)
    ax.plot(women_degrees['Year'], women_degrees[stem_cats[sp]], c=cb_dark_blue, label='Women', linewidth=3)
    ax.plot(women_degrees['Year'], 100-women_degrees[stem_cats[sp]], c=cb_orange, label='Men', linewidth=3)
    # Hide the plot frame for a cleaner small-multiples look.
    for side in ("right", "left", "top", "bottom"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0,100)
    ax.set_title(stem_cats[sp])
    # Boolean False (not the string "off") hides the tick marks; the string
    # form was deprecated and is rejected by matplotlib >= 3.0.
    ax.tick_params(bottom=False, top=False, left=False, right=False)
    # Label the lines directly on the first and last panels instead of a legend.
    if sp == 0:
        ax.text(2005, 87, 'Men')
        ax.text(2002, 8, 'Women')
    elif sp == 5:
        ax.text(2005, 62, 'Men')
        ax.text(2001, 35, 'Women')
plt.show()
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')

# Colorblind-safe line colors.
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)

# Degree categories grouped into the three columns of the figure.
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
lib_arts_cats = ['Foreign Languages', 'English', 'Communications and Journalism', 'Art and Performance',
                 'Social Sciences and History']
other_cats = ['Health Professions', 'Public Administration', 'Education', 'Agriculture', 'Business', 'Architecture']


def _draw_category(ax, cat):
    """Plot women's (blue) vs. men's (orange) share for one category on ax."""
    ax.plot(women_degrees['Year'], women_degrees[cat], c=cb_dark_blue, label='Women', linewidth=3)
    ax.plot(women_degrees['Year'], 100 - women_degrees[cat], c=cb_orange, label='Men', linewidth=3)
    for side in ("right", "left", "top", "bottom"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0, 100)
    ax.set_title(cat)
    # Fix: tick_params takes booleans; the string "off" was removed in
    # matplotlib 3.0 and is truthy (i.e. silently wrong) there.
    ax.tick_params(bottom=False, top=False, left=False, right=False)


# 6x3 grid: STEM | liberal arts | other (liberal arts has only 5 rows).
fig = plt.figure(figsize=(12, 18))
sp = 1
for i in range(6):
    ax1 = fig.add_subplot(6, 3, sp)
    _draw_category(ax1, stem_cats[i])
    if i < 5:
        ax2 = fig.add_subplot(6, 3, sp + 1)
        _draw_category(ax2, lib_arts_cats[i])
    ax3 = fig.add_subplot(6, 3, sp + 2)
    _draw_category(ax3, other_cats[i])
    # Label the lines directly on the top and bottom panels of each column.
    if i == 0:
        ax1.text(2005, 87, 'Men')
        ax1.text(2002, 8, 'Women')
        ax2.text(2005, 20, 'Men')
        ax2.text(2002, 75, 'Women')
        ax3.text(2005, 90, 'Men')
        ax3.text(2002, 5, 'Women')
    elif i == 5:
        ax1.text(2005, 62, 'Men')
        ax1.text(2001, 35, 'Women')
        ax3.text(2005, 35, 'Men')
        ax3.text(2001, 65, 'Women')
    sp += 3
plt.show()
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')

# Colorblind-safe line colors.
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)

# Degree categories grouped into the three columns of the figure.
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
lib_arts_cats = ['Foreign Languages', 'English', 'Communications and Journalism', 'Art and Performance',
                 'Social Sciences and History']
other_cats = ['Health Professions', 'Public Administration', 'Education', 'Agriculture', 'Business', 'Architecture']


def _draw_category(ax, cat):
    """Plot women's (blue) vs. men's (orange) share for one category on ax."""
    ax.plot(women_degrees['Year'], women_degrees[cat], c=cb_dark_blue, label='Women', linewidth=3)
    ax.plot(women_degrees['Year'], 100 - women_degrees[cat], c=cb_orange, label='Men', linewidth=3)
    for side in ("right", "left", "top", "bottom"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0, 100)
    ax.set_title(cat)
    # Fix: tick_params takes booleans; the "off"/"on" strings were removed in
    # matplotlib 3.0 and are truthy (i.e. silently wrong) there.
    ax.tick_params(bottom=False, top=False, left=False, right=False, labelbottom=False)


# 6x3 grid: STEM | liberal arts | other (liberal arts has only 5 rows).
fig = plt.figure(figsize=(12, 18))
sp = 1
for i in range(6):
    ax1 = fig.add_subplot(6, 3, sp)
    _draw_category(ax1, stem_cats[i])
    if i < 5:
        ax2 = fig.add_subplot(6, 3, sp + 1)
        _draw_category(ax2, lib_arts_cats[i])
    ax3 = fig.add_subplot(6, 3, sp + 2)
    _draw_category(ax3, other_cats[i])
    # Label the lines directly on the top and bottom panels of each column.
    if i == 0:
        ax1.text(2005, 87, 'Men')
        ax1.text(2002, 8, 'Women')
        ax2.text(2005, 20, 'Men')
        ax2.text(2002, 75, 'Women')
        ax3.text(2005, 90, 'Men')
        ax3.text(2002, 5, 'Women')
    elif i == 5:
        ax1.text(2005, 62, 'Men')
        ax1.text(2001, 35, 'Women')
        ax3.text(2005, 35, 'Men')
        ax3.text(2001, 65, 'Women')
    sp += 3

# Re-enable x tick labels only on the last panel created in each column
# (ax2 still refers to row 5, the bottom of the shorter middle column).
ax1.tick_params(labelbottom=True)
ax2.tick_params(labelbottom=True)
ax3.tick_params(labelbottom=True)
plt.show()
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')

# Colorblind-safe line colors.
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)

# Degree categories grouped into the three columns of the figure.
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
lib_arts_cats = ['Foreign Languages', 'English', 'Communications and Journalism', 'Art and Performance',
                 'Social Sciences and History']
other_cats = ['Health Professions', 'Public Administration', 'Education', 'Agriculture', 'Business', 'Architecture']


def _draw_category(ax, cat):
    """Plot women's (blue) vs. men's (orange) share for one category on ax."""
    ax.plot(women_degrees['Year'], women_degrees[cat], c=cb_dark_blue, label='Women', linewidth=3)
    ax.plot(women_degrees['Year'], 100 - women_degrees[cat], c=cb_orange, label='Men', linewidth=3)
    for side in ("right", "left", "top", "bottom"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0, 100)
    ax.set_title(cat)
    # Fix: tick_params takes booleans; the "off"/"on" strings were removed in
    # matplotlib 3.0 and are truthy (i.e. silently wrong) there.
    ax.tick_params(bottom=False, top=False, left=False, right=False, labelbottom=False)
    # Declutter the y axis: only show the 0 and 100 extremes.
    ax.set_yticks([0, 100])


# 6x3 grid: STEM | liberal arts | other (liberal arts has only 5 rows).
fig = plt.figure(figsize=(12, 18))
sp = 1
for i in range(6):
    ax1 = fig.add_subplot(6, 3, sp)
    _draw_category(ax1, stem_cats[i])
    if i < 5:
        ax2 = fig.add_subplot(6, 3, sp + 1)
        _draw_category(ax2, lib_arts_cats[i])
    ax3 = fig.add_subplot(6, 3, sp + 2)
    _draw_category(ax3, other_cats[i])
    # Label the lines directly on the top and bottom panels of each column.
    if i == 0:
        ax1.text(2005, 87, 'Men')
        ax1.text(2002, 8, 'Women')
        ax2.text(2005, 20, 'Men')
        ax2.text(2002, 75, 'Women')
        ax3.text(2005, 90, 'Men')
        ax3.text(2002, 5, 'Women')
    elif i == 5:
        ax1.text(2005, 62, 'Men')
        ax1.text(2001, 35, 'Women')
        ax3.text(2005, 35, 'Men')
        ax3.text(2001, 65, 'Women')
    sp += 3

# Re-enable x tick labels only on the last panel created in each column
# (ax2 still refers to row 5, the bottom of the shorter middle column).
ax1.tick_params(labelbottom=True)
ax2.tick_params(labelbottom=True)
ax3.tick_params(labelbottom=True)
plt.show()
# +
# %matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt

women_degrees = pd.read_csv('percent-bachelors-degrees-women-usa.csv')

# Colorblind-safe line colors.
cb_dark_blue = (0/255, 107/255, 164/255)
cb_orange = (255/255, 128/255, 14/255)

# Degree categories grouped into the three columns of the figure.
stem_cats = ['Engineering', 'Computer Science', 'Psychology', 'Biology', 'Physical Sciences', 'Math and Statistics']
lib_arts_cats = ['Foreign Languages', 'English', 'Communications and Journalism', 'Art and Performance',
                 'Social Sciences and History']
other_cats = ['Health Professions', 'Public Administration', 'Education', 'Agriculture', 'Business', 'Architecture']


def _draw_category(ax, cat):
    """Plot women's (blue) vs. men's (orange) share for one category on ax."""
    ax.plot(women_degrees['Year'], women_degrees[cat], c=cb_dark_blue, label='Women', linewidth=3)
    ax.plot(women_degrees['Year'], 100 - women_degrees[cat], c=cb_orange, label='Men', linewidth=3)
    for side in ("right", "left", "top", "bottom"):
        ax.spines[side].set_visible(False)
    ax.set_xlim(1968, 2011)
    ax.set_ylim(0, 100)
    ax.set_title(cat)
    # Fix: tick_params takes booleans; the "off"/"on" strings were removed in
    # matplotlib 3.0 and are truthy (i.e. silently wrong) there.
    ax.tick_params(bottom=False, top=False, left=False, right=False, labelbottom=False)
    # Declutter the y axis: only show the 0 and 100 extremes.
    ax.set_yticks([0, 100])
    # Faint horizontal reference line at the 50% gender-parity mark.
    ax.axhline(50, c=(171/255, 171/255, 171/255), alpha=0.3)


# 6x3 grid: STEM | liberal arts | other (liberal arts has only 5 rows).
fig = plt.figure(figsize=(12, 18))
sp = 1
for i in range(6):
    ax1 = fig.add_subplot(6, 3, sp)
    _draw_category(ax1, stem_cats[i])
    if i < 5:
        ax2 = fig.add_subplot(6, 3, sp + 1)
        _draw_category(ax2, lib_arts_cats[i])
    ax3 = fig.add_subplot(6, 3, sp + 2)
    _draw_category(ax3, other_cats[i])
    # Label the lines directly on the top and bottom panels of each column.
    if i == 0:
        ax1.text(2005, 87, 'Men')
        ax1.text(2002, 8, 'Women')
        ax2.text(2005, 20, 'Men')
        ax2.text(2002, 75, 'Women')
        ax3.text(2005, 90, 'Men')
        ax3.text(2002, 5, 'Women')
    elif i == 5:
        ax1.text(2005, 62, 'Men')
        ax1.text(2001, 35, 'Women')
        ax3.text(2005, 35, 'Men')
        ax3.text(2001, 65, 'Women')
    sp += 3

# Re-enable x tick labels only on the last panel created in each column
# (ax2 still refers to row 5, the bottom of the shorter middle column).
ax1.tick_params(labelbottom=True)
ax2.tick_params(labelbottom=True)
ax3.tick_params(labelbottom=True)
plt.savefig("percent-bachelors-degrees-women-usa.png")
plt.show()
| percent-bachelors-degrees-women-usa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# For technical reasons, this file is *NOT* reloaded automatically when you use
# 'bundle exec jekyll serve'. If you change this file, please restart the server process.
title: Bay Jekyll Theme
description: >- # this means to ignore newlines until "baseurl:"
Bay is a minimal Jekyll Theme.
baseurl: "/bay" # the subpath of the site
url: "https://eliottvincent.github.io" # the base hostname & protocol for the site
og_image: /assets/img/profile-pic.jpg
google_analytics: "UA-121636368-3"
header:
pages:
- name: Home
slug: /
- name: Work
- name: Blog
footer:
show_powered_by: true
contact:
- name: Email
value: <EMAIL>
link: mailto:<EMAIL>
- name: WeChat
value: YourWeChatUsername
link: "#"
follow:
- name: Twitter
link: http://twitter.com/YourTwitterUsername
username: "@YourTwitterUsername"
- name: Facebook
link: http://facebook.com/YourFacebookUsername
- name: LinkedIn
link: http://linkedin.com/in/YourLinkedInUsername
- name: GitHub
link: http://github.com/YourGitHubUsername
- name: Dribbble
link: https://dribbble.com/YourDribbbleUsername
- name: RSS
link: /feed.xml
# Build settings
markdown: kramdown
sass:
style: :compressed
plugins:
- jekyll-feed
# Excluded items
exclude:
- .sass-cache/
- .jekyll-cache/
- feed.xml
- gemfiles/
- Gemfile
- Gemfile.lock
- vendor/bundle/
- vendor/cache/
- vendor/gems/
- vendor/ruby/
# Used for defeating caching
version: 1
| _config.yml_p.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ci)
# language: python
# name: ci
# ---
# # Neural Networks, Part 2: Convolutional Layers
#
# In the "Neural Networks, Part 1" notebook we introduced the most basic ways to build neural networks using fully-connected layers. This model is extremely general, which means that it will work with most types of data. However, there are some cases where we can take advantage of the structure of the data to improve our networks tremendously.
#
# Right now the field which is booming the most from deep learning is __computer vision__, especially for autonomous driving. Computer vision is about giving machines the ability to see; naturally, the primary type of data that we deal with in computer vision is image data. So far we've treated an image as simply a list of pixels, with no regard to the overall structure of the image. When we do this, we lose several key aspects of images:
#
# - Pixels tend to be similar (correlated) to their neighboring pixels
# - Objects in an image can vary in location, size, rotation, etc.
#
# What if we could modify our neural network so that it could take advantage of these properties of images? As it turns out, we can accomplish these things with __convolution__. Convolution is actually a very old technique that goes back to computer vision and then even further back to signal processing. In this notebook we're going to commandeer convolution for our own purposes as a __convolutional layer__, which can be plugged into a neural network to create a __convolutional neural network__, and we'll go through some additional details that come into play when using these layers. Finally, we'll take our original MLP image classifier, super-charge it with some convolutional layers, and see the difference in performance and accuracy.
#
# ## Machine Learning to Deep Learning: Where Are We?
#
# There are a lot of buzzwords in our field, and __deep learning__ is by far the worst of them. Deep learning is considered a subset of machine learning, which is a subset of artificial intelligence. So everything we've done so far is definitely machine learning -- but at what point does machine learning also become deep learning? The answer is that this boundary isn't really well-defined. Deep learning generally refers to machine learning with "deep" models -- but by the same token, what makes a model "deep"? For this reason it's better to avoid the term "deep learning", because most people don't really know what it means. But as a guideline, if you're using neural networks with several layers -- and especially if you're using convolutional layers -- then you're probably doing "deep learning".
#
# ## Getting Started
#
# You should have your own Anaconda virtual environment with all of the necessary Python modules installed. Additionally, this notebook is the first one which uses tensorflow, so __you need an NVIDIA GPU to use this notebook__ (unless you also installed the CPU version of tensorflow (but you probably didn't)). You can check by trying to import them:
# +
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import skimage.color
import skimage.data
from tensorflow import keras
import sklearn.metrics
# -
# ## Convolutions Demystified
#
# You may have already learned about convolution in another domain, such as math or electrical engineering. You may associate convolution with integrals, and great pain. Fortunately for you, we will not need integrals to use convolution with images. To do convolution on an image, we take a weight matrix and slide it (or "convolve" it) across the image, left to right, top to bottom, like this:
#
# <img src="https://cufctl.github.io/creative-inquiry/assets/img/convolution.gif" alt="convolution" width=400/>
#
# The weight matrix has many names, but we'll call it a __filter__ or __kernel__. At each location in the image, we compute a __weighted sum__ of the pixels in the given window, using the kernel elements as weights. At the end of it we get an "output image", where each "pixel" is the weighted sum centered on that pixel. So in the figure above, the blue square is the input image, the moving green square is the kernel, and the red square is the output of the convolution. Notice that we didn't slide the kernel over pixels where it would have been out of bounds, which means that the output image is smaller. We also usually use kernels that are square and have odd width and height, so that the kernel is centered on the output pixel.
#
# We said that the output is an "image", but what does it represent, and can we even view it as an image? After all, the output is a weighted sum, and the weights can be anything, so we could get output values that are negative, or very large, or not even whole numbers! How about this: let's write some code that performs a convolution on an image and then tries to visualize the output.
# +
# load a sample image
image = skimage.data.astronaut()

# convert the image to grayscale so we convolve a single channel
gray = skimage.color.rgb2gray(image)

# Sobel kernels: Wx responds to vertical edges (horizontal gradient),
# Wy to horizontal edges (vertical gradient).
Wx = np.array([
    [-1, 0, +1],
    [-2, 0, +2],
    [-1, 0, +1]
])
Wy = np.array([
    [-1, -2, -1],
    [ 0,  0,  0],
    [+1, +2, +1]
])

# Slide each kernel over the interior of the image and record the weighted
# sum at every position; the 1-pixel border is left at 0.
# NOTE(review): np.vdot does not flip the kernel, so this is technically
# cross-correlation, not convolution; for the Sobel magnitude below the
# result is the same.
convolvedx = np.zeros_like(gray)
convolvedy = np.zeros_like(gray)
for i in range(1, gray.shape[0] - 1):
    for j in range(1, gray.shape[1] - 1):
        window = gray[i-1:i+2, j-1:j+2]
        convolvedx[i, j] = np.vdot(window, Wx)
        convolvedy[i, j] = np.vdot(window, Wy)

# Gradient magnitude sqrt(Gx^2 + Gy^2); np.hypot is the numerically stable
# spelling of the original np.power(x**2 + y**2, 0.5) and vectorizes the
# third per-pixel loop. Border values stay 0 since hypot(0, 0) == 0.
convolved = np.hypot(convolvedx, convolvedy)

# plot input image and output image side by side
plt.figure(figsize=(12, 6))
plt.subplot(121)
plt.imshow(gray, cmap="gray")
plt.title("Input Image")
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
plt.subplot(122)
plt.imshow(convolved, cmap="gray")
plt.title("Output Image")
plt.gca().get_xaxis().set_visible(False)
plt.gca().get_yaxis().set_visible(False)
plt.show()

# TODO: experiment with other filters (Sobel, Canny, Prewitt, Laplacian, etc.)
# -
# scratch cell: sanity-check that a 0.5 power computes a square root
np.power(4, 0.5)
# Thanks to the `imshow` function in matplotlib, we can pass the output image and its values are automatically scaled to form a proper grayscale image. The kernel we used is actually a simple edge detection filter -- it causes the output to be high around edges and low everywhere else. We can also call this output image a __feature map__ or __activation map__, because it is essentially a map of the important information that was seen by the filter.
#
# ## Elements of CNNs
#
# Now that we have a working definition of convolution we can focus on how to incorporate them into our neural network. In particular, there are three new elements that we'll need in order to make convolution work in a neural network: the __convolutional layer__, the __ReLU activation__, and the __pooling layer__.
#
# ### Convolutional Layer
#
# This figure shows what a convolutional layer does:
#
# <img src="http://cs231n.github.io/assets/cnn/depthcol.jpeg" alt="depth-column" width=300/>
#
# The red square is the input image. We say that an image has a __width__, __height__, and __depth__. The height and width are intuitive -- in this figure they are 32x32 -- but you may not be used to thinking of an image as having "depth". So far we've only worked with grayscale images, where each pixel has a single "gray" value. In a color image, however, each pixel has three components: red, green, and blue. This type of color image is called an __RGB image__, and the components are called __channels__. Now we can stack these channels so that the image is actually 32x32x3. This means that our kernel -- the small dark red square inside the image -- will also have a depth of 3, in addition to whatever its width and height are.
#
# The blue cube, which we call the __output volume__, represents the neurons in the output layer. The kernel will be the weight matrix of the convolutional layer, that is, it will represent the connections from the input layer to each neuron in the output layer. There are two key aspects to understand here. First, each neuron in the output volume is connected to only a small region of input pixels, rather than all of the input pixels as in a fully-connected layer. Second, every neuron is sharing the same weight matrix! Can you see how this structure is addressing exactly the problems we mentioned before? Now we are focusing on neighboring regions of pixels instead of whole images, and we're accounting for the fact that certain patterns could occur anywhere in the image.
#
# However there's one more thing that we kind of ignored: why is the output a _cube_?! Shouldn't it just be a square? The answer is yes, it would be a square, _if we were only using one kernel_. When we use convolutional layers, we almost always have several kernels in each layer. Each kernel slides across the input image in exactly the same way, but because the kernels will be initialized differently, the idea is that they will each learn a different pattern in the image. One kernel might learn a vertical edge, another kernel a horizontal edge, a third kernel a diagonal edge, and so on. See the blue circles forming a column in the output volume? We call that a __depth column__ -- it represents a group of neurons that are looking at the same region in an image, but through different kernels.
#
# So how big is the output volume? The depth is equal to the number of filters -- every filter produces an activation map. The width and height depend on a number of factors, which we can summarize in the following equations:
#
# $$W_o = (W_i - F + 2 P) / S + 1$$
# $$H_o = (H_i - F + 2 P) / S + 1$$
#
# Where $(W_i, H_i)$ is the input size, $F$ is the width/height of the filter (which we assume is square), $P$ is the amount of __zero padding__, and $S$ is the __stride__. Zero padding refers to adding a border of zeros around the input image, and the stride refers to how far the kernel steps when it slides across the input. We almost always use $F = 2 P + 1$ and $S = 1$, which makes these equations simpler (just a little bit):
#
# $$W_o = W_i$$
# $$H_o = H_i$$
#
# In other words, we leave the stride set to 1 (just like the convolution examples from before) and we fix the filter size and zero padding so that the output volume always has the same width and height as the input -- if we use 3x3 kernels then we'll add one layer of zeros, if we use 5x5 kernels then we'll add two layers of zeros, and so on. It's just simpler that way.
#
# ### ReLU Activation
#
# <img src="http://cs231n.github.io/assets/nn1/relu.jpeg" alt="relu" width=300/>
#
# So far we've mainly used the sigmoid activation, but there's a new activation function in town that's both simpler and better. It's called the __rectified linear unit (ReLU)__:
#
# $$ReLU(x) = max(0, x)$$
#
# How is it better? Well, <NAME> (et. al.) found that their network trained much more quickly when they used ReLU instead of sigmoid. It wasn't that the computations were faster, but that the network didn't need to be trained for as many iterations -- it converged more quickly. We'll use this activation after each convolutional layer, and when we add a few fully-connected layers to the end of our network, we'll use ReLU there too.
#
# ### Pooling Layer
#
# <img src="http://cs231n.github.io/assets/cnn/maxpool.jpeg" alt="max-pooling" width=400/>
#
# A pooling layer is essentially a "downsampling" layer -- it breaks the input image into chunks and takes only one value from each chunk. This layer doesn't have any neurons, it just has two settings: (1) the size of the chunks and (2) how to select a value from each chunk. The figure above shows a 2x2 __max-pooling layer__, because each chunk is 2x2 and the maximum value is taken from each chunk. There are other types of pooling layers, like min-pooling and mean-pooling, but max-pooling tends to give the best results, and with good reason. Remember that the output of a convolution is a map that represents what the convolution kernel is "paying attention" to, so by taking only the highest values in each region we are taking the most important features that were computed by the convolutional layer.
#
# We typically use convolutional layers and pooling layers in pairs: the convolutional layer produces an activation map with the same size as the input, and then the pooling layer downsamples the activation map by halving the width and height. At each step, the output volume becomes thinner and deeper, until eventually we can flatten it into a vector and use fully-connected layers at the end of the network.
# ## Implementing CNNs in Keras
#
# Finally! It's time to create our own CNN in Keras. As before we're going to create an image classifier, but this time we're going to incorporate convolutional layers to improve performance and accuracy. We're also going to graduate once again from MNIST to another image dataset, the [CIFAR-10](https://www.cs.toronto.edu/~kriz/cifar.html) dataset. The CIFAR-10 dataset has 60,000 color images, with each image having a size of 32x32x3. As usual, we'll load the dataset and visualize a few samples:
# +
# Download (on first run) and load the CIFAR-10 dataset: 60,000 color
# images of shape 32x32x3 with integer class labels (see description above).
(X_train, y_train), (X_test, y_test) = keras.datasets.cifar10.load_data()
# Print the array shapes to confirm the train/test split loaded correctly.
print("X_train: %s" % str(X_train.shape))
print("y_train: %s" % str(y_train.shape))
print("X_test: %s" % str(X_test.shape))
print("y_test: %s" % str(y_test.shape))
# +
# select several samples from CIFAR-10 at random
rows = 4
cols = 4
# Fix: sample without replacement so the grid never shows the same image
# twice (the original default, replace=True, could repeat samples).
indices = np.random.choice(np.arange(len(X_train)), rows * cols, replace=False)

# plot the selected images in a rows x cols grid, titled with their labels
plt.figure(figsize=(2 * cols, 2 * rows))
for i in range(rows * cols):
    index = indices[i]
    ax = plt.subplot(rows, cols, i + 1)
    plt.imshow(X_train[index])
    plt.title("label = %d" % y_train[index])
    # hide axis ticks; they carry no information for image thumbnails
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# -
# These images aren't much bigger than the ones in MNIST, but they're color images, and they're taken from a variety of real-world settings. In other words, it's probably going to be more difficult to classify these images than to classify handwritten digits.
#
# The code to create, train, and evaluate a CNN is largely similar to what we've done before, but this time we'll need a few more types of layers.
# Create a basic convolutional neural network for 32x32x3 CIFAR-10 images:
# three conv/ReLU stages (2x2 max-pooling after the first two) followed by
# a flatten and two fully-connected layers ending in a 10-way softmax.
# padding="same" keeps each conv output the same width/height as its input.
cnn = keras.models.Sequential([
    keras.layers.Conv2D(32, (3, 3), padding="same", activation="relu", input_shape=(32, 32, 3)),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Conv2D(64, (3, 3), padding="same", activation="relu"),
    keras.layers.MaxPooling2D(2, 2),
    keras.layers.Conv2D(64, (3, 3), padding="same", activation="relu"),
    keras.layers.Flatten(),
    keras.layers.Dense(1024, activation="relu"),
    keras.layers.Dense(10, activation="softmax"),
])
cnn.summary()
# So we've created a tiny CNN. We've introduced three new types of layers, so we'll go through each of them. The `Conv2D` layer is defined primarily by the number of filters and the size of each filter (in contrast to the `Dense` layer, which is defined by the number of output units). We'll adjust the zero-padding to ensure that the output size always matches the input size, which we can specify by simply saying `"same"`. We'll use ReLU activation for every convolutional layer _and_ every fully-connected layer. The `MaxPooling2D` layer does exactly what it says; we only specify the pool size, which is 2x2. The `Flatten` layer simply flattens the output volume into a vector so that it can be passed to a fully-connected layer.
#
# As before, we only specify the input shape for the first layer as the shape of the input data, and Keras automagically deduces the shapes of the remaining layers for you. And this time, we don't have to flatten the input data because we're using it as-is. Neat!
#
# Finally, notice that the end of our CNN contains fully-connected layers just like before. As a result the output of our CNN -- and consequently the training and testing procedure for it -- is _exactly the same_ as our MLP from the previous notebook. So let's do it!
# +
# normalize the data
# Scale raw pixel values from integers in [0, 255] to float32 in [0, 1].
X_norm_train = X_train.astype("float32") / 255.
X_norm_test = X_test.astype("float32") / 255.
# compute one-hot labels
# Convert integer class labels to one-hot vectors of length 10, matching
# the softmax output layer.
y_cate_train = keras.utils.to_categorical(y_train, num_classes=10)
y_cate_test = keras.utils.to_categorical(y_test, num_classes=10)
# compile the model
# SGD optimizer with categorical cross-entropy loss, tracking accuracy.
cnn.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
# train the model
# Trains on 90% of the training set, holding out 10% for validation.
history = cnn.fit(x=X_norm_train, y=y_cate_train, batch_size=500, epochs=50, validation_split=0.1)
# +
# TODO: use the history to plot training accuracy and training loss (refer to previous notebook)
def plot_acc_and_score(which_history,which_mlp,which_approach):
    """Plot training/validation accuracy and loss curves side by side.

    Args:
        which_history: the History object returned by model.fit().
        which_mlp: the trained model (currently unused; kept so existing
            callers keep working).
        which_approach: label appended to the subplot titles.
    """
    # plot the training accuracy
    plt.subplot(1,2, 1)
    plt.plot(which_history.history["acc"])
    plt.plot(which_history.history["val_acc"])
    plt.title("Training Accuracy: "+ which_approach)
    plt.ylabel("Accuracy")
    plt.xlabel("Epoch")
    plt.legend(["Training", "Validation"], loc="upper left")
    # plot the training loss
    plt.subplot(1,2, 2)
    # BUG FIX: the loss curves previously read the global `history` instead of
    # the `which_history` argument, so the function plotted the wrong model's
    # loss whenever it was called with a different history object.
    plt.plot(which_history.history["loss"])
    plt.plot(which_history.history["val_loss"])
    plt.title("Training Loss: "+ which_approach)
    plt.ylabel("Loss")
    plt.xlabel("Epoch")
    plt.legend(["Training", "Validation"], loc="upper left")
    plt.show()
plot_acc_and_score(history,cnn,"approach 1")
# +
# evaluate the model on the held-out test set;
# evaluate() returns [loss, accuracy] in the order given to compile(metrics=...)
score = cnn.evaluate(x=X_norm_test, y=y_cate_test)
print("test loss: %g" % score[0])
print("test accuracy: %g" % score[1])
# +
# TODO: use cnn.predict() to plot a confusion matrix (refer to previous notebooks)
# get the raw predictions of the network on the test set
y_cate_pred = cnn.predict(X_norm_test)
# convert the one-hot encoded output to class indexes
y_pred = np.argmax(y_cate_pred, axis=1)
classes = [str(i) for i in range(10)]
# rows are true labels, columns are predicted labels
cnf_matrix = sklearn.metrics.confusion_matrix(y_test, y_pred)
# plot a heatmap of the confusion matrix
sns.heatmap(cnf_matrix, annot=True, fmt="d", cbar=False, square=True, xticklabels=classes, yticklabels=classes)
plt.ylabel("Expected")
plt.xlabel("Measured")
plt.title("Confusion Matrix")
plt.show()
# -
# Chances are that you're going to have to train the network for many more epochs before it achieves a high accuracy, and that's fine. Models that are more complex generally take longer to train. We've only shown you the process so that you know what it looks like; from here it's up to you to train and monitor your network until it performs well.
#
# ## Convolutional Autoencoder
#
# As a side note, we can revisit our strange friend the autoencoder and apply the same techniques from this notebook to create a __convolutional autoencoder__ -- an autoencoder with convolutional layers. The only new thing here is that we use "up-sampling" layers in the decoder to mirror the max-pooling layers in the encoder. As in the previous notebook, we'll go ahead and give you the code to train the autoencoder and compare test images to their reconstructed counterparts:
# +
# create a convolutional autoencoder
ae = keras.models.Sequential()
# encoder: two conv + max-pool stages compress 32x32x3 down to 8x8x8
ae.add(keras.layers.Conv2D(16, (3, 3), activation="relu", padding="same", input_shape=(32, 32, 3)))
ae.add(keras.layers.MaxPooling2D((2, 2)))
ae.add(keras.layers.Conv2D(8, (3, 3), activation="relu", padding="same"))
ae.add(keras.layers.MaxPooling2D((2, 2)))
# decoder: up-sampling layers mirror the encoder's pooling to restore 32x32
ae.add(keras.layers.Conv2D(8, (3, 3), activation="relu", padding="same"))
ae.add(keras.layers.UpSampling2D((2, 2)))
ae.add(keras.layers.Conv2D(16, (3, 3), activation="relu", padding="same"))
ae.add(keras.layers.UpSampling2D((2, 2)))
# sigmoid output keeps reconstructed pixel values in [0, 1]
ae.add(keras.layers.Conv2D(3, (3, 3), activation="sigmoid", padding="same"))
ae.compile(optimizer="adadelta", loss="binary_crossentropy")
# print model summary
ae.summary()
# normalize the data to [0, 1] to match the sigmoid output range
X_norm_train = X_train.astype("float32") / 255.
X_norm_test = X_test.astype("float32") / 255.
# train the model: the input is also the target (reconstruction objective)
history = ae.fit(x=X_norm_train, y=X_norm_train, batch_size=500, epochs=50, validation_split=0.1)
# +
# reconstruct test images with the trained autoencoder
X_reconstructed = ae.predict(X_norm_test)
# sample some test images for visualization
num_images = 10
indices = np.random.choice(np.arange(len(X_norm_test)), num_images)
# plot each sampled image (top row) and its reconstruction (bottom row)
plt.figure(figsize=(3 * num_images, 3))
for i, idx in enumerate(indices):
    # plot original image
    ax = plt.subplot(2, num_images, i + 1)
    # BUG FIX: index with the sampled position, not the loop counter —
    # previously the random `indices` were drawn but never used, so the
    # first num_images test images were always shown instead of the sample.
    plt.imshow(X_norm_test[idx])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # plot reconstructed image
    ax = plt.subplot(2, num_images, i + 1 + num_images)
    plt.imshow(X_reconstructed[idx])
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# -
# ## Assignment: It's Convolutions All The Way Down!!!
#
# There's a lot of information in this notebook. It's beginning to be a pattern, isn't it? Once again, if you'd like to learn more about convolutional neural networks and how to use them, we highly recommend that you check out [CS231n](http://cs231n.github.io/) and the [Keras documentation](https://keras.io/).
#
# In the meantime, we'll expand on the assignment from the previous notebook by applying it to CNNs. In addition to the network size, we now have a slew of new hyperparameters related to convolutional layers. On top of that, we'll need to pay more attention to how we train the CNN because it's a more complex model. To that end, create and train a variety of networks by experimenting with the following hyperparameters:
#
# - filter size (3x3, 5x5, 7x7, etc.)
# - number of filters (8, 16, 32, 64, etc.)
# - batch size
# - number of epochs
#
# As before, use the following metrics to evaluate each model:
#
# - training loss
# - training accuracy
# - test loss
# - test accuracy
#
# To make things easier on yourself, try to encapsulate the entire training / evaluation procedure into a function that you can call repeatedly. We'll get you started with a function called `evaluate_cnn`, which takes a "layer config" as a short-hand for creating a network.
# +
def evaluate_cnn(layers, X_norm_train, y_cate_train, X_norm_test, y_cate_test, batch_size=500, epochs=10):
    """
    Create, train, and evaluate a convolutional neural network.

    The first layer is assumed to be a convolutional layer, and the
    last layer is assumed to be a fully-connected layer. ReLU activation
    is used for all convolutional and fully-connected layers, except for
    the final layer, in which softmax is used.

    We define the following mapping for creating different layers:
      ("c", 32, 3) -> create a Conv2D with 32 output channels and kernel size of 3x3
      ("m", 2)     -> create a MaxPooling2D with pool size of 2x2
      ("d", 1024)  -> create a fully-connected layer with 1024 units
      ("f", None)  -> flatten the output volume from the previous layer

    Args:
        layers: A list of tuples specifying the type and size of each layer.
        X_norm_train: normalized training images.
        y_cate_train: one-hot training labels.
        X_norm_test: normalized test images.
        y_cate_test: one-hot test labels.
        batch_size: mini-batch size used during training (default 500).
        epochs: number of training epochs (default 10).

    Returns:
        A dict with the Keras History object, the test loss, and the test
        accuracy, so callers can compare configurations or plot curves.
    """
    model = keras.models.Sequential()
    for i, layer in enumerate(layers):
        if layer[0] == "c":
            # only the first layer needs an explicit input shape
            if i == 0:
                model.add(keras.layers.Conv2D(layer[1], (layer[2],layer[2]), padding="same", activation="relu", input_shape=X_norm_train.shape[1:]))
            else:
                model.add(keras.layers.Conv2D(layer[1], (layer[2],layer[2]), padding="same", activation="relu"))
        elif layer[0] == "m":
            model.add(keras.layers.MaxPooling2D((layer[1], layer[1])))
        elif layer[0] == "f":
            model.add(keras.layers.Flatten())
        elif layer[0] == "d":
            # the last dense layer is the softmax classification head
            if i == len(layers) - 1:
                model.add(keras.layers.Dense(layer[1], activation="softmax"))
            else:
                model.add(keras.layers.Dense(layer[1], activation="relu"))
    # completes the TODO: train with the same recipe as the hand-built CNN above
    model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
    history = model.fit(x=X_norm_train, y=y_cate_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
    # evaluate on the held-out test set; evaluate() returns [loss, accuracy]
    test_loss, test_acc = model.evaluate(x=X_norm_test, y=y_cate_test)
    # visualization is left to the caller: the returned history contains the
    # per-epoch accuracy/loss curves
    return {"history": history, "test_loss": test_loss, "test_acc": test_acc}
# an example config for the network we used previously:
# three conv blocks with pooling, flatten, then a dense hidden layer
# and a 10-way softmax head
layers = [
    ("c", 32, 3),
    ("m", 2),
    ("c", 64, 3),
    ("m", 2),
    ("c", 64, 3),
    ("f", None),
    ("d", 1024),
    ("d", 10)
]
evaluate_cnn(layers, X_norm_train, y_cate_train, X_norm_test, y_cate_test)
| neural-networks-conv.ipynb |
# +
from newsgac import database
from newsgac.users.models import User
from newsgac.common.utils import model_to_dict
# Serialize every stored user document to a plain dict for inspection
[model_to_dict(user) for user in User.objects.all()]
# -
| notebooks/users.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <div id="qe-notebook-header" align="right" style="text-align:right;">
# <a href="https://quantecon.org/" title="quantecon.org">
# <img style="width:250px;display:inline;" width="250px" src="https://assets.quantecon.org/img/qe-menubar-logo.svg" alt="QuantEcon">
# </a>
# </div>
# # Kesten Processes and Firm Dynamics
#
#
# <a id='index-0'></a>
# ## Contents
#
# - [Kesten Processes and Firm Dynamics](#Kesten-Processes-and-Firm-Dynamics)
# - [Overview](#Overview)
# - [Kesten Processes](#Kesten-Processes)
# - [Heavy Tails](#Heavy-Tails)
# - [Application: Firm Dynamics](#Application:-Firm-Dynamics)
# - [Exercises](#Exercises)
# - [Solutions](#Solutions)
# In addition to what’s in Anaconda, this lecture will need the following libraries:
# + hide-output=true
# !pip install --upgrade quantecon
# !pip install --upgrade yfinance
# -
# ## Overview
#
# [Previously](ar1_processes.ipynb) we learned about linear scalar-valued stochastic processes (AR(1) models).
#
# Now we generalize these linear models slightly by allowing the multiplicative coefficient to be stochastic.
#
# Such processes are known as Kesten processes after German–American mathematician Harry Kesten (1931–2019)
#
# Although simple to write down, Kesten processes are interesting for at least two reasons:
#
# 1. A number of significant economic processes are or can be described as Kesten processes.
# 1. Kesten processes generate interesting dynamics, including, in some cases, heavy-tailed cross-sectional distributions.
#
#
# We will discuss these issues as we go along.
#
# Let’s start with some imports:
# + hide-output=false
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
import quantecon as qe
# -
# The following two lines are only added to avoid a `FutureWarning` caused by
# compatibility issues between pandas and matplotlib.
# + hide-output=false
from pandas.plotting import register_matplotlib_converters
# Explicitly register pandas' datetime converters with matplotlib to
# silence the FutureWarning mentioned in the cell above.
register_matplotlib_converters()
# -
# Additional technical background related to this lecture can be found in the
# monograph of [[BDM+16]](zreferences.ipynb#buraczewski2016stochastic).
# ## Kesten Processes
#
#
# <a id='index-1'></a>
# A **Kesten process** is a stochastic process of the form
#
#
# <a id='equation-kesproc'></a>
# $$
# X_{t+1} = a_{t+1} X_t + \eta_{t+1} \tag{1}
# $$
#
# where $ \{a_t\}_{t \geq 1} $ and $ \{\eta_t\}_{t \geq 1} $ are IID
# sequences.
#
# We are interested in the dynamics of $ \{X_t\}_{t \geq 0} $ when $ X_0 $ is given.
#
# We will focus on the nonnegative scalar case, where $ X_t $ takes values in $ \mathbb R_+ $.
#
# In particular, we will assume that
#
# - the initial condition $ X_0 $ is nonnegative,
# - $ \{a_t\}_{t \geq 1} $ is a nonnegative IID stochastic process and
# - $ \{\eta_t\}_{t \geq 1} $ is another nonnegative IID stochastic process, independent of the first.
# ### Example: GARCH Volatility
#
# The GARCH model is common in financial applications, where time series such as asset returns exhibit time varying volatility.
#
# For example, consider the following plot of daily returns on the Nasdaq
# Composite Index for the period 1st January 2006 to 1st November 2019.
#
#
# <a id='ndcode'></a>
# + hide-output=false
import yfinance as yf
import pandas as pd
# Daily adjusted close of the Nasdaq Composite, Jan 2006 - Nov 2019
s = yf.download('^IXIC', '2006-1-1', '2019-11-1')['Adj Close']
# day-over-day percentage change = daily returns (first entry is NaN)
r = s.pct_change()
fig, ax = plt.subplots()
ax.plot(r, alpha=0.7)
ax.set_ylabel('returns', fontsize=12)
ax.set_xlabel('date', fontsize=12)
plt.show()
# -
# Notice how the series exhibits bursts of volatility (high variance) and then
# settles down again.
#
# GARCH models can replicate this feature.
#
# The GARCH(1, 1) volatility process takes the form
#
#
# <a id='equation-garch11v'></a>
# $$
# \sigma_{t+1}^2 = \alpha_0 + \sigma_t^2 (\alpha_1 \xi_{t+1}^2 + \beta) \tag{2}
# $$
#
# where $ \{\xi_t\} $ is IID with $ \mathbb E \xi_t^2 = 1 $ and all parameters are positive.
#
# Returns on a given asset are then modeled as
#
#
# <a id='equation-garch11r'></a>
# $$
# r_t = \sigma_t \zeta_{t+1} \tag{3}
# $$
#
# where $ \{\zeta_t\} $ is again IID and independent of $ \{\xi_t\} $.
#
# The volatility sequence $ \{\sigma_t^2 \} $, which drives the dynamics of returns, is a Kesten process.
# ### Example: Wealth Dynamics
#
# Suppose that a given household saves a fixed fraction $ s $ of its current wealth in every period.
#
# The household earns labor income $ y_t $ at the start of time $ t $.
#
# Wealth then evolves according to
#
#
# <a id='equation-wealth-dynam'></a>
# $$
# w_{t+1} = R_{t+1} s w_t + y_{t+1} \tag{4}
# $$
#
# where $ \{R_t\} $ is the gross rate of return on assets.
#
# If $ \{R_t\} $ and $ \{y_t\} $ are both IID, then [(4)](#equation-wealth-dynam)
# is a Kesten process.
# ### Stationarity
#
# In earlier lectures, such as the one on [AR(1) processes](ar1_processes.ipynb), we introduced the notion of a stationary distribution.
#
# In the present context, we can define a stationary distribution as follows:
#
# The distribution $ F^* $ on $ \mathbb R $ is called **stationary** for the
# Kesten process [(1)](#equation-kesproc) if
#
#
# <a id='equation-kp-stationary0'></a>
# $$
# X_t \sim F^*
# \quad \implies \quad
# a_{t+1} X_t + \eta_{t+1} \sim F^* \tag{5}
# $$
#
# In other words, if the current state $ X_t $ has distribution $ F^* $,
# then so does the next period state $ X_{t+1} $.
#
# We can write this alternatively as
#
#
# <a id='equation-kp-stationary'></a>
# $$
# F^*(y) = \int \mathbb P\{ a_{t+1} x + \eta_{t+1} \leq y\} F^*(dx)
# \quad \text{for all } y \geq 0. \tag{6}
# $$
#
# The left hand side is the distribution of the next period state when the
# current state is drawn from $ F^* $.
#
# The equality in [(6)](#equation-kp-stationary) states that this distribution is unchanged.
# ### Cross-Sectional Interpretation
#
# There is an important cross-sectional interpretation of stationary distributions, discussed previously but worth repeating here.
#
# Suppose, for example, that we are interested in the wealth distribution — that is, the current distribution of wealth across households in a given country.
#
# Suppose further that
#
# - the wealth of each household evolves independently according to
# [(4)](#equation-wealth-dynam),
# - $ F^* $ is a stationary distribution for this stochastic process and
# - there are many households.
#
#
# Then $ F^* $ is a steady state for the cross-sectional wealth distribution in this country.
#
# In other words, if $ F^* $ is the current wealth distribution then it will
# remain so in subsequent periods, *ceteris paribus*.
#
# To see this, suppose that $ F^* $ is the current wealth distribution.
#
# What is the fraction of households with wealth less than $ y $ next
# period?
#
# To obtain this, we sum the probability that wealth is less than $ y $ tomorrow, given that current wealth is $ w $, weighted by the fraction of households with wealth $ w $.
#
# Noting that the fraction of households with wealth in interval $ dw $ is $ F^*(dw) $, we get
#
# $$
# \int \mathbb P\{ R_{t+1} s w + y_{t+1} \leq y\} F^*(dw)
# $$
#
# By the definition of stationarity and the assumption that $ F^* $ is stationary for the wealth process, this is just $ F^*(y) $.
#
# Hence the fraction of households with wealth in $ [0, y] $ is the same
# next period as it is this period.
#
# Since $ y $ was chosen arbitrarily, the distribution is unchanged.
# ### Conditions for Stationarity
#
# The Kesten process $ X_{t+1} = a_{t+1} X_t + \eta_{t+1} $ does not always
# have a stationary distribution.
#
# For example, if $ a_t \equiv \eta_t \equiv 1 $ for all $ t $, then
# $ X_t = X_0 + t $, which diverges to infinity.
#
# To prevent this kind of divergence, we require that $ \{a_t\} $ is
# strictly less than 1 most of the time.
#
# In particular, if
#
#
# <a id='equation-kp-stat-cond'></a>
# $$
# \mathbb E \ln a_t < 0
# \quad \text{and} \quad
# \mathbb E \eta_t < \infty \tag{7}
# $$
#
# then a unique stationary distribution exists on $ \mathbb R_+ $.
#
# - See, for example, theorem 2.1.3 of [[BDM+16]](zreferences.ipynb#buraczewski2016stochastic), which provides slightly weaker conditions.
#
#
# As one application of this result, we see that the wealth process
# [(4)](#equation-wealth-dynam) will have a unique stationary distribution whenever
# labor income has finite mean and $ \mathbb E \ln R_t + \ln s < 0 $.
# ## Heavy Tails
#
# Under certain conditions, the stationary distribution of a Kesten process has
# a Pareto tail.
#
# (See our [earlier lecture](heavy_tails.ipynb) on heavy-tailed distributions for background.)
#
# This fact is significant for economics because of the prevalence of Pareto-tailed distributions.
# ### The Kesten–Goldie Theorem
#
# To state the conditions under which the stationary distribution of a Kesten process has a Pareto tail, we first recall that a random variable is called **nonarithmetic** if its distribution is not concentrated on $ \{\dots, -2t, -t, 0, t, 2t, \ldots \} $ for any $ t \geq 0 $.
#
# For example, any random variable with a density is nonarithmetic.
#
# The famous Kesten–Goldie Theorem (see, e.g., [[BDM+16]](zreferences.ipynb#buraczewski2016stochastic), theorem 2.4.4) states that if
#
# 1. the stationarity conditions in [(7)](#equation-kp-stat-cond) hold,
# 1. the random variable $ a_t $ is positive with probability one and nonarithmetic,
# 1. $ \mathbb P\{a_t x + \eta_t = x\} < 1 $ for all $ x \in \mathbb R_+ $ and
# 1. there exists a positive constant $ \alpha $ such that
#
#
# $$
# \mathbb E a_t^\alpha = 1,
# \quad
# \mathbb E \eta_t^\alpha < \infty,
# \quad \text{and} \quad
# \mathbb E [a_t^{\alpha+1} ] < \infty
# $$
#
# then the stationary distribution of the Kesten process has a Pareto tail with
# tail index $ \alpha $.
#
# More precisely, if $ F^* $ is the unique stationary distribution and $ X^* \sim F^* $, then
#
# $$
# \lim_{x \to \infty} x^\alpha \mathbb P\{X^* > x\} = c
# $$
#
# for some positive constant $ c $.
# ### Intuition
#
# Later we will illustrate the Kesten–Goldie Theorem using rank-size plots.
#
# Prior to doing so, we can give the following intuition for the conditions.
#
# Two important conditions are that $ \mathbb E \ln a_t < 0 $, so the model
# is stationary, and $ \mathbb E a_t^\alpha = 1 $ for some $ \alpha >
# 0 $.
#
# The first condition implies that the distribution of $ a_t $ has a large amount of probability mass below 1.
#
# The second condition implies that the distribution of $ a_t $ has at least some probability mass at or above 1.
#
# The first condition gives us existence of the stationary distribution.
#
# The second condition means that the current state can be expanded by $ a_t $.
#
# If this occurs for several concurrent periods, the effects compound each other, since $ a_t $ is multiplicative.
#
# This leads to spikes in the time series, which fill out the extreme right hand tail of the distribution.
#
# The spikes in the time series are visible in the following simulation, which generates 10 paths when $ a_t $ and $ \eta_t $ are lognormal.
# + hide-output=false
μ = -0.5
σ = 1.0
def kesten_ts(ts_length=100):
    """Simulate one path of the Kesten process X_{t+1} = a_{t+1} X_t + η_{t+1}.

    Starts from X_0 = 0 with a_t lognormal (parameters μ, σ) and η_t
    lognormal with standard-normal exponent. Returns an array of length
    ts_length containing the simulated path.
    """
    path = np.zeros(ts_length)
    for t in range(ts_length - 1):
        # draw the multiplicative shock first, then the additive shock,
        # preserving the order of calls to the global random stream
        mult_shock = np.exp(μ + σ * np.random.randn())
        add_shock = np.exp(np.random.randn())
        path[t + 1] = mult_shock * path[t] + add_shock
    return path
fig, ax = plt.subplots()
num_paths = 10
np.random.seed(12)
# overlay 10 independent simulated paths; the occasional large spikes
# illustrate the heavy-tail mechanism discussed in the text above
for i in range(num_paths):
    ax.plot(kesten_ts())
ax.set(xlabel='time', ylabel='$X_t$')
plt.show()
# -
# ## Application: Firm Dynamics
#
# As noted in our [lecture on heavy tails](heavy_tails.ipynb), for common measures of firm size such as revenue or employment, the US firm size distribution exhibits a Pareto tail (see, e.g., [[Axt01]](zreferences.ipynb#axtell2001zipf), [[Gab16]](zreferences.ipynb#gabaix2016power)).
#
# Let us try to explain this rather striking fact using the Kesten–Goldie Theorem.
# ### Gibrat’s Law
#
# It was postulated many years ago by Robert Gibrat [[Gib31]](zreferences.ipynb#gibrat1931inegalites) that firm size evolves according to a simple rule whereby size next period is proportional to current size.
#
# This is now known as [Gibrat’s law of proportional growth](https://en.wikipedia.org/wiki/Gibrat%27s_law).
#
# We can express this idea by stating that a suitably defined measure
# $ s_t $ of firm size obeys
#
#
# <a id='equation-firm-dynam-gb'></a>
# $$
# \frac{s_{t+1}}{s_t} = a_{t+1} \tag{8}
# $$
#
# for some positive IID sequence $ \{a_t\} $.
#
# One implication of Gibrat’s law is that the growth rate of individual firms
# does not depend on their size.
#
# However, over the last few decades, research contradicting Gibrat’s law has
# accumulated in the literature.
#
# For example, it is commonly found that, on average,
#
# 1. small firms grow faster than large firms (see, e.g., [[Eva87]](zreferences.ipynb#evans1987relationship) and [[Hal87]](zreferences.ipynb#hall1987relationship)) and
# 1. the growth rate of small firms is more volatile than that of large firms [[DRS89]](zreferences.ipynb#dunne1989growth).
#
#
# On the other hand, Gibrat’s law is generally found to be a reasonable
# approximation for large firms [[Eva87]](zreferences.ipynb#evans1987relationship).
#
# We can accommodate these empirical findings by modifying [(8)](#equation-firm-dynam-gb)
# to
#
#
# <a id='equation-firm-dynam'></a>
# $$
# s_{t+1} = a_{t+1} s_t + b_{t+1} \tag{9}
# $$
#
# where $ \{a_t\} $ and $ \{b_t\} $ are both IID and independent of each
# other.
#
# In the exercises you are asked to show that [(9)](#equation-firm-dynam) is more
# consistent with the empirical findings presented above than Gibrat’s law in
# [(8)](#equation-firm-dynam-gb).
# ### Heavy Tails
#
# So what has this to do with Pareto tails?
#
# The answer is that [(9)](#equation-firm-dynam) is a Kesten process.
#
# If the conditions of the Kesten–Goldie Theorem are satisfied, then the firm
# size distribution is predicted to have heavy tails — which is exactly what
# we see in the data.
#
# In the exercises below we explore this idea further, generalizing the firm
# size dynamics and examining the corresponding rank-size plots.
#
# We also try to illustrate why the Pareto tail finding is significant for
# quantitative analysis.
# ## Exercises
# ### Exercise 1
#
# Simulate and plot 15 years of daily returns (consider each year as having 250
# working days) using the GARCH(1, 1) process in [(2)](#equation-garch11v)–[(3)](#equation-garch11r).
#
# Take $ \xi_t $ and $ \zeta_t $ to be independent and standard normal.
#
# Set $ \alpha_0 = 0.00001, \alpha_1 = 0.1, \beta = 0.9 $ and $ \sigma_0 = 0 $.
#
# Compare visually with the Nasdaq Composite Index returns [shown above](#ndcode).
#
# While the time path differs, you should see bursts of high volatility.
# +
import yfinance as yf
import pandas as pd
# Re-plot the Nasdaq Composite daily returns for visual comparison with
# the simulated GARCH returns produced in the next cell
s = yf.download('^IXIC', '2006-1-1', '2019-11-1')['Adj Close']
r = s.pct_change()
fig, ax = plt.subplots()
ax.plot(r, alpha=0.7)
ax.set_ylabel('returns', fontsize=12)
ax.set_xlabel('date', fontsize=12)
plt.show()
# +
from scipy.stats import norm
# GARCH(1, 1) parameters from the exercise statement
α0=0.00001
α1=0.1
β=0.9
σ0=0
# 15 years x 250 trading days
T=3750
σ=np.empty(T)
r=np.empty(T)
# BUG FIX: np.empty() returns uninitialized memory, so σ[0] previously held
# garbage and the entire simulated path depended on it; σ0 was defined but
# never used. Seed the volatility recursion explicitly.
σ[0] = σ0
for t in range(T-1):
    z0 = norm.rvs()  # ξ_{t+1}: shock in the volatility recursion (2)
    z1 = norm.rvs()  # ζ_{t+1}: shock in the return equation (3)
    σ[t+1] = np.sqrt(α0 + (σ[t]**2)*(α1*(z0**2) + β))
    r[t]=σ[t]*z1
# BUG FIX: the loop never assigned r[T-1]; fill it from the final volatility
r[T-1] = σ[T-1] * norm.rvs()
# Plot the simulated GARCH returns; compare with the Nasdaq plot above —
# both should show bursts of volatility that settle down again
fig, ax = plt.subplots()
ax.plot(r, alpha=0.7)
ax.set_ylabel('returns', fontsize=12)
ax.set_xlabel('date', fontsize=12)
plt.show()
# -
# ### Exercise 2
#
# In our discussion of firm dynamics, it was claimed that [(9)](#equation-firm-dynam) is more consistent with the empirical literature than Gibrat’s law in [(8)](#equation-firm-dynam-gb).
#
# (The empirical literature was reviewed immediately above [(9)](#equation-firm-dynam).)
#
# In what sense is this true (or false)?
# ### Exercise 3
#
# Consider an arbitrary Kesten process as given in [(1)](#equation-kesproc).
#
# Suppose that $ \{a_t\} $ is lognormal with parameters $ (\mu,
# \sigma) $.
#
# In other words, each $ a_t $ has the same distribution as $ \exp(\mu + \sigma Z) $ when $ Z $ is standard normal.
#
# Suppose further that $ \mathbb E \eta_t^r < \infty $ for every $ r > 0 $, as
# would be the case if, say, $ \eta_t $ is also lognormal.
#
# Show that the conditions of the Kesten–Goldie theorem are satisfied if and
# only if $ \mu < 0 $.
#
# Obtain the value of $ \alpha $ that makes the Kesten–Goldie conditions
# hold.
# ### Exercise 4
#
# One unrealistic aspect of the firm dynamics specified in [(9)](#equation-firm-dynam) is
# that it ignores entry and exit.
#
# In any given period and in any given market, we observe significant numbers of firms entering and exiting the market.
#
# Empirical discussion of this can be found in a famous paper by Hugo Hopenhayn [[Hop92]](zreferences.ipynb#hopenhayn1992entry).
#
# In the same paper, Hopenhayn builds a model of entry and exit that
# incorporates profit maximization by firms and market clearing quantities, wages and prices.
#
# In his model, a stationary equilibrium occurs when the number of entrants
# equals the number of exiting firms.
#
# In this setting, firm dynamics can be expressed as
#
#
# <a id='equation-firm-dynam-ee'></a>
# $$
# s_{t+1} = e_{t+1} \mathbb{1}\{s_t < \bar s\}
# + (a_{t+1} s_t + b_{t+1}) \mathbb{1}\{s_t \geq \bar s\} \tag{10}
# $$
#
# Here
#
# - the state variable $ s_t $ represents productivity (which is a proxy
# for output and hence firm size),
# - the IID sequence $ \{ e_t \} $ is thought of as a productivity draw for a new
# entrant and
# - the variable $ \bar s $ is a threshold value that we take as given,
# although it is determined endogenously in Hopenhayn’s model.
#
#
# The idea behind [(10)](#equation-firm-dynam-ee) is that firms stay in the market as long
# as their productivity $ s_t $ remains at or above $ \bar s $.
#
# - In this case, their productivity updates according to [(9)](#equation-firm-dynam).
#
#
# Firms choose to exit when their productivity $ s_t $ falls below $ \bar s $.
#
# - In this case, they are replaced by a new firm with productivity
# $ e_{t+1} $.
#
#
# What can we say about dynamics?
#
# Although [(10)](#equation-firm-dynam-ee) is not a Kesten process, it does update in the
# same way as a Kesten process when $ s_t $ is large.
#
# So perhaps its stationary distribution still has Pareto tails?
#
# Your task is to investigate this question via simulation and rank-size plots.
#
# The approach will be to
#
# 1. generate $ M $ draws of $ s_T $ when $ M $ and $ T $ are
# large and
# 1. plot the largest 1,000 of the resulting draws in a rank-size plot.
#
#
# (The distribution of $ s_T $ will be close to the stationary distribution
# when $ T $ is large.)
#
# In the simulation, assume that
#
# - each of $ a_t, b_t $ and $ e_t $ is lognormal,
# - the parameters are
# + hide-output=false
from scipy.stats import norm
# Parameters for simulating terminal firm productivity s_T under the
# entry/exit dynamics in equation (10)
M = 1_000_000 # number of firms
T = 500 # sampling date
μ_a = -0.5 # location parameter for a
σ_a = 0.1 # scale parameter for a
μ_b = 0.0 # location parameter for b
σ_b = 0.5 # scale parameter for b
μ_e = 0.0 # location parameter for e
σ_e = 0.5 # scale parameter for e
s_bar = 1.0 # threshold
s_init = 1.0 # initial condition for each firm
# NOTE(review): numba acceleration was attempted but left disabled; `njit`
# is never imported anywhere in this notebook
#@njit(parallel=True)
def firm(T=500, μ_a=-0.5, σ_a=0.1, μ_b=0.0, σ_b=0.5, μ_e=0.0, σ_e=0.5, s_bar=1.0, s_init = 1.0):
    """Simulate one firm's productivity for T periods and return s_T.

    Implements the entry/exit dynamics of equation (10): a firm below the
    threshold s_bar exits and is replaced by an entrant with lognormal
    productivity e; otherwise productivity follows the Kesten update
    s' = a*s + b with lognormal a and b.
    """
    productivity = s_init
    for _ in range(T-1):
        # draw all three shocks every period, in a fixed order, so the
        # random stream does not depend on which branch is taken
        z_a = norm.rvs()
        z_b = norm.rvs()
        z_e = norm.rvs()
        if productivity < s_bar:
            # exit: the firm is replaced by a fresh entrant
            productivity = np.exp(μ_e + σ_e * z_e)
        else:
            # incumbent: Kesten-style multiplicative update plus increment
            growth = np.exp(μ_a + σ_a * z_a)
            increment = np.exp(μ_b + σ_b * z_b)
            productivity = growth * productivity + increment
    return productivity
# Draw M terminal productivities s_T, one per firm.
# BUG FIX: the original decorated this `for` loop with @njit(parallel=True),
# which is a SyntaxError (a decorator may only precede a def/class statement),
# and used numba's `prange`/`njit` without importing them. Run a plain loop;
# to accelerate, move the loop into a function and jit-compile that instead.
firms = np.empty(M)
for m in range(M):
    firms[m] = firm()
# Rank-size plot of the sample: an approximately straight line in the
# log-log rank-size plot indicates a Pareto (heavy) right tail.
fig, ax = plt.subplots()
qe.rank_size_plot(firms, ax, c=0.01)
plt.show()
# NOTE: the original then tried to plot `s`, but `s` is local to firm() and
# undefined at this scope (NameError); that broken plot has been removed.
# -
| homework/HarveyT47/kesten_processes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: U4-S3-DNN (Python 3.7)
# language: python
# name: u4-s3-dnn
# ---
# + [markdown] id="Z5osKTr3CnRb" colab_type="text"
# Lambda School Data Science
#
# *Unit 4, Sprint 3, Module 3*
#
# ---
# + [markdown] id="YtAqcG51CnRe" colab_type="text"
# # Autoencoders
#
# > An autoencoder is a type of artificial neural network used to learn efficient data codings in an unsupervised manner.[1][2] The aim of an autoencoder is to learn a representation (encoding) for a set of data, typically for dimensionality reduction, by training the network to ignore signal “noise”. Along with the reduction side, a reconstructing side is learnt, where the autoencoder tries to generate from the reduced encoding a representation as close as possible to its original input, hence its name.
# + [markdown] id="EEXXYehlCnRg" colab_type="text"
# ## Learning Objectives
# *At the end of the lecture you should be able to*:
# * <a href="#p1">Part 1</a>: Describe the components of an autoencoder
# * <a href="#p2">Part 2</a>: Train an autoencoder
# * <a href="#p3">Part 3</a>: Apply an autoencoder to a basic information retrieval problem
#
# __Problem:__ Is it possible to automatically represent an image as a fixed-sized vector even if it isn’t labeled?
#
# __Solution:__ Use an autoencoder
#
# Why do we need to represent an image as a fixed-sized vector do you ask?
#
# * __Information Retrieval__
# - [Reverse Image Search](https://en.wikipedia.org/wiki/Reverse_image_search)
# - [Recommendation Systems - Content Based Filtering](https://en.wikipedia.org/wiki/Recommender_system#Content-based_filtering)
# * __Dimensionality Reduction__
# - [Feature Extraction](https://www.kaggle.com/c/vsb-power-line-fault-detection/discussion/78285)
# - [Manifold Learning](https://en.wikipedia.org/wiki/Nonlinear_dimensionality_reduction)
#
# We've already seen *representation learning* when we talked about word embedding modelings during our NLP week. Today we're going to achieve a similar goal on images using *autoencoders*. An autoencoder is a neural network that is trained to attempt to copy its input to its output. Usually they are restricted in ways that allow them to copy only approximately. The model often learns useful properties of the data, because it is forced to prioritize which aspects of the input should be copied. The properties of autoencoders have made them an important part of modern generative modeling approaches. Consider autoencoders a special case of feed-forward networks (the kind we've been studying); backpropagation and gradient descent still work.
# + [markdown] id="Cu-r6vI4CnRi" colab_type="text"
# # Autoencoder Architecture (Learn)
# <a id="p1"></a>
# + [markdown] id="Le9Zkx2jCnRl" colab_type="text"
# ## Overview
#
# The *encoder* compresses the input data and the *decoder* does the reverse to produce the uncompressed version of the data to create a reconstruction of the input as accurately as possible:
#
# <img src='https://miro.medium.com/max/1400/1*44eDEuZBEsmG_TCAKRI3Kw@2x.png' width=800/>
#
# The learning process is described simply as minimizing a loss function:
# $ L(x, g(f(x))) $
#
# - $L$ is a loss function penalizing $g(f(x))$ for being dissimilar from $x$ (such as mean squared error)
# - $f$ is the encoder function
# - $g$ is the decoder function
# + [markdown] id="-HRp0o2zCnRl" colab_type="text"
# ## Follow Along
# ### Extremely Simple Autoencoder
# + id="u-0JBK36CnRy" colab_type="code" colab={}
# %load_ext tensorboard
# + id="hClFQf1MDAtF" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
import os
URL_ = "https://github.com/LambdaSchool/DS-Unit-4-Sprint-2-Neural-Networks/blob/main/quickdraw10.npz?raw=true"
# Download the quickdraw10 archive; get_file caches it locally so repeated
# runs do not re-download
path_to_zip = tf.keras.utils.get_file('./quickdraw10.npz', origin=URL_, extract=False)
# + id="Lwc82kH4CnR5" colab_type="code" colab={}
# Load the .npz archive: arr_0 holds the images, arr_1 the integer labels
data = np.load(path_to_zip)
X_train = data['arr_0']
y_train = data['arr_1']
print(X_train.shape)
print(y_train.shape)
# + id="AoAd1eY-CnSA" colab_type="code" colab={}
# Human-readable names for the 10 quickdraw classes, in label order
class_names = ['apple',
               'anvil',
               'airplane',
               'banana',
               'The Eiffel Tower',
               'The Mona Lisa',
               'The Great Wall of China',
               'alarm clock',
               'ant',
               'asparagus']
# + id="LpuQOvIOCnSF" colab_type="code" colab={}
import matplotlib.pyplot as plt
# Show one example from each class in a 2x5 grid
plt.figure(figsize=(10,5))
start = 0
for num, name in enumerate(class_names):
    plt.subplot(2,5, num+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    # NOTE(review): assumes the classes are stored in contiguous blocks of
    # 10,000 examples each (the data is not shuffled until a later cell) —
    # verify against the dataset layout
    plt.imshow(X_train[start].reshape(28,28), cmap=plt.cm.binary)
    plt.xlabel(name)
    # jump to the first example of the next class
    start += 10000
plt.show()
# + id="8DCNnW2uCnSL" colab_type="code" colab={}
from sklearn.utils import shuffle
# Shuffle images and labels together so training batches mix all classes
X_train, y_train = shuffle(X_train, y_train)
# + id="2Yp5d5jnDU2d" colab_type="code" colab={}
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import EarlyStopping, TensorBoard
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# + id="3gCnfmXQDYkZ" colab_type="code" colab={}
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# + id="x-JwESQ-DaUp" colab_type="code" colab={}
autoencoder.compile(optimizer='nadam', loss='binary_crossentropy')
# + id="M-f8wok1Db2A" colab_type="code" colab={}
X_train = X_train.astype('float32') / 255.
print(X_train.shape)
# + id="fahm2CjBDg7k" colab_type="code" colab={}
import os
import datetime
from tensorflow.keras.callbacks import TensorBoard
# tf.keras.callbacks.TesnorBoard()
stop = EarlyStopping(monitor='val_loss', min_delta=0.001, patience=2)
now = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
logdir = os.path.join("logs", f"SimpleAutoencoder-{now}")
tensorboard = TensorBoard(log_dir=logdir)
autoencoder.fit(X_train, X_train, # X_train twice!
epochs=10000,
batch_size=64,
shuffle=True,
validation_split=.2,
verbose = True,
callbacks=[stop, tensorboard])
# + id="ZWHFevSPDl60" colab_type="code" colab={}
# %tensorboard --logdir ./logs
# + id="5mSjOSXWDolk" colab_type="code" colab={}
# encode and decode some digits
decoded_imgs = autoencoder(X_train)
# + id="b_jzwe0nDp86" colab_type="code" colab={}
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(X_train[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].numpy().reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
# + [markdown] id="e8FRP87jCnSw" colab_type="text"
# ## Challenge
#
# Expected to talk about the components of autoencoder and their purpose.
# + [markdown] id="WYEjrxySCnSx" colab_type="text"
# # Train an Autoencoder (Learn)
# <a id="p2"></a>
# + [markdown] id="IXGgMJIvCnSx" colab_type="text"
# ## Overview
#
# As long as our architecture maintains an hourglass shape, we can continue to add layers and create a deeper network.
# + [markdown] toc-hr-collapsed=true id="PLh1jdp1CnSy" colab_type="text"
# ## Follow Along
# + [markdown] id="l414gL91CnSz" colab_type="text"
# ### Deep Autoencoder
# + id="SRxozSvYCnSz" colab_type="code" colab={}
# Fresh 784-dim input for the deep autoencoder (encoder/decoder layers to be added).
input_img = Input(shape=(784,))
# + id="z8BVgVl-CnS5" colab_type="code" colab={}
# compile & fit model
# + id="HcC6nGqKCnS9" colab_type="code" colab={}
# Visualize originals (top row) vs. reconstructions (bottom row).
# NOTE(review): `x_test` and a fresh `decoded_imgs` are not defined at this point
# in the notebook — this cell only works after the deep model above is completed.
import matplotlib.pyplot as plt
n = 10  # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# + [markdown] toc-hr-collapsed=true id="oQKWFNDLCnTB" colab_type="text"
# ### Convolutional autoencoder
#
# > Since our inputs are images, it makes sense to use convolutional neural networks (convnets) as encoders and decoders. In practical settings, autoencoders applied to images are always convolutional autoencoders --they simply perform much better.
#
# > Let's implement one. The encoder will consist in a stack of Conv2D and MaxPooling2D layers (max pooling being used for spatial down-sampling), while the decoder will consist in a stack of Conv2D and UpSampling2D layers.
# + id="iSY-9koWCnTD" colab_type="code" colab={}
from keras.layers import Input, Dense, Conv2D, MaxPooling2D, UpSampling2D
from keras.models import Model
from keras import backend as K
# Create Model
# NOTE(review): no convolutional model is actually built here yet — `autoencoder`
# still refers to the dense model from earlier. Define the Conv2D/UpSampling2D
# stack before compiling.
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# + id="FFfOK2A_CnTG" colab_type="code" colab={}
from keras.datasets import mnist
import numpy as np
# Load MNIST, scale to [0, 1], and add a trailing channel axis for Conv2D.
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = np.reshape(x_train, (len(x_train), 28, 28, 1))  # adapt this if using `channels_first` image data format
x_test = np.reshape(x_test, (len(x_test), 28, 28, 1))  # adapt this if using `channels_first` image data format
# + id="Ynz1wNb9CnTM" colab_type="code" colab={}
# Training template: the `...` placeholders are to be filled in by the student.
import os
import datetime
stop = EarlyStopping(monitor=..., min_delta=0.001, patience=2)
logdir = os.path.join("logs", datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
# Fixed: the callback class is `TensorBoard` (capital B) — `Tensorboard` raised a
# NameError; it is imported as TensorBoard in the simple-autoencoder section above.
tensorboard = TensorBoard(log_dir=logdir)
autoencoder.fit(..., ...,
                epochs=10000,
                batch_size=64,
                shuffle=True,
                validation_data=(..., ...),
                verbose = False,
                callbacks=...)
# + id="EWzO9eQUCnTS" colab_type="code" colab={}
# Visualize test originals (top row) against their reconstructions (bottom row).
decoded_imgs = autoencoder.predict(x_test)
n = 10
plt.figure(figsize=(20, 4))
for i in range(n):
    # display original
    # Fixed: matplotlib subplot indices are 1-based; `plt.subplot(2, n, i)` with
    # i == 0 raises a ValueError (the earlier viz cells correctly use i + 1).
    ax = plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
    # display reconstruction
    ax = plt.subplot(2, n, i + 1 + n)
    plt.imshow(decoded_imgs[i].reshape(28, 28))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# + [markdown] id="DIAClBUwCnTV" colab_type="text"
# #### Visualization of the Representations
# + id="bu2z5TGyCnTW" colab_type="code" colab={}
# Visualize the learned codes for the first n examples.
encoder = Model(input_img, encoded)
# Fixed: the prediction result was discarded, so `encoded_imgs` below raised a
# NameError — assign it.
encoded_imgs = encoder.predict(x_train)
n = 10
plt.figure(figsize=(20, 8))
for i in range(n):
    # Fixed: subplot indices are 1-based (i alone raised a ValueError at i == 0).
    ax = plt.subplot(1, n, i + 1)
    # NOTE(review): reshape(4, 32) assumes a 128-dim code; the dense model above
    # uses encoding_dim = 32 — confirm which encoder this cell targets.
    plt.imshow(encoded_imgs[i].reshape(4, 4 * 8).T)
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.show()
# + [markdown] id="kWk01C13CnTa" colab_type="text"
# ## Challenge
#
# You will train an autoencoder at some point in the near future.
# + [markdown] id="MGZVL11gCnTa" colab_type="text"
# # Information Retrieval with Autoencoders (Learn)
# <a id="p3"></a>
# + [markdown] id="zqHVvXDqCnTb" colab_type="text"
# ## Overview
#
# A common use case for autoencoders is reverse image search. Let's try to draw an image and see what's most similar in our dataset.
#
# To accomplish this we will need to slice our autoencoder in half to extract our reduced features. :)
# + [markdown] id="8vvNdHfuCnTc" colab_type="text"
# ## Follow Along
# + id="5-Ip2960CnTd" colab_type="code" colab={}
# Slice off the encoder and compute the compressed codes for all training images.
encoder = Model(input_img, encoded)
encoded_imgs = encoder.predict(x_train)
# + id="_Z-RB0FHCnTi" colab_type="code" colab={}
# Inspect one code vector.
encoded_imgs[0].T
# + id="hYTiGQpfCnTm" colab_type="code" colab={}
# Index the codes for nearest-neighbour (similarity) queries.
from sklearn.neighbors import NearestNeighbors
nn = NearestNeighbors(n_neighbors=10, algorithm='ball_tree')
nn.fit(encoded_imgs)
# + id="qii2tvxyCnTq" colab_type="code" colab={}
# Query with an encoded image (placeholder to be filled in).
nn.kneighbors(...)
# + [markdown] id="upH6twqACnTr" colab_type="text"
# ## Challenge
#
# You should already be familiar with KNN and similarity queries, so the key component of this section is know what to 'slice' from your autoencoder (the encoder) to extract features from your data.
# + [markdown] id="ICwm7L34CnTs" colab_type="text"
# # Review
#
# * <a href="#p1">Part 1</a>: Describe the components of an autoencoder
#     - Encoder
#     - Decoder
# * <a href="#p2">Part 2</a>: Train an autoencoder
#     - Can do in Keras Easily
#     - Can use a variety of architectures
#     - Architectures must follow hourglass shape
# * <a href="#p3">Part 3</a>: Apply an autoencoder to a basic information retrieval problem
#     - Extract just the encoder to use for various tasks
#     - AEs are good for dimensionality reduction, reverse image search, and many more things.
#
# + [markdown] id="Pdee5f87CnTu" colab_type="text"
# # Sources
#
# __References__
# - [Building Autoencoders in Keras](https://blog.keras.io/building-autoencoders-in-keras.html)
# - [Deep Learning Cookbook](http://shop.oreilly.com/product/0636920097471.do)
#
# __Additional Material__
| module3-autoencoders/LS_DS_433_Autoencoders_Lecture.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BalkisG77/pythoncode-tutorials/blob/master/credit_model_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="qG1XeyB5z5Yr"
#importer les packages
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pickle
# + colab={"base_uri": "https://localhost:8080/", "height": 456} id="4CmBax_J0ze7" outputId="47e6e0d3-cac6-49a1-e738-b3ba300a05a5"
# Read the loan-application training set.
df=pd.read_csv('/content/train_u6lujuX_CVtuZ9i.csv')
df
# + id="L10n1D411kqM"
# Temporarily show every row of the frame.
pd.set_option('display.max_rows',df.shape[0]+1)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="MW9UP8Qa17ES" outputId="a617cbaf-a587-40f8-a1e6-c5ca4b3041d7"
df
# + colab={"base_uri": "https://localhost:8080/", "height": 456} id="5fA5HLfT3JHp" outputId="d51a1896-788c-489f-9f80-388f1dc457db"
# Restore the default-ish truncated display.
pd.set_option('display.max_rows',10)
df
# + colab={"base_uri": "https://localhost:8080/"} id="r3S9TA3I3f3G" outputId="d6e5d020-23b6-413f-f22a-eff7ba699713"
# Inspect dtypes and missing values.
df.info()
# + colab={"base_uri": "https://localhost:8080/"} id="izxhD7Eb3yZZ" outputId="42a514f3-d3be-4037-9eec-4cf846576e7a"
# Missing-value counts, most-missing first.
df.isnull().sum().sort_values(ascending=False)
# + colab={"base_uri": "https://localhost:8080/", "height": 317} id="a-6h7sZw4DE5" outputId="6beaa55e-5253-4dce-ba78-1d52512be18f"
# Summary statistics for the numeric columns.
df.describe()
# + id="6Iirifw94aQd"
#df.describe(include='0')
# + colab={"base_uri": "https://localhost:8080/", "height": 439} id="XAlBD3Vt5Pzw" outputId="861c4c83-ca68-49ba-f718-367299a747ed"
# Split the raw frame into categorical (object-dtype) and numeric column groups,
# as a first step towards filling in the missing values separately.
cat_cols, num_cols = [], []
for col_idx, col_dtype in enumerate(df.dtypes):
    bucket = cat_cols if col_dtype == object else num_cols
    bucket.append(df.iloc[:, col_idx])
cat_data = pd.DataFrame(cat_cols).transpose()
num_data = pd.DataFrame(num_cols).transpose()
num_data
# + colab={"base_uri": "https://localhost:8080/"} id="52lUsRoG6V0Z" outputId="c0b47854-1e4d-4b60-e88c-42f95cb80b07"
df.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="aNBsoE_y7jbX" outputId="266879b2-1de1-465c-df87-dade07d98227"
# Categorical columns: fill missing values with each column's most frequent value.
cat_data=cat_data.apply(lambda x:x.fillna(x.value_counts().index[0]))
cat_data.isnull().sum().any()
# + colab={"base_uri": "https://localhost:8080/"} id="l93rmH0B8O9e" outputId="c856b9d8-600c-4a3d-f901-0696717f5720"
cat_data['Married'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="iatRUEY888to" outputId="1236cf1a-0ca2-4de3-c21b-1f3cac1cc1f2"
# Numeric columns: backward-fill missing values.
# NOTE(review): fillna(method=...) is deprecated in recent pandas; num_data.bfill() is the modern spelling.
num_data.fillna(method='bfill',inplace=True)
num_data.isnull().sum().any()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="viGyx3Ys90nt" outputId="4239d17c-223f-488a-a031-57601586e99a"
num_data
# + id="H0mUriC-B7K7"
# + id="9ZIKi5Wu9-KP"
# Map the target to 0/1.
# + colab={"base_uri": "https://localhost:8080/"} id="xvlbquJt-FAt" outputId="414798d7-6b1e-49d4-f728-bb9215115c54"
# Transform the target column: 'Y' (granted) -> 1, 'N' (refused) -> 0.
target_value={'Y':1,'N':0}
target=cat_data['Loan_Status']
cat_data.drop('Loan_Status', axis=1, inplace=True)
target=target.map(target_value)
target
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="Gq0flI5CA1EF" outputId="735dfd69-c415-4a07-dfc8-2a9d708ed424"
# Replace categorical string values with integer codes.
from sklearn.preprocessing import LabelEncoder
le=LabelEncoder()
for i in cat_data:
    cat_data[i]=le.fit_transform(cat_data[i])
cat_data
# + id="kIeoMqRfB8cb"
# Drop the identifier column — it carries no signal.
cat_data.drop('Loan_ID', axis=1,inplace=True)
# + id="5e8pb1JxCIfM"
# Concatenate the categorical and numeric parts into the feature matrix.
X=pd.concat([cat_data,num_data], axis=1)
y=target
# + colab={"base_uri": "https://localhost:8080/"} id="OBigHvOrCawj" outputId="7fd8d840-e9bb-4468-e2e9-fdefd25fa7a6"
y
# + id="SKRB2-xIOFlQ"
# Recombined frame (features + target) used for the EDA plots below.
df=pd.concat([cat_data,num_data, target], axis=1)
# + colab={"base_uri": "https://localhost:8080/"} id="JuYNN2kTL6s5" outputId="a6bc66ad-6aca-4c22-b7c9-b1c024e48614"
target.value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 477} id="HHGdLeZjMTKD" outputId="e048a352-7341-4e63-eab9-ae1dbde0a06e"
# Class balance of the target.
plt.figure(figsize=(8,6))
sns.countplot(target)
# Fixed: target encodes 1 = granted ('Y') and 0 = refused ('N'), and
# value_counts()[label] looks up by label — so the granted share is
# value_counts()[1]; the original printed the refused share as "accordés".
yes=target.value_counts()[1]/len(target)
no=target.value_counts()[0]/len(target)
print(f'le pourcentagedes crédits accordés est: {yes}')
print(f'le pourcentagedes crédits non accordés est: {no}')
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="sBaQNdYXNm3N" outputId="a96a85e6-69d9-42f3-fb8b-58e1a83b8f91"
# Credit history vs. loan status.
# Fixed: FacetGrid's `size` parameter was renamed `height` in seaborn 0.9 and
# later removed — `size=` raises a TypeError on current seaborn.
grid=sns.FacetGrid(df,col='Loan_Status', height=3.2, aspect=1.6)
grid.map(sns.countplot, 'Credit_History')
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="TNLro3RaPbGs" outputId="9a50f8d2-b024-4be3-b3d4-721010a2f9ae"
# Gender vs. loan status.
grid=sns.FacetGrid(df,col='Loan_Status', height=3.2, aspect=1.6)
grid.map(sns.countplot, 'Gender')
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="gamBW-htPxMx" outputId="58cac0b9-a5fe-4667-e1f8-9111e2f43591"
# Marital status vs. loan status (smaller gap than gender or credit history).
grid=sns.FacetGrid(df,col='Loan_Status', height=3.2, aspect=1.6)
grid.map(sns.countplot, 'Married')
# + colab={"base_uri": "https://localhost:8080/", "height": 345} id="z6rx29bCP3j_" outputId="144765b7-a555-4190-a630-ae05af4e6df4"
# Education vs. loan status (more-educated applicants get more loans).
grid=sns.FacetGrid(df,col='Loan_Status', height=3.2, aspect=1.6)
grid.map(sns.countplot, 'Education')
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="FTJWsoMvQN50" outputId="69d01437-3973-4a2c-e475-575707aa99ad"
# Applicant income vs. loan status: no visible concentration for either class.
plt.scatter(df['ApplicantIncome'], df['Loan_Status'])
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="UbterbhYQnN9" outputId="70d9b813-73f8-41ac-a75f-1c9876121902"
# Co-applicant income: likewise no visible impact.
plt.scatter(df['CoapplicantIncome'], df['Loan_Status'])
# + colab={"base_uri": "https://localhost:8080/", "height": 162} id="UhIHXE2vRK3l" outputId="1aab9812-a87f-4b55-c523-9ca94e4c05d3"
# Per-class medians (near-identical medians for granted vs. refused).
df.groupby('Loan_Status').median()
# + colab={"base_uri": "https://localhost:8080/"} id="U1LiWwuFSPhm" outputId="b697debd-cdd3-4f9c-8f7a-eb8c5fbe0320"
# Split into train/test sets, stratified on the target to keep class proportions.
# X_train size: (491
#
from sklearn.model_selection import StratifiedShuffleSplit
sss=StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train,test in sss.split(X,y):
    X_train,X_test=X.iloc[train],X.iloc[test]
    y_train,y_test=y.iloc[train],y.iloc[test]
print('X_train taille:', X_train.shape)
print('X_test taille:', X_test.shape)
print('y_train taille:', y_train.shape)
print('y_test taille:', y_test.shape)
# + id="SYYF5y3QS5pq"
# Three candidate algorithms: Logistic Regression, KNN, Decision Tree.
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier
# Accuracy metric.
from sklearn.metrics import accuracy_score
# + colab={"base_uri": "https://localhost:8080/"} id="CJ6fpSoNU7k9" outputId="285f859a-f304-4c0b-e635-7a33d3d94a15"
# Candidate models keyed by name; fixed random_state for reproducibility.
models={
    'LogisticRegression':LogisticRegression(random_state=42),
    'KNeighborsClassifier':KNeighborsClassifier(),
    'DecisionTreeClassifier':DecisionTreeClassifier(max_depth=1, random_state=42)
}
# Accuracy helper: prints the score by default, returns it when retu is True.
def accu(y_true,y_pred,retu=False):
    score = accuracy_score(y_true, y_pred)
    if retu:
        return score
    print(f'la precision du modèle est : {score}')
# Fit each candidate model on the training set and print its test-set accuracy.
def train_test_eval(models,X_train, y_train,X_test,y_test):
    # Fixed: the loop variable previously shadowed the `models` parameter
    # (it only worked because .items() is evaluated once) — renamed to `model`.
    for name,model in models.items():
        print(name,':')
        model.fit(X_train,y_train)
        accu(y_test,model.predict(X_test))
        print('-'*30)

train_test_eval(models,X_train, y_train,X_test,y_test)
# + [markdown] id="WXiAly-lYPp4"
# **la precision du modèle est **: 0.8536585365853658
# ------------------------------
# KNeighborsClassifier :
# **la precision du modèle est **: 0.6504065040650406
# ------------------------------
# DecisionTreeClassifier :
# la precision du modèle est **texte en gras**: 0.8455284552845529
#
# le meilleur model c'est la regression logistique 85% =>la meilleur performance
#
# avant d'implanter le modele
# il faut trouver 11 variable pour trouver la réponse
# on va choisir les variables qu'ils ont plus d'impact sur le résultat on réalisant une nouvelles base de données X_2
# + id="MWXUztOPYTjR"
# Reduced feature set: the three columns judged most predictive in the EDA above.
X_2= X[['Credit_History', 'Married', 'CoapplicantIncome']]
# + colab={"base_uri": "https://localhost:8080/"} id="MI1Y9QcgZfyK" outputId="ce7a1125-3985-430b-90ed-4e870b1fc637"
# Stratified split for X_2 (same seed as before for comparability).
sss=StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train,test in sss.split(X_2,y):
    X_train,X_test=X_2.iloc[train],X_2.iloc[test]
    y_train,y_test=y.iloc[train],y.iloc[test]
print('X_train taille:', X_train.shape)
print('X_test taille:', X_test.shape)
print('y_train taille:', y_train.shape)
print('y_test taille:', y_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="ZE5u4nqXZm1m" outputId="3f7f939b-5295-409b-cbcb-961c2057124a"
# Re-evaluate all three models on the reduced feature set.
train_test_eval(models,X_train, y_train,X_test,y_test)
# + id="8Qt6t5laaD38"
# Deploy the model in a Flask application.
# + colab={"base_uri": "https://localhost:8080/"} id="KBS3gocAaiFg" outputId="b4d86a9b-d3a4-4da1-b5db-3687bf6334f7"
# Refit logistic regression on the full dataset for deployment.
Classifier=LogisticRegression()
Classifier.fit(X,y)
# + id="TiV0RuFma4P9"
# Serialize the fitted model.
# Fixed: the bare open() handle was never closed/flushed explicitly — use a
# context manager so the pickle is fully written even on error.
with open('model.pkl', 'wb') as fh:
    pickle.dump(Classifier, fh)
| credit_model_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cvxpy as cvx
import numpy as np
import pandas as pd
# %matplotlib inline
# -
# # Exploratory data analysis
# One row per breakfast item; nutrient columns are per gram.
items = pd.read_csv('breakfast_items.csv', index_col='item')
items
# ## Bar plot of energy per gram
items['energy'].sort_values(ascending=False).plot.bar(color='darkblue')
# ## Bar plot of cost per gram
items['cost'].sort_values(ascending=False).plot.bar(color='darkblue')
# ## Bar plot of energy per pound sterling
(items['energy'] / items['cost']).sort_values(ascending=False).plot.bar(color='darkblue')
# # Modelling
# Define the decisions variables representing the amounts (in gram) for each product.
x = cvx.Variable(items.shape[0], nonneg=True)
# Define some constraints on energy and nutrients.
# Breakfast budget: a quarter of a 2500 kcal day; fat limits divide by 9 kcal/g,
# carbohydrate/protein limits by 4 kcal/g.
total_energy = 2500 / 4
max_fat = total_energy * 0.35 / 9
max_saturated_fat = total_energy * 0.11 / 9
max_carbs = total_energy * 0.5 / 4
max_sugar = total_energy * 0.05 / 4
min_fibre = 30 / 4
min_protein = 70 * 0.75 / 4
max_salt = 6 / 4
# NOTE(review): `x * items[col]` relies on `*` behaving elementwise; newer cvxpy
# prefers cvx.multiply, though the summed value is identical either way here.
constraints = [
    cvx.sum(x * items['energy']) == total_energy,
    cvx.sum(x * items['fat']) <= max_fat,
    cvx.sum(x * items['saturated_fat']) <= max_saturated_fat,
    cvx.sum(x * items['carbs']) <= max_carbs,
    cvx.sum(x * items['sugar']) <= max_sugar,
    cvx.sum(x * items['fibre']) >= min_fibre,
    cvx.sum(x * items['protein']) >= min_protein,
    cvx.sum(x * items['salt']) <= max_salt,
]
# Define and solve the problem of minimising total cost subject to `constraints`.
problem = cvx.Problem(cvx.Minimize(cvx.sum(x * items['cost'])), constraints)
problem.solve()
# Check the solution: grams of each item, then the nutrient totals it implies.
pd.DataFrame({'quantity': np.round(x.value, 2)}, index=items.index).sort_values('quantity', ascending=False)
pd.DataFrame({'value': x.value @ items}, index=items.columns)
| notebooks/01_LP_Breakfast.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Práctico 2 - Redes en escalera avanzadas
#
# Este práctico es similar al práctico 1, pero agregará un paso extra que es el uso de redes en escalera avanzadas, ya sean Redes Convolucionales o Redes Recurrentes.
#
# Se les dará, como base, el mismo conjunto de datos de la competencia "PetFinder" que se trabajó para el práctico 1, con el agregado de, en este caso, utilizar la descripción como un feature extra y todo el procesamiento que ello requiere.
#
# Ahora bien, no es el único conjunto de datos que pueden trabajar. Si tienen un conjunto propio de datos que quieran utilizar y dicho conjunto se preste para el uso de alguna red en escalera avanzada (e.g. conjuntos que tengan imágenes o texto), son libres de hacerlo.
# +
import nltk
import numpy as np
import os
import pandas as pd
import tensorflow as tf
from IPython.display import SVG
from gensim import corpora
from nltk import word_tokenize
from nltk.corpus import stopwords
from pprint import pprint
nltk.download(["punkt", "stopwords"]);
# -
# ## Carga de los datos
# Root directory holding the PetFinder CSV files.
DATA_DIRECTORY = '../petfinder_dataset/'
# +
dataset = pd.read_csv(os.path.join(DATA_DIRECTORY, 'train.csv'))
# Multiclass target: adoption-speed category.
target_col = 'AdoptionSpeed'
nlabels = dataset[target_col].unique().shape[0]
dataset.head(3)
# -
# ## Preproceso del texto para agregarlo como feature (manejo de secuencias)
#
# A diferencia del práctico anterior, en este caso es necesario utilizar el texto como feature extra. Pueden luego agregarlo a una red recurrente o convolucional y concatenar su salida a los atributos "escalares" (como "raza" o "género").
#
# A continuación les mostraremos los pasos a seguir para ello. La descripción detallada de para que sirve cada paso se encuentra disponible en el [notebook 3](./3_cnns.ipynb).
#
# ### Tokenización
# +
# English stop words to drop from the pet descriptions.
SW = set(stopwords.words("english"))

def tokenize_description(description):
    """Tokenize a description, lowercase it and drop English stop words."""
    lowered = (w.lower() for w in word_tokenize(description, language="english"))
    return [w for w in lowered if w not in SW]

# Fill the null values with the empty string to avoid errors with NLTK tokenization
dataset["TokenizedDescription"] = dataset["Description"].fillna(value="").apply(tokenize_description)
# -
# #### Tamaño de las descripciones
#
# Un punto importante a tener en cuenta es que las descripciones tienen tamaño variable, y esto no es compatible con los algoritmos de aprendizaje automático. Por lo que hay que llevar las secuencias a un tamaño uniforme.
#
# Para definir dicho tamaño uniforme, es útil mirar qué tamaños mínimos, máximos y medios manejan las descripciones y a partir de esto establecer el tamaño máximo de la secuencia.
# Distribution of description lengths (in tokens) to pick a truncation length.
pprint(dataset["TokenizedDescription"].apply(len).describe())
# Over 75% of the sequences have 55 words or fewer, so cap sequences there.
MAX_SEQUENCE_LEN = 55
# ## Vocabulary
# Build a token -> id mapping, keeping at most the 10k most frequent tokens.
vocabulary = corpora.Dictionary(dataset["TokenizedDescription"])
vocabulary.filter_extremes(no_below=1, no_above=1.0, keep_n=10000)
# ## Word Embeddings (GloVe)
# +
# Load pre-trained GloVe vectors, keeping only words present in our vocabulary.
embeddings_index = {}
with open("./dataset/glove.6B.100d.txt", "r") as fh:
    for line in fh:
        values = line.split()
        word = values[0]
        if word in vocabulary.token2id:  # Only use the embeddings of words in our vocabulary
            coefs = np.asarray(values[1:], dtype='float32')
            embeddings_index[word] = coefs
print("Found {} word vectors.".format(len(embeddings_index)))
# -
# ## Creación de los datasets
#
# Similar al práctico anterior, tendremos datos que serán "one-hot-encoded", otros serán "embeddings" y otros serán numéricos.
#
# El caso particular del texto es que será tratado como una secuencia de embeddings, y dichos embeddings no serán entrenados en conjunto con la red, sino que serán tomados de un modelo "pre-entrenado". En este caso utilizamos GloVe, pero podríamos haber utilizado otro modelo (e.g. FastText).
# It's important to always use the same one-hot length
# Categorical codes start at 1, so the column max is the number of categories.
one_hot_columns = {
    one_hot_col: dataset[one_hot_col].max()
    for one_hot_col in ['Gender', 'Color1']
}
# +1 because embedding input ids range over [0, max] inclusive.
embedded_columns = {
    embedded_col: dataset[embedded_col].max() + 1
    for embedded_col in ['Breed1']
}
numeric_columns = ['Age', 'Fee']
# ## Generador del conjunto de datos
#
# Dada la naturaleza de los datos de texto, y que estos representan una secuencia de datos (que se da luego a una red recurrente o convolucional), en este caso no crearemos los datasets de antemano, sino que los generaremos a medida que el algoritmo de entrenamiento los pida.
#
# En particular, es porque las secuencias de texto pueden no tener el mismo tamaño (las oraciones tienen diferente cantidad de palabras), pero para que los modelos de redes las acepten, necesitamos rellenarlas (*padding*) de manera que todas tengan el mismo tamaño.
#
# En este paso también vamos a truncar aquellas secuencias de descripciones con más de `MAX_SEQUENCE_LEN` palabras, de manera que al hacer uso de `padded_batch` no lance un error al encontrarse con secuencias de tamaño mayor.
# +
def dataset_generator(ds, test_data=False):
    """Yield (instance, target) pairs — or just instances when test_data is True.

    Each instance is a dict with one-hot "direct_features", one entry per
    embedded column, and "description" as a list of vocabulary indices.
    """
    for _, row in ds.iterrows():
        instance = {}

        # One hot encoded features (codes start at 1, hence the -1 shift).
        instance["direct_features"] = np.hstack([
            tf.keras.utils.to_categorical(row[one_hot_col] - 1, max_value)
            for one_hot_col, max_value in one_hot_columns.items()
        ])

        # Numeric features (should be normalized beforehand)
        # TODO: Add numeric features for row

        # Embedded features: raw integer ids, wrapped in a list for shape (1,).
        for embedded_col in embedded_columns:
            instance[embedded_col] = [row[embedded_col]]

        # Document to indices for text data, truncated at MAX_SEQUENCE_LEN words;
        # out-of-vocabulary tokens map to len(vocabulary).
        instance["description"] = vocabulary.doc2idx(
            row["TokenizedDescription"],
            unknown_word_index=len(vocabulary)
        )[:MAX_SEQUENCE_LEN]

        # One hot encoded target for categorical crossentropy
        if not test_data:
            target = tf.keras.utils.to_categorical(row[target_col], nlabels)
            yield instance, target
        else:
            yield instance

# Set output types of the generator (for numeric types check the type is valid)
instance_types = {
    "direct_features": tf.float32,
    "description": tf.int32
}
for embedded_col in embedded_columns:
    instance_types[embedded_col] = tf.int32

# Wrap the generator as a tf.data pipeline over the training frame.
tf_dataset = tf.data.Dataset.from_generator(
    lambda: dataset_generator(dataset),
    output_types=(instance_types, tf.int32)
)

# Sanity check: print the first two instances.
for data, target in tf_dataset.take(2):
    pprint(data)
    pprint(target)
    print()
# -
# ## Datos de entrenamiento y validación
#
# Ya generado el conjunto de datos base, tenemos que dividirlo en entrenamiento y validación. Además, como vamos a utilizar algunos datos que forman secuencias, los lotes (*batches*) de datos deben estar "rellenados" (*padded_batch*).
#
# Si bien rellenaremos "todos" los atributos, en la práctica el único que efectivamente se rellenará es el de *description* pues es el único con tamaños distintos.
# +
# 80/20 train/dev split; batches are padded so variable-length descriptions align.
TRAIN_SIZE = int(dataset.shape[0] * 0.8)
DEV_SIZE = dataset.shape[0] - TRAIN_SIZE
BATCH_SIZE = 128

shuffled_dataset = tf_dataset.shuffle(TRAIN_SIZE + DEV_SIZE, seed=42)

# Pad the datasets to the max value for all the "non sequence" features
padding_shapes = (
    {k: [-1] for k in ["direct_features"] + list(embedded_columns.keys())},
    [-1]
)

# Pad to MAX_SEQUENCE_LEN for sequence features
padding_shapes[0]["description"] = [MAX_SEQUENCE_LEN]

# Pad values are irrelevant for non padded data
padding_values = (
    {k: 0 for k in list(embedded_columns.keys())},
    0
)

# Padding value for direct features should be a float
padding_values[0]["direct_features"] = np.float32(0)

# Padding value for sequential features is the vocabulary length + 1
# (a reserved id pointing at the all-zeros row of the embedding matrix).
padding_values[0]["description"] = len(vocabulary) + 1

train_dataset = shuffled_dataset.skip(DEV_SIZE)\
    .padded_batch(BATCH_SIZE, padded_shapes=padding_shapes, padding_values=padding_values)

dev_dataset = shuffled_dataset.take(DEV_SIZE)\
    .padded_batch(BATCH_SIZE, padded_shapes=padding_shapes, padding_values=padding_values)
# -
# ## Construyendo el modelo
#
# Al modelo anterior tenemos que agregarle la capa que maneje los embeddings de las palabras, e inicializarla de manera acorde, podemos guiarnos por lo visto en el [notebook 3](./3_cnns.ipynb) para hacer esto.
#
# ### Matriz de embeddings de palabras
# +
EMBEDDINGS_DIM = 100  # Given by the model (in this case glove.6B.100d)

# Rows cover all vocabulary ids plus one row for unknown words (len(vocabulary))
# and one all-zeros row for padding (len(vocabulary) + 1).
# Fixed: the dimension was hard-coded as 100 in three places despite the
# EMBEDDINGS_DIM constant defined above — use the constant consistently.
embedding_matrix = np.zeros((len(vocabulary) + 2, EMBEDDINGS_DIM))

for widx, word in vocabulary.items():
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[widx] = embedding_vector
    else:
        # Random normal initialization for words without embeddings
        embedding_matrix[widx] = np.random.normal(size=(EMBEDDINGS_DIM,))

# Random normal initialization for unknown words
embedding_matrix[len(vocabulary)] = np.random.normal(size=(EMBEDDINGS_DIM,))
# -
# ### Definiendo los inputs del modelo
#
# Definamos los inputs del modelo, con el agregado de la capa de embeddings de palabras inicializada en `embedding_matrix`.
# +
tf.keras.backend.clear_session()

# Add one input and one embedding for each embedded column
embedding_layers = []
inputs = []
for embedded_col, max_value in embedded_columns.items():
    input_layer = tf.keras.layers.Input(shape=(1,), name=embedded_col)
    inputs.append(input_layer)
    # Define the embedding layer; heuristic size: a quarter of the cardinality.
    embedding_size = int(max_value / 4)
    # Squeeze drops the length-1 sequence axis so the embedding is a flat vector.
    embedding_layers.append(
        tf.squeeze(
            tf.keras.layers.Embedding(
                input_dim=max_value,
                output_dim=embedding_size
            )(input_layer),
            axis=-2
        )
    )
    print('Adding embedding of size {} for layer {}'.format(embedding_size, embedded_col))

# Add the direct features already calculated (concatenated one-hot vectors).
direct_features_input = tf.keras.layers.Input(
    shape=(sum(one_hot_columns.values()),),
    name='direct_features'
)
inputs.append(direct_features_input)

# Word embedding layer: frozen (trainable=False) pre-trained GloVe weights.
description_input = tf.keras.layers.Input(shape=(MAX_SEQUENCE_LEN,), name="description")
inputs.append(description_input)

word_embeddings_layer = tf.keras.layers.Embedding(
    embedding_matrix.shape[0],
    EMBEDDINGS_DIM,
    weights=[embedding_matrix],
    input_length=MAX_SEQUENCE_LEN,
    trainable=False,
    name="word_embedding"
)(description_input)
# -
# ### Definiendo la red que trabajará con el texto
#
# Antes de generar el *feature map* final entre los inputs y las clases, tenemos que generar el *feature map* de las secuencias de texto.
#
# Para ello pueden utilizar una red neuronal recurrente o convolucional.
#
# Pueden pensar dicha red como un submodelo del modelo general que se encarga de generar los atributos que representan la descripción de la mascota (recordemos que las redes se utilizan para hacer aprendizaje de representaciones).
#
# La red puede ser tan compleja como ustedes lo consideren pertinente.
# +
## TODO: Create a NN (CNN or RNN) for the description input (replace the next)
# Placeholder text sub-network: flatten the word embeddings (simple concatenation)
# and project through a dense layer — to be replaced by a CNN/RNN by the student.
DESCRIPTION_FEATURES_LAYER_SIZE = 512

description_features = tf.keras.layers.Flatten()(word_embeddings_layer)  # This is a simple concatenation
description_features = tf.keras.layers.Dense(
    units=DESCRIPTION_FEATURES_LAYER_SIZE,
    activation="relu",
    name="description_features")(description_features)
# -
# ### Definiendo el *feature map* final de la red
#
# Ahora que tenemos nuestra representación de las descripciones, pasamos a combinarlo con los demás features en la última parte de nuestra red.
# +
HIDDEN_LAYER_SIZE = 128

# Combine entity embeddings, description features and one-hot features,
# then classify into the nlabels adoption-speed categories.
feature_map = tf.keras.layers.Concatenate(name="feature_map")(
    embedding_layers + [description_features, direct_features_input]
)
hidden_layer = tf.keras.layers.Dense(HIDDEN_LAYER_SIZE, activation="relu")(feature_map)
output_layer = tf.keras.layers.Dense(nlabels, activation="softmax", name="output")(hidden_layer)

model = tf.keras.models.Model(inputs=inputs, outputs=[output_layer], name="amazing_model")
# -
# ### Compiling and visualizing the model
model.compile(loss='categorical_crossentropy',
              optimizer='nadam',
              metrics=['accuracy'])
model.summary()
SVG(tf.keras.utils.model_to_dot(model, dpi=60).create(prog='dot', format='svg'))
# ## Entrenando el modelo
#
# Para entrenar el modelo es igual al caso anterior, ya generados el conjunto de datos correspondiente. Lo entrenamos con ayuda de `mlflow`.
# +
import mlflow

mlflow.set_experiment('awesome_advanced_approach')

# Log hyperparameters, train, then evaluate on the dev split — all tracked in mlflow.
with mlflow.start_run(nested=True):
    # Log model hiperparameters first
    mlflow.log_param('description_features_layer_size', DESCRIPTION_FEATURES_LAYER_SIZE)
    mlflow.log_param('hidden_layer_size', HIDDEN_LAYER_SIZE)
    mlflow.log_param('embedded_columns', embedded_columns)
    mlflow.log_param('one_hot_columns', one_hot_columns)
    # mlflow.log_param('numerical_columns', numerical_columns)  # Not using these yet

    # Train
    epochs = 10
    history = model.fit(train_dataset, epochs=epochs)

    # Evaluate
    loss, accuracy = model.evaluate(dev_dataset, verbose=0)
    print("\n*** Validation loss: {} - accuracy: {}".format(loss, accuracy))
    mlflow.log_metric('epochs', epochs)
    mlflow.log_metric('train_loss', history.history["loss"][-1])
    mlflow.log_metric('train_accuracy', history.history["accuracy"][-1])
    mlflow.log_metric('validation_loss', loss)
    mlflow.log_metric('validation_accuracy', accuracy)
# -
# ## Evaluando el modelo sobre los datos de evaluación para la competencia
#
# Una vez que tenemos definido nuestro modelo, el último paso es ponerlo a prueba en los datos de evaluación para generar un archivo para enviar a la competencia Kaggle.
#
# Comenzamos cargando el conjunto de datos.
test_dataset = pd.read_csv(os.path.join(DATA_DIRECTORY, 'test.csv'))
test_dataset.head()
# ## Creamos el conjunto de datos para darle al modelo entrenado
#
# Tenemos que preprocesar los datos de evaluación de la misma manera que preprocesamos los de entrenamiento (para que sean compatibles con lo esperado por el modelo). Por suerte, es tan simple como hacer un par de modificaciones a lo ya hecho previamente. Lo único que tenemos que tener en cuenta es que ahora el conjunto de datos no generará una etiqueta.
# +
# First tokenize the description
test_dataset["TokenizedDescription"] = test_dataset["Description"]\
.fillna(value="").apply(tokenize_description)
# Generate the basic TF dataset
tf_test_dataset = tf.data.Dataset.from_generator(
lambda: dataset_generator(test_dataset, True),
output_types=instance_types # It should have the same instance types
)
for data in tf_test_dataset.take(2): # The dataset only returns a data instance now (no target)
pprint(data)
print()
# -
# ## Padding batches
#
# Por último, y previo a probar el modelo sobre los datos de evaluación, generamos el conjunto de datos "rellenado".
#
# A diferencia de los datos de entrenamiento y validación, en este caso no permutamos las instancias, pues necesitamos saber a que `PID` pertenece cada una.
#
# Por otra parte, utilizamos los mismos valores de `padding_shapes` y `padding_values` para el primer componente (el de los datos), ignorando el valor del segundo componente (el de las etiquetas).
test_data = tf_test_dataset.padded_batch(
BATCH_SIZE,
padded_shapes=padding_shapes[0],
padding_values=padding_values[0]
)
# ## Correr el modelo
#
# El último paso es correr el modelo sobre los datos de evaluación para conseguir las predicciones a enviar a la competencia.
# +
test_dataset["AdoptionSpeed"] = model.predict(test_data).argmax(axis=1)
test_dataset.to_csv("./submission.csv", index=False, columns=["PID", "AdoptionSpeed"])
| archive/2019/consigna_practico_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="Bowg-w0P315w"
# %matplotlib inline
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# + id="2KeC2N0q3158"
#Load dataset
df_train = pd.read_csv("input/df_train.csv", index_col=0)
df_test = pd.read_csv("input/df_test.csv", index_col=0)
#Insurance dataset
copy_train = df_train.copy()
copy_test = df_test.copy()
# + id="TaiWw7o8315-"
sns.set(style="darkgrid") # set seaborn style
# + colab={"base_uri": "https://localhost:8080/"} id="6CNp8Oip316A" outputId="4df7f6e1-99be-4e3d-d65c-cb3f084339d6"
df_train.isna().sum()
# + colab={"base_uri": "https://localhost:8080/"} id="rdFdSVfK316C" outputId="c42d3231-881f-4e84-e65f-a2b28f43a950"
print(f'Train data shape: {df_train.shape} | Test data shape: {df_test.shape}')
# + colab={"base_uri": "https://localhost:8080/"} id="aQ3TlKKp316E" outputId="d6b15148-55c3-4005-d3c0-0c26d24bc348"
missing_values_train = df_train.Valencia_pressure.isna().sum()
print(f'Train data Missing Values: {missing_values_train} | Percentage: {round(( missing_values_train/ df_train.Valencia_pressure.shape[0]) *100, 2)}%')
# + id="NlIL1RMyCULF"
# + colab={"base_uri": "https://localhost:8080/"} id="JV9-g-OJ316G" outputId="d59c38d8-ef0b-472c-8492-3b1bc222a5c4"
# Count missing Valencia_pressure values in the *test* split.
missing_values_test = df_test.Valencia_pressure.isna().sum()
# Bug fix: the original printed "Train data" for the test split and divided by
# the *train* row count; the percentage must be relative to the test set.
print(f'Test data Missing Values: {missing_values_test} | Percentage: {round((missing_values_test / df_test.Valencia_pressure.shape[0]) * 100, 2)}%')
# + id="thJwBgQR316K"
mode = pd.concat([df_train.Valencia_pressure , df_test.Valencia_pressure]).mode()
# + id="OkdOpPRS316N"
#Impute missing values in Valencia_pressure with mode
df_train.Valencia_pressure.fillna(mode[0] , inplace=True)
df_test.Valencia_pressure.fillna(mode[0], inplace=True)
# + colab={"base_uri": "https://localhost:8080/"} id="KTUemrOx316Q" outputId="567d9d0a-154b-4dc0-e90d-a2d96823da40"
missing_values_train = df_train.Valencia_pressure.isna().sum()
print(f'Train data Missing Values: {missing_values_train} | Percentage: {round(( missing_values_train/ df_train.Valencia_pressure.shape[0]) *100, 2)}%')
# + id="b7Ky0Y0q316S"
# + colab={"base_uri": "https://localhost:8080/"} id="iOz-SUbc316T" outputId="b6bf3f88-4567-4a39-f4f9-4abc3dddcc55"
print(f'Sum of unique object: {df_train.Valencia_wind_deg.value_counts().count()}')
df_train.Valencia_wind_deg.unique()
# + colab={"base_uri": "https://localhost:8080/"} id="Zkfhmsm3316U" outputId="dfc70b39-ab47-44fd-9ddc-b479ce4dc486"
print(f'Sum of unique object: {df_train.Seville_pressure.value_counts().count()}')
df_train.Seville_pressure.unique()
# + id="mAZ8VAqf316V"
# + id="nXA8dD09316W"
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder , StandardScaler , MinMaxScaler ,RobustScaler
from sklearn.model_selection import train_test_split , cross_validate
# + id="0EGXuo9j316Y"
# Encode categorical features using OrdinalEncoder().
# Bug fix: the original called fit_transform separately on the train and the
# test split, so each split could receive a different (inconsistent)
# category -> integer mapping. Fit one encoder per column on the combined
# values, then transform both splits with that shared mapping.
# + id="c95rxjFo316Y"
for cat_col in ['Valencia_wind_deg', 'Seville_pressure']:
    enc = OrdinalEncoder()
    enc.fit(pd.concat([df_train[[cat_col]], df_test[[cat_col]]], axis=0))
    df_train[cat_col] = enc.transform(df_train[[cat_col]])
    df_test[cat_col] = enc.transform(df_test[[cat_col]])
# + colab={"base_uri": "https://localhost:8080/"} id="KXC826Ck316b" outputId="a31d99c3-1919-4ab0-d702-d135fee15ae3"
df_test.dtypes.value_counts()
# + id="MTPWQrGN316c"
# + id="FqOU3H3Y316c"
# Transform Features
import datetime as dt
df_train['time'] = pd.to_datetime(df_train['time'])
df_test['time'] = pd.to_datetime(df_test['time'])
df_test_copy = df_test.copy()
# day
df_train['Day'] = df_train['time'].dt.day
df_test['Day'] = df_test['time'].dt.day
# month
df_train['Month'] = df_train['time'].dt.month
df_test['Month'] = df_test['time'].dt.month
# year
df_train['Year'] = df_train['time'].dt.year
df_test['Year'] = df_test['time'].dt.year
# hour
df_train['Start_hour'] = df_train['time'].dt.hour
df_test['Start_hour'] = df_test['time'].dt.hour
# Drop Feature
df_train.drop(['time'] , axis=1 , inplace=True)
df_test.drop(['time'] , axis=1 , inplace=True)
# + id="DQUQrBUk316e"
columns = df_train.drop(['load_shortfall_3h'] , axis=1).columns
# + colab={"base_uri": "https://localhost:8080/", "height": 224} id="u42xAR1q316f" outputId="ddf8e314-561f-48b7-a399-964657db5188"
df_train.head()
# + id="0pXX8B4-316g"
#Scale the dataset: fit the StandardScaler on the training data only.
scaler = StandardScaler()
scaled_features = scaler.fit_transform(df_train.drop(['load_shortfall_3h'], axis=1).values)
# Bug fix: the original re-fitted the scaler on the test data (fit_transform),
# so train and test were standardised with different means/scales and test
# statistics leaked into preprocessing. Reuse the train-fitted scaler instead.
scaled_features_test = scaler.transform(df_test.values)
# + id="zqjUiNdn316h"
df_train_scaled = pd.DataFrame(scaled_features, index=df_train.index, columns=columns)
df_test_scaled = pd.DataFrame(scaled_features_test, index=df_test.index , columns=columns)
# + id="TcaTdyIy316i"
#Add load_short_fall_3h as last_columns on training data
df_train_scaled['load_shortfall_3h'] = copy_train.load_shortfall_3h.values
# + id="BZevUSGp316k"
#Perform a test_train_split
X = df_train_scaled.drop(['load_shortfall_3h'], axis=1)
y = df_train_scaled.load_shortfall_3h
# + colab={"base_uri": "https://localhost:8080/"} id="T3ZQt8eq316l" outputId="b63f5468-d22d-436c-fb49-3c3c445d35f4"
X.head()
# + [markdown] id="jg5fQTWO316m"
# ## Feature Seletion
# + id="j1jeMSsj3161"
#Recursive Feature Selection (RFE) with Tree based and Gradient based Estimators
#Build the Model
import pickle
from sklearn.svm import SVR
from sklearn.linear_model import LinearRegression, Ridge , Lasso
from sklearn.ensemble import RandomForestRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.feature_selection import SelectFromModel
from sklearn.metrics import r2_score , mean_squared_error
from sklearn import metrics
#OLS summary
import statsmodels.formula.api as sm
# + id="4_NIAg_K3162"
#Train Test Split
X = df_train.drop(['load_shortfall_3h'],axis=1)
y = df_train.load_shortfall_3h
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# + colab={"base_uri": "https://localhost:8080/"} id="hUZssTHH3163" outputId="1ae8539c-7d27-4227-c349-d3c38e749688"
X_train.shape
# + [markdown] id="Luzs2-Vm3164"
# ### Feature Selection: Feature Importance of Random Forest Regressor
# + colab={"base_uri": "https://localhost:8080/"} id="C1O9alkX3164" outputId="fc7d8153-6fb7-47ea-ab49-4d0b59c0fe75"
selct_important = SelectFromModel(RandomForestRegressor(n_estimators=100 , random_state=0 , n_jobs=1))
selct_important.fit(X_train , y_train)
selct_important.get_support()
# + colab={"base_uri": "https://localhost:8080/"} id="CBR2-KA93165" outputId="55a9ad60-909f-4c07-a378-715e43b39b3d"
features = X_train.columns[(selct_important.get_support())]
len(features)
# + colab={"base_uri": "https://localhost:8080/"} id="jJQMapQQ3166" outputId="21672638-8d24-4f18-8e33-a4793c61fa35"
features
# + colab={"base_uri": "https://localhost:8080/"} id="JQjD_net3166" outputId="9803cc8c-9e6f-4f1e-a45c-746bc508e0f1"
np.mean(selct_important.estimator_.feature_importances_)
# + colab={"base_uri": "https://localhost:8080/", "height": 285} id="dN_UUkjG3167" outputId="06d852c7-584e-48b9-bd38-b07867a54d8f"
pd.Series(selct_important.estimator_.feature_importances_.ravel()).hist()
# + colab={"base_uri": "https://localhost:8080/"} id="-7AyOO0v3168" outputId="5097180a-c27a-4115-9784-119b55077b14"
selct_important.estimator_.feature_importances_
# + id="2EFmzEEP3169"
X_train_sel = selct_important.transform(X_train)
X_test_sel = selct_important.transform(X_test)
# -
import xgboost as xgb
selct_best_xg = SelectFromModel(xgb.XGBRegressor(objective ='reg:squarederror', n_estimators = 100, random_state=0 , n_jobs=1))
selct_best_xg.fit(X_train , y_train)
selct_best_xg.get_support()
b = X_train.columns[(selct_best_xg.get_support())]
len(b)
b
X_train_xg = selct_best_xg.transform(X_train)
X_test_xg = selct_best_xg.transform(X_test)
# + id="I8z2QRqR316-"
def _xgboost_train(X_train, X_test, y_train, y_test):
    """Fit an XGBoost regressor on the training split and print RMSE/R2 scores.

    Bug fix: the original computed the "train" metrics by comparing the
    *test-set* predictions against an arbitrary slice of y_train
    (``y_train[:2892]``), which pairs unrelated rows. Train metrics now use
    predictions on X_train against the full y_train.
    """
    forest = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=100, random_state=0, n_jobs=1)
    forest.fit(X_train, y_train)
    train_pred = forest.predict(X_train)
    test_pred = forest.predict(X_test)
    print(f'Train RMSE: {np.sqrt(metrics.mean_squared_error(y_train, train_pred))}')
    print(f'R Score Train: {r2_score(y_train, train_pred)}')
    print(f'Test RMSE: {np.sqrt(metrics.mean_squared_error(y_test, test_pred))}')
    print(f'R Score Test: {r2_score(y_test, test_pred)}')
# -
def _randomForest_train(X_train, X_test, y_train, y_test):
    """Fit a RandomForest regressor on the training split and print RMSE/R2.

    Bug fixes:
    - The function was misspelled ``_randonForest_train`` while every call
      site in this notebook uses ``_randomForest_train``, raising NameError.
    - Train metrics compared test predictions with ``y_train[:2892]``
      (unrelated rows); they now use predictions on X_train against y_train.
    """
    forest = RandomForestRegressor(n_estimators=100, random_state=0, n_jobs=1)
    forest.fit(X_train, y_train)
    train_pred = forest.predict(X_train)
    test_pred = forest.predict(X_test)
    print(f'Train RMSE: {np.sqrt(metrics.mean_squared_error(y_train, train_pred))}')
    print(f'R Score Train: {r2_score(y_train, train_pred)}')
    print(f'Test RMSE: {np.sqrt(metrics.mean_squared_error(y_test, test_pred))}')
    print(f'R Score Test: {r2_score(y_test, test_pred)}')


# Backwards-compatible alias for the original (misspelled) name.
_randonForest_train = _randomForest_train
# + colab={"base_uri": "https://localhost:8080/"} id="XieBii4Z316_" outputId="15d41cd8-cb28-4ba8-ce19-3a6f573d83ee"
# %%time
_randomForest_train(X_train_xg , X_test_xg , y_train , y_test )
# + colab={"base_uri": "https://localhost:8080/"} id="2bXw_g_a317A" outputId="46c99b69-ffff-4b92-8041-83a723ea7a2c"
# %%time
_xgboost_train(X_train_xg , X_test_xg , y_train , y_test )
# + [markdown] id="qiHLhz4U317B"
# #### Recursive Feature Selection(RFE)
# + id="oCoaOmoK317B"
from sklearn.feature_selection import RFE, RFECV
# + id="hiSO2mOk317C"
r_selection = RFE(RandomForestRegressor(n_estimators=100 , random_state=0 , n_jobs=1),n_features_to_select=10)
# + colab={"base_uri": "https://localhost:8080/"} id="8e3m2R4q317C" outputId="cc55dfa0-5dd3-413b-ad6e-a7787075df8d"
r_selection.fit(X_train , y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="OArERUEO317D" outputId="03a85473-83ec-4298-ea25-bf6f589e8ff8"
r_selection.get_support()
# + colab={"base_uri": "https://localhost:8080/"} id="VjiuToC4317D" outputId="b9a34622-2c30-4a86-bb19-ae2b455897f5"
features1 = X_train.columns[(r_selection.get_support())]
# Bug fix: the original printed len(features) — the RandomForest
# SelectFromModel result from an earlier cell — instead of the RFE
# selection computed here.
len(features1)
# + colab={"base_uri": "https://localhost:8080/"} id="ocZYnw5W317E" outputId="b6e395b7-5840-442b-9f96-126da02861fa"
features1
# + colab={"base_uri": "https://localhost:8080/"} id="KpjAO5-8317E" outputId="84f9936e-8d5a-485e-9bf9-1e4ff401bd4b"
r_selection.estimator_.feature_importances_
# + id="d8OLzqNn317F"
X_train_r = r_selection.transform(X_train)
X_test_r = r_selection.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="i0_QBqAV317G" outputId="3ba6ccc2-52be-4fd0-e35a-323ef9176fcd"
# %%time
_randomForest_train(X_train_r , X_test_r , y_train , y_test )
# + colab={"base_uri": "https://localhost:8080/"} id="0MHN-j_R317G" outputId="67dabd46-2e64-4d38-f738-2d10e0ede09a"
# %%time
# All features
_randomForest_train(X_train , X_test , y_train , y_test )
# + [markdown] id="T9sybIvj317H"
# ### Feature Selection: GradientBoostRegressor
# + id="OXCOsLzP317H"
from sklearn.ensemble import GradientBoostingRegressor
# + colab={"base_uri": "https://localhost:8080/"} id="1Xc-rukF317I" outputId="3b9df80f-0cde-4be4-a310-cb94eb3d7ead"
xgrad = RFE(xgb.XGBRegressor(objective ='reg:squarederror', n_estimators = 100, random_state=0 , n_jobs=1),n_features_to_select=10)
xgrad.fit(X_train , y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="LsvC83Yc317I" outputId="6d4ace37-d915-4520-a48e-ec557ccd4e66"
xgrad.get_support()
# + colab={"base_uri": "https://localhost:8080/"} id="gnjjrBeX317I" outputId="a40e4283-19d1-4a41-f8d4-bc6ffb43ae04"
features2 = X_train.columns[(xgrad.get_support())]
# Bug fix: the original printed len(features) (a stale variable from an
# earlier cell) instead of len(features2) computed on the line above.
len(features2)
# + colab={"base_uri": "https://localhost:8080/"} id="3E7k0apG317J" outputId="ce0ffc47-466f-4525-96fb-8a7039421e3d"
features2
# + colab={"base_uri": "https://localhost:8080/"} id="sIh-fLB8317J" outputId="00d0cca4-67aa-4956-8ea8-8b43307774eb"
xgrad.estimator_.feature_importances_
# + id="RCZNgWtd317K"
X_train_g = xgrad.transform(X_train)
X_test_g = xgrad.transform(X_test)
# + colab={"base_uri": "https://localhost:8080/"} id="aylgrfiI317K" outputId="e4a72eb8-8500-4a39-b777-f6ec3872d9c6"
# %%time
_randomForest_train(X_train_g , X_test_g , y_train , y_test )
# + colab={"base_uri": "https://localhost:8080/"} id="Mqp3frbN317L" outputId="05ba89db-ba44-4610-d6ee-b79a2b0cd6b0"
# %%time
#All features
_randomForest_train(X_train , X_test , y_train , y_test )
# + [markdown] id="sF_i7GAW317M"
# ### Feature Selection: Random Forest Tree & Gardient Boost - 50 Iterations
# + colab={"base_uri": "https://localhost:8080/"} id="LGwFyW81317M" outputId="1e752b3b-2af5-4b67-cc4e-6f67322508ff"
# Sweep the number of RFE-selected features (1..50) using a
# GradientBoostingRegressor as the ranking estimator, and report the
# downstream random-forest scores for each feature count.
print('>' * 40 + 'GradientBoostRegressor' + '<' * 40)
for n_feats in range(1, 51):
    selector = RFE(
        GradientBoostingRegressor(n_estimators=100, random_state=0),
        n_features_to_select=n_feats,
    )
    selector.fit(X_train, y_train)
    reduced_train = selector.transform(X_train)
    reduced_test = selector.transform(X_test)
    print(f'Selected Features: {n_feats}')
    _randomForest_train(reduced_train, reduced_test, y_train, y_test)
    print()
# + colab={"base_uri": "https://localhost:8080/"} id="pS8Lo8Zz317N" outputId="b3a9140e-98eb-46c4-f784-87d6d2374f66"
# Same sweep as above, but ranking features with a RandomForestRegressor
# inside RFE instead of gradient boosting.
print('>' * 40 + 'RandomForestRegressor' + '<' * 40)
for n_feats in range(1, 51):
    selector = RFE(
        RandomForestRegressor(n_estimators=100, random_state=0),
        n_features_to_select=n_feats,
    )
    selector.fit(X_train, y_train)
    reduced_train = selector.transform(X_train)
    reduced_test = selector.transform(X_test)
    print(f'Selected Features: {n_feats}')
    _randomForest_train(reduced_train, reduced_test, y_train, y_test)
    print()
# + id="Cj86DTPz317N"
# + colab={"base_uri": "https://localhost:8080/"} id="Fdq2_zDtWPmX" outputId="096c92bf-fae8-4e0f-f5be-ffb841257fd1"
grad_sel = RFECV(GradientBoostingRegressor(n_estimators=100 , random_state=0), cv=5)
grad_sel.fit(X_train , y_train)
X_train_g = grad_sel.transform(X_train)
X_test_g = grad_sel.transform(X_test)
print(f'Selected Features: {4}')
_randomForest_train(X_train_g , X_test_g , y_train , y_test )
# + colab={"base_uri": "https://localhost:8080/"} id="gRXJJGTvW2eU" outputId="07ebf1f5-f650-4baa-dd92-435ecc2d23ca"
best_feat1 = X_train.columns[(grad_sel.get_support())]
len(best_feat1)
# + colab={"base_uri": "https://localhost:8080/"} id="SgNLmyyWXILi" outputId="2f84d148-9c63-4a1a-9207-571e38ac6c64"
best_feat1
# + id="woBUdFyJW49J"
forest_selection = RFECV(RandomForestRegressor(n_estimators=100 , random_state=0, n_jobs=1), cv=5)
forest_selection.fit(X_train , y_train)
X_train_g = forest_selection.transform(X_train)
X_test_g = forest_selection.transform(X_test)
print(f'Selected Features: {10}')
_randomForest_train(X_train_g , X_test_g , y_train , y_test )
# + colab={"base_uri": "https://localhost:8080/"} id="iunN0Jd0YGg0" outputId="ff4c9bd8-0cd4-498c-f63d-e45ad249447c"
best_feat2 = X_train.columns[(forest_selection.get_support())]
len(best_feat2)
# + colab={"base_uri": "https://localhost:8080/"} id="PkpJfKXjbfCK" outputId="e3664f73-e7ef-42b8-f471-5305cd4d3bfd"
best_feat2
# + id="xuLJbGqsotH1"
# + id="yJ5OPeQfbg8R"
best_features = [ 'Madrid_wind_speed','Bilbao_rain_1h',
'Bilbao_pressure', 'Year', 'Day',
'Month', 'Start_hour']
# + colab={"base_uri": "https://localhost:8080/"} id="YJsHHGzudCBV" outputId="7c936235-746f-4cde-edd5-ad6560c4c3d5"
X = df_test[best_features]
X.columns
# + id="Nw2yZwRSdNpU"
# + id="JkHN0GngfqcI"
#Train a model
regression = [
xgb.XGBRegressor(objective ='reg:squarederror', n_estimators = 100, random_state=0 ),
# LinearRegression(),
RandomForestRegressor(max_depth=100)
]
# + colab={"base_uri": "https://localhost:8080/"} id="-uKbcb1Afsho" outputId="4c0d8082-31be-40ce-feea-9faff64e364f"
from sklearn import metrics
print('-'*40 + 'Trained Succesfully' + '-'*40 + '\n')
for items in regression:
model = items
model.fit(X_train[best_features],y_train)
train_model = model.predict(X_train[best_features])
test_model = model.predict(X_test[best_features])
#Dictionary of results
results_dict = {'Training':
{
"RMSE": np.sqrt(metrics.mean_squared_error(y_train, train_model)),
'R2 Score': r2_score(y_train ,train_model)
},
'Test':
{
"RMSE": np.sqrt(metrics.mean_squared_error(y_test, test_model)),
'R2 Score': r2_score(y_test ,test_model)
}
}
scores = pd.DataFrame(data=results_dict)
print(scores)
print('-'*50+'End'+'-'*50)
print()
y_pred = model.predict(X)
daf = pd.DataFrame(y_pred, columns=['load_shortfall_3h'])
output = pd.DataFrame({"time": df_test_copy['time'].reset_index(drop=True)})
submissionF = output.join(daf)
submissionF['load_shortfall_3h'] = daf.values
submissionF.to_csv("submissionF.csv", index=False)
print(submissionF)
# -
| Team-notebooks/feature_selection .ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.5 64-bit (''mldesignbook-ucg6RFfZ-py3.9'': poetry)'
# name: python3
# ---
# ## 2章: Open Bandit Pipelineを用いた意思決定モデルの学習/性能評価の実装
#
# この実装例は主に次のステップで構成される。
#
# 1. ある古い意思決定モデル$\pi_b$が稼働することで収集されたログデータを模した人工データを生成する
# 2. トレーニングデータを用いて意思決定モデルを学習し、バリデーションデータに対して行動を選択する
# 3. 学習した意思決定モデルの性能を、バリデーションデータを用いて推定する
# +
# 必要なパッケージやモジュールをインポート
from sklearn.linear_model import LogisticRegression
from obp.dataset import (
SyntheticBanditDataset,
logistic_reward_function,
linear_behavior_policy
)
from obp.policy import IPWLearner, Random
from obp.ope import (
OffPolicyEvaluation,
RegressionModel,
InverseProbabilityWeighting as IPS,
DoublyRobust as DR
)
# -
# ### 1. 人工データの生成
# +
# `SyntheticBanditDataset`を用いて人工データを生成する
dataset = SyntheticBanditDataset(
n_actions=3, # 人工データにおける行動の数
dim_context=3, # 人工データにおける特徴量の次元数
reward_function=logistic_reward_function, # 目的変数を生成する関数
behavior_policy_function=linear_behavior_policy, # 過去の意思決定モデル\pi_bによる行動選択確率を生成する関数
random_state=12345,
)
# トレーニングデータとバリデーションデータを生成する
training_data = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
validation_data = dataset.obtain_batch_bandit_feedback(n_rounds=10000)
# `training_data`の中身を確認する
training_data
# -
# ### 2. 意思決定モデルの学習(Off-Policy Learning; OPL)
#
# トレーニングデータを用いて次の2つの意思決定モデルを学習し、バリデーションデータに対して目的変数を最大化する行動を選択する。
#
# 1. IPWLearner+ロジスティック回帰
# 2. ランダム意思決定モデル
# +
# %%time
# 「IPWLearner+ロジスティック回帰」を定義
ipw_learner = IPWLearner(
n_actions=dataset.n_actions,
base_classifier=LogisticRegression(C=100, random_state=12345)
)
# トレーニングデータを用いて、意思決定モデルを学習
ipw_learner.fit(
context=training_data["context"], # 特徴量
action=training_data["action"], # 過去の意思決定モデル\pi_bによる行動選択
reward=training_data["reward"], # 観測される目的変数
pscore=training_data["pscore"], # 過去の意思決定モデル\pi_bによる行動選択確率(傾向スコア)
)
# バリデーションデータに対して行動を選択する
action_choice_by_ipw_learner = ipw_learner.predict(
context=validation_data["context"],
)
# +
# %%time
# ランダム意思決定モデルを定義
random = Random(n_actions=dataset.n_actions)
# バリデーションデータに対する行動選択確率を計算する
action_choice_by_random = random.compute_batch_action_dist(
n_rounds=validation_data["n_rounds"]
)
# -
# ### 3. 意思決定モデルの性能評価(Off-Policy Evaluation; OPE)
#
# 2つの意思決定モデルの性能を、バリデーションデータを用いて評価する。オフライン評価には、IPS推定量とDR推定量を用いる。
# +
# %%time
# DR推定量に必要な目的変数予測モデルを得る
# opeモジュールに実装されている`RegressionModel`に好みの機械学習手法を与えれば良い
regression_model = RegressionModel(
n_actions=dataset.n_actions, # 行動の数
base_model=LogisticRegression(C=100, random_state=12345), # ロジスティック回帰を使用
)
# `fit_predict`メソッドにより、バリデーションデータにおける期待報酬を推定
estimated_rewards_by_reg_model = regression_model.fit_predict(
context=validation_data["context"], # 特徴量
action=validation_data["action"], # 過去の意思決定モデル\pi_bによる行動選択
reward=validation_data["reward"], # 観測される目的変数
random_state=12345,
)
# -
# 意思決定モデルの性能評価を一気通貫で行うための`OffPolicyEvaluation`を定義する
ope = OffPolicyEvaluation(
bandit_feedback=validation_data, # バリデーションデータ
ope_estimators=[IPS(estimator_name="IPS"), DR()] # 使用する推定量
)
# IPWLearner+ロジスティック回帰の性能をIPS推定量とDR推定量で評価
ope.visualize_off_policy_estimates_of_multiple_policies(
policy_name_list=["IPWLearner", "Random"],
action_dist_list=[
action_choice_by_ipw_learner, # IPWLearnerによるバリデーションデータに対する行動選択
action_choice_by_random, # ランダム意思決定モデルによるバリデーションデータに対する行動選択
],
estimated_rewards_by_reg_model=estimated_rewards_by_reg_model, # DR推定量に必要な期待報酬推定値
random_state=12345,
)
# ### どの推定量を信じたとしても、IPWLearner(new_decision_making_model)がランダム意思決定モデル(random)の性能を上回るという結果が得られた
# ### 4. 最後に2つの意思決定モデルの真の性能を確認する
# +
# ipw_learnerとrandomの真の性能を計算する
# これは、`SyntheticBanditDataset`の`calc_ground_truth_policy_value`メソッドを呼び出すことで計算できる
performance_of_ipw_learner = dataset.calc_ground_truth_policy_value(
expected_reward=validation_data['expected_reward'], # バリデーションデータにおける期待報酬
action_dist=action_choice_by_ipw_learner, # 評価対象の意思決定モデルによる行動選択確率
)
performance_of_random = dataset.calc_ground_truth_policy_value(
expected_reward=validation_data['expected_reward'], # バリデーションデータにおける期待報酬
action_dist=action_choice_by_random, # 評価対象の意思決定モデルによる行動選択確率
)
print(f'IPWLearner+ロジスティック回帰の性能: {performance_of_ipw_learner}')
print(f'ランダム意思決定モデルの性能: {performance_of_random}')
# -
| ch02/synthetic-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 作業
# 1. 請用 numpy 建立一個 10 x 10, 數值分布自 -1.0 ~ 1.0 的矩陣並繪製 Heatmap
# 2. 請用 numpy 建立一個 1000 x 3, 數值分布為 -1.0 ~ 1.0 的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)
# 3. 請用 numpy 建立一個 1000 x 3, 數值分布為常態分佈的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)
# # [作業目標]
# - 試著設定隨機資料, 並依照範例練習基礎與進階的 Heatmap
# # [作業重點]
# - 如題1.條件隨機矩陣, 並仿造基礎 Heatmap 範例作圖
# (In[2], OUT[2]) (Hint : numpy.random.random - 均勻分布, 隨機小數)
# - 如題2.3.條件隨機數值列, 並仿造進階 Heatmap 範例作圖
# (In[3], OUT[3], In[4], OUT[4]) (Hint : numpy.random.randn - 常態分布)
# +
# 載入需要的套件
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns # 另一個繪圖-樣式套件
plt.style.use('ggplot')
# 忽略警告訊息
# %matplotlib inline
import warnings
warnings.filterwarnings('ignore')
# -
# # 1. 請用 numpy 建立一個 10 x 10, 數值分布自 -1.0 ~ 1.0 的矩陣並繪製 Heatmap
#取亂數
matrix = np.random.uniform(-1,1, [10,10])
matrix
"""
Your Code Here
"""
plt.figure(figsize=(10,10))
"""
Your Code Here
"""
heatmap = sns.heatmap(matrix, cmap = 'coolwarm', vmin = -0.25, annot = True, vmax = 0.6)
plt.show()
# # 2. 請用 numpy 建立一個 1000 x 3, 數值分布為 -1.0 ~ 1.0 的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)
# +
nrow = 1000
ncol = 3
"""
Your Code Here
"""
matrix = np.random.rand(nrow,ncol)*2-1 # *2-1的用意是把 [0.0, 1.0) 變成 [-1, 1)
# 隨機給予 0, 1, 2 三種標籤
indice = np.random.choice([0,1,2], size=nrow)
plot_data = pd.DataFrame(matrix, indice).reset_index()
# 繪製 seaborn 進階 Heatmap
grid = sns.PairGrid(data = plot_data, size = 3, diag_sharey=False,
hue = 'index', vars = [x for x in list(plot_data.columns) if x != 'index'])
"""
Your Code Here
Please replace "..." to correct plot function
"""
# 上半部放 scatter plot
grid.map_upper(plt.scatter , alpha = 0.2)
# 對角線畫 histogram
grid.map_diag(sns.distplot )# sns.kdeplot 是 density plot
# 下半部放 density plot
grid.map_lower(sns.kdeplot , cmap = plt.cm.OrRd_r)
plt.show()
# -
matrix
indice
# # 3. 請用 numpy 建立一個 1000 x 3, 數值分布為常態分佈的矩陣,並繪製 PairPlot (上半部為 scatter, 對角線為 hist, 下半部為 density)
# +
nrow = 1000
ncol = 3
"""
Your Code Here
"""
matrix = np.random.randn(nrow, ncol)
# 隨機給予 0, 1, 2 三種標籤
indice = np.random.choice([0,1,2], size=nrow)
plot_data = pd.DataFrame(matrix, indice).reset_index()
# 繪製 seaborn 進階 Heatmap
grid = sns.PairGrid(data = plot_data, size = 3, diag_sharey=False,
hue = 'index', vars = [x for x in list(plot_data.columns) if x != 'index'])
"""
Your Code Here
Please replace "..." to correct plot function
"""
# 上半部放 scatter plot
grid.map_upper(plt.scatter , alpha = 0.2)
# 對角線畫 histogram
grid.map_diag(sns.distplot )# sns.kdeplot 是 density plot
# 下半部放 density plot
grid.map_lower(sns.kdeplot , cmap = plt.cm.OrRd_r)
plt.show()
# -
matrix
indice
| HomeWork/Day_020_HW.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In this notebook we reconstruct the Stabilizer decomposition of the state $|H>^{\otimes 6}$ of *Trading classical and quantum computational resources* (2016).
#
# Here $|H> = |0> + (1/\sqrt 2-1)|1>$ is within local Cliffords of $|T> = |0> + e^{i\pi/4} |1>$.
# %load_ext autoreload
# %autoreload 2
import sys; sys.path.append('..')
import random, math, os
import pyzx as zx
from fractions import Fraction
import numpy as np
# %config InlineBackend.figure_format = 'svg'
#Z^6 |K_6>
g = zx.Graph()
verts = []
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
v = g.add_vertex(1,i, 0,Fraction(1))
verts.append(v)
g.add_edge((o,v))
g.scalar.add_power(15)
for i in range(6):
for j in range(i+1,6):
g.add_edge((verts[i],verts[j]),2)
display(zx.draw(g))
K6 = g.to_matrix(True)
# |0>^6
g = zx.Graph()
verts = []
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
v = g.add_vertex(2,i, 0)
verts.append(v)
g.add_edge((o,v))
g.scalar.add_power(-6)
display(zx.draw(g))
ket0 = g.to_matrix(True)
# |1>^6
g = zx.Graph()
verts = []
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
v = g.add_vertex(2,i, 0, phase=Fraction(1))
verts.append(v)
g.add_edge((o,v))
g.scalar.add_power(-6)
display(zx.draw(g))
ket1 = g.to_matrix(True)
# |E_6>
g = zx.Graph()
v = g.add_vertex(2,3, 0)
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
g.add_edge((o,v))
g.scalar.add_power(4)
display(zx.draw(g))
E6 = g.to_matrix(True)
# |O_6>
g = zx.Graph()
v = g.add_vertex(2,3, 0, Fraction(1))
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
g.add_edge((o,v))
g.scalar.add_power(4)
display(zx.draw(g))
O6 = g.to_matrix(True)
# |phi'>
g = zx.Graph()
verts = []
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
v = g.add_vertex(1,i, 0)
verts.append(v)
if i == 5:
g.add_edge((o,v),2)
else: g.add_edge((o,v))
g.scalar.add_power(9)
for i in range(4):
g.add_edge((verts[i],verts[i+1]),2)
g.add_edge((verts[i],verts[5]),2)
g.add_edge((verts[0],verts[4]),2)
g.add_edge((verts[4],verts[5]),2)
g.set_phase(verts[5],Fraction(1))
display(zx.draw(g))
phi1 = g.to_matrix(True)
# |phi''>
g = zx.Graph()
verts = []
for i in range(6):
o = g.add_vertex(0, i, 1)
g.outputs.append(o)
v = g.add_vertex(1,i, 0)
verts.append(v)
if i == 2:
g.add_edge((o,v),2)
else: g.add_edge((o,v))
g.scalar.add_power(9)
v1,v2,v3,v4,v5,v6 = verts
g.add_edges([(v1,v2),(v2,v4),(v4,v5),(v5,v6),(v1,v6),(v1,v3),(v2,v3),(v3,v4),(v3,v5),(v3,v6)],2)
g.set_phase(v3,Fraction(1))
display(zx.draw(g))
phi2 = g.to_matrix(True)
sq2 = math.sqrt(2)
# Assemble |H>^{\otimes 6} as a fixed linear combination of the seven
# stabilizer states built above (ket0, ket1, E6, O6, K6, phi1, phi2); the
# sqrt(2)-valued coefficients come from the decomposition being reproduced.
H6 = (-16+12*sq2)*ket0 + (96 - 68*sq2)*ket1 + \
(10-7*sq2)*E6 + (-14 + 10*sq2)*O6 + \
(7-5*sq2)*K6 + (10-7*sq2)*phi1 + \
(10-7*sq2)*phi2
H6
# Single-qubit objects as matrices: |H> = |0> + (sqrt(2)-1)|1>, the S gate,
# and an unnormalised Hadamard; T is obtained from |H> by applying HAD*S,
# illustrating that |H> and |T> are related by local Cliffords.
H = np.matrix([[1],[sq2 - 1]])
S = np.matrix([[1,0],[0,-1j]])
HAD = np.matrix([[1,1],[1,-1]])
T = 1/(1-1j*math.tan(math.pi/8))*HAD*S* H
T
| scratchpads/magic state decomposition.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sampling for FaIR
#
# Theme Song: Munich<br>
# Artist: Editors<br>
# Album: The Back Room<br>
# Released: 2005
#
# This notebook generates the parameter sets to run FaIR with for AR6 setups. This will be used in chapter 7 WG1 and passed on also to WG3 through `openscm-runner`
# +
import errno
import fair
import json
import sys
import os
import random
import numpy as np
import scipy.stats as st
import matplotlib.pyplot as pl
import pandas as pd
import pickle
from multiprocessing import Pool
from netCDF4 import Dataset
from tqdm.notebook import tqdm
from scipy.interpolate import interp1d
from fair.constants import molwt
from fair.ancil import natural, cmip6_volcanic, cmip6_solar
from ar6.forcing.aerosol import ghan
from ar6.constants import NINETY_TO_ONESIGMA
# -
fair.__version__
# ## Ensemble generation
#
# We want to ensure reproducible results that don't change when this script is re-run. Grab list of pre-generated random seeds.
with open('../data_input/random_seeds.json', 'r') as filehandle:
SEEDS = json.load(filehandle)
SAMPLES = 1000000
F2XCO2_MEAN = 4.00
F2XCO2_NINETY = 0.48
# ## Thermal parameters
# +
with open("../data_input/tunings/cmip6_twolayer_tuning_params.json", "r") as read_file:
params = json.load(read_file)
cmip6_models = list(params['q4x']['model_data']['EBM-epsilon'].keys())
cmip6_models
NMODELS = len(cmip6_models)
geoff_data = np.zeros((NMODELS, 6))
for im, model in enumerate(cmip6_models):
geoff_data[im,0] = params['q4x']['model_data']['EBM-epsilon'][model]
geoff_data[im,1] = params['lamg']['model_data']['EBM-epsilon'][model]
geoff_data[im,2] = params['cmix']['model_data']['EBM-epsilon'][model]
geoff_data[im,3] = params['cdeep']['model_data']['EBM-epsilon'][model]
geoff_data[im,4] = params['gamma_2l']['model_data']['EBM-epsilon'][model]
geoff_data[im,5] = params['eff']['model_data']['EBM-epsilon'][model]
geoff_df = pd.DataFrame(geoff_data, columns=['q4x','lamg','cmix','cdeep','gamma_2l','eff'], index=cmip6_models)
kde = st.gaussian_kde(geoff_df.T)
geoff_sample = kde.resample(size=int(SAMPLES*1.25), seed = SEEDS[15])
# remove unphysical combinations
# geoff_sample has shape (6, n_draws): each row is one parameter in the order
# (q4x, lamg, cmix, cdeep, gamma_2l, eff) and each column is one draw.
# Writing NaN into a column marks that whole draw as unphysical.
geoff_sample[:,geoff_sample[0,:] <= 0] = np.nan
#geoff_sample[:,geoff_sample[1,:] >= -0.6] = np.nan
# lamg (row 1) is replaced wholesale with a truncated-normal sample instead
# of the KDE draw.
geoff_sample[1, :] = st.truncnorm.rvs(-2, 2, loc=-4/3, scale=0.5, size=int(SAMPLES*1.25), random_state=SEEDS[16])
geoff_sample[:,geoff_sample[2,:] <= 0] = np.nan
geoff_sample[:,geoff_sample[3,:] <= 0] = np.nan
geoff_sample[:,geoff_sample[4,:] <= 0] = np.nan
geoff_sample[:,geoff_sample[5,:] <= 0] = np.nan
# NOTE(review): np.all is True only when *every* row of a column is NaN, but
# row 1 was just overwritten with finite truncnorm values, so this mask can
# never fire and no columns are actually dropped. If the intent is to drop
# any draw containing an unphysical value, this should presumably be
# np.any — confirm before changing, as it alters the retained sample.
mask = np.all(np.isnan(geoff_sample), axis=0)
geoff_sample = geoff_sample[:,~mask][:,:SAMPLES]
geoff_sample_df=pd.DataFrame(data=geoff_sample.T, columns=['q4x','lamg','cmix','cdeep','gamma_2l','eff'])
geoff_sample_df.to_csv('../data_output_large/geoff_sample.csv')
geoff_sample_df
f2x = st.norm.rvs(loc=F2XCO2_MEAN, scale=F2XCO2_NINETY/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[73])
ecs = -f2x/geoff_sample[1,:]
tcr = f2x/(-geoff_sample[1,:] + geoff_sample[4,:]*geoff_sample[5,:])
np.save('../data_input_large/fair-samples/f2x_unconstrained.npy', f2x)
np.save('../data_input_large/fair-samples/ecs_unconstrained.npy', ecs)
np.save('../data_input_large/fair-samples/tcr_unconstrained.npy', tcr)
# -
pl.hist(ecs, bins=np.arange(0,11,0.2))
print(np.percentile(ecs, (5,16,50,84,95)))
pl.hist(tcr, bins=np.arange(0,5,0.2))
print(np.percentile(tcr, (5,16,50,84,95)))
fig, ax = pl.subplots()
ax.scatter(ecs, tcr)
ax.text(0.1,0.9,'r = %.2f' % np.corrcoef(ecs,tcr)[0,1], transform=ax.transAxes)
ax.set_ylim(0,8)
ax.set_xlim(0,10)
ax.set_xlabel('ECS')
ax.set_ylabel('TCR')
ax.set_title('ECS and TCR priors')
fig.tight_layout()
print(np.percentile(ecs, (5,16,50,84,95)))
print(np.percentile(tcr, (5,16,50,84,95)))
# ## Forcing uncertainties
#
# - Cross-check these values against AR6 notebook #040
# +
# these are standard deviations of the scale factor for normally distributed forcings (mean = 1). The list below is expressed in terms of 5-95% ranges.
unc_ranges = np.array([
    0.12, # CO2
    0.20, # CH4: updated value from etminan 2016
    0.14, # N2O
    0.19, # other WMGHGs
    0.50, # Total ozone
    1.00, # stratospheric WV from CH4
    0.70, # contrails approx - half-normal
    1.25, # bc on snow - half-normal
    0.50, # land use change
    5.0/20.0, # volcanic
    0.50, # solar (amplitude)
])/NINETY_TO_ONESIGMA
NORMALS = len(unc_ranges)
# Draw one scale factor per sample per forcing category (mean 1).
scale_normals = st.norm.rvs(
    size=(SAMPLES,NORMALS),
    loc=np.ones((SAMPLES,NORMALS)),
    scale=np.ones((SAMPLES, NORMALS)) * unc_ranges[None,:],
    random_state=SEEDS[4]
)
## bc snow is asymmetric Gaussian. We can just scale the half of the distribution above/below best estimate
scale_normals[scale_normals[:,7]<1,7] = 0.08/0.1*(scale_normals[scale_normals[:,7]<1,7]-1) + 1
## so is contrails - the benefits of doing this are tiny :)
scale_normals[scale_normals[:,6]<1,6] = 0.0384/0.0406*(scale_normals[scale_normals[:,6]<1,6]-1) + 1
# Additive solar trend term, sampled separately from the solar amplitude scale.
trend_solar = st.norm.rvs(size=SAMPLES, loc=+0.01, scale=0.07/NINETY_TO_ONESIGMA, random_state=SEEDS[50])
np.save('../data_input_large/fair-samples/scale_normals.npy', scale_normals)
np.save('../data_input_large/fair-samples/scale_trend_solar.npy', trend_solar)
# -
# ### Aerosol emissions
#
# check against #010, #040, #050
with open('../data_input/tunings/cmip6_aerosol_coefficients.json') as json_file:
    cmip6_aerosol_data = json.load(json_file)
# +
# ERFaci coefficients (n0, n1) from 11 CMIP6 models; sample their joint
# distribution via a KDE in log space, then exponentiate back.
cmip6_aci = np.zeros((11, 2))
for i, model in enumerate(['CanESM5', 'E3SM', 'GFDL-ESM4', 'GFDL-CM4', 'GISS-E2-1-G', 'HadGEM3-GC31-LL', 'IPSL-CM6A-LR', 'MIROC6', 'MRI-ESM2-0', 'NorESM2-LM', 'UKESM1-0-LL']):
    for j, species in enumerate(['n0','n1']):
        cmip6_aci[i,j] = np.log(cmip6_aerosol_data[model]['ERFaci'][species])
kde = st.gaussian_kde(cmip6_aci.T)
aci_coeffs=np.exp(kde.resample(size=int(SAMPLES), seed=SEEDS[8]).T)
np.save('../data_input_large/fair-samples/aci_coeffs.npy', aci_coeffs)
# -
# target ranges for aerosols:
# total ERFari -0.6 -0.3 -0.0
## BC 0.05 0.4 0.8 then subtract -0.1 for RA so -0.05 0.3 0.7
## SO2 -0.6 -0.4 -0.2
## OC -0.16 -0.09 -0.03
## Nitrate -0.3 -0.11 -0.03
# Per-species 2010-minus-1750 ERFari contributions, Gaussian with the
# (median, 90% range) targets listed above.
bc_20101750 = st.norm.rvs(loc=0.3, scale=0.2/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[95])
oc_20101750 = st.norm.rvs(loc=-0.09, scale=0.07/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[96])
so2_20101750 = st.norm.rvs(loc=-0.4, scale=0.2/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[97])
nit_20101750 = st.norm.rvs(loc=-0.11, scale=0.05/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[98])
pl.hist(bc_20101750+oc_20101750+so2_20101750+nit_20101750)
np.percentile(bc_20101750+oc_20101750+so2_20101750+nit_20101750, (5,50,95))
# +
# Get SSP historical emissions
ssp_df = pd.read_csv('../data_input_large/rcmip-emissions-annual-means-v5-1-0.csv')
species = [
    'Emissions|Sulfur',
    'Emissions|BC',
    'Emissions|OC',
    'Emissions|NH3',
    'Emissions|NOx'
]
unit_convert = np.ones(5)
unit_convert[0] = 32/64 # follow zeb exactly, but would be better to use fair.constants.molwt
unit_convert[4] = 14/46
# 351 years (1750-2100) x 5 species; history to 2014 is annual, the future
# is decadal in the source file and interpolated to annual below.
emissions_out = np.zeros((351,5))
years_future = [2015] + list(range(2020,2101,10))
for i, specie in enumerate(species):
    emissions_out[:265,i] = ssp_df.loc[
        (ssp_df['Model']=='MESSAGE-GLOBIOM')&
        (ssp_df['Region']=='World')&
        (ssp_df['Scenario']=='ssp245')&
        (ssp_df['Variable']==specie),
    '1750':'2014']*unit_convert[i]
    f = interp1d(years_future, ssp_df.loc[
        (ssp_df['Model']=='MESSAGE-GLOBIOM')&
        (ssp_df['Region']=='World')&
        (ssp_df['Scenario']=='ssp245')&
        (ssp_df['Variable']==specie),'2015':'2100'
    ].dropna(axis=1))
    emissions_out[265:, i] = f(np.arange(2015, 2101))*unit_convert[i]
# -
# Convert the sampled 2010-1750 forcings into per-unit-emission coefficients,
# using 2005-2014 mean emissions (rows 255:265) minus 1750 (row 0).
beta_bc = bc_20101750/(np.mean(emissions_out[255:265,1])-emissions_out[0,1])
beta_oc = oc_20101750/(np.mean(emissions_out[255:265,2])-emissions_out[0,2])
beta_so2 = so2_20101750/(np.mean(emissions_out[255:265,0])-emissions_out[0,0])
beta_nh3 = nit_20101750/(np.mean(emissions_out[255:265,3])-emissions_out[0,3])
# ### Testing and scaling aerosol emissions
# ERFari time series: linear in emissions anomalies relative to 1750.
ERFari = np.zeros((351, SAMPLES))
for i in range(SAMPLES):
    ERFari[:, i] = (
        (emissions_out[:,0]-emissions_out[0,0]) * beta_so2[i] +
        (emissions_out[:,1]-emissions_out[0,1]) * beta_bc[i] +
        (emissions_out[:,2]-emissions_out[0,2]) * beta_oc[i] +
        (emissions_out[:,3]-emissions_out[0,3]) * beta_nh3[i]
        # (emissions_out[:,4]-emissions_out[0,4]) * beta_nox[i]
    )
np.save('../data_input_large/fair-samples/beta_so2_unconstrained.npy', beta_so2)
np.save('../data_input_large/fair-samples/beta_bc_unconstrained.npy', beta_bc)
np.save('../data_input_large/fair-samples/beta_oc_unconstrained.npy', beta_oc)
np.save('../data_input_large/fair-samples/beta_nh3_unconstrained.npy', beta_nh3)
# Overall 2010-1750 ERFaci magnitude per sample.
ERFaci_scale = st.norm.rvs(size=SAMPLES, loc=-1.0, scale=0.7/NINETY_TO_ONESIGMA, random_state=SEEDS[9])
# option here: use Smith et al 2020 ACP correlations in models and sample as joint normal. They are strongly anti-correlated.
#print(np.percentile(ERFari_scale, (5,16,50,84,95)))
print(np.percentile(ERFaci_scale, (5,16,50,84,95)))
#print(np.percentile(ERFari_scale+ERFaci_scale, (5,16,50,84,95)))
# +
#ERFari = np.zeros((270, SAMPLES))
# ERFaci: evaluate the ghan() emulator, normalise so 2010-1750 equals the
# sampled ERFaci_scale, and keep the implied slope beta for later use.
ERFaci = np.zeros((351, SAMPLES))
beta = np.zeros(SAMPLES)
for i in tqdm(range(SAMPLES)):
    forcing2010 = np.mean(
        ghan([
            emissions_out[255:265,0], emissions_out[255:265,1]+emissions_out[255:265,2]
        ], 0.97, aci_coeffs[i,0], aci_coeffs[i,1])
    )
    forcing1750 = ghan([emissions_out[0,0], emissions_out[0,1]+emissions_out[0,2]], 0.97, aci_coeffs[i,0], aci_coeffs[i,1])
    ERFaci[:,i] = (ghan([emissions_out[:,0], emissions_out[:,1]+emissions_out[:,2]], 0.97, aci_coeffs[i,0], aci_coeffs[i,1]) - forcing1750)/(forcing2010-forcing1750)*(ERFaci_scale[i])
    beta[i] = ERFaci_scale[i]/(forcing2010-forcing1750)
np.save('../data_input_large/fair-samples/beta_unconstrained.npy', beta)
# -
# Sanity check: total aerosol ERF (ari + aci) in 2010 (row 260 = year 2010).
np.percentile(ERFari[260,:]+ERFaci[260,:], (5,16,50,84,95))
# ## Carbon cycle parameters
#
# Uniform distributions from Leach et al 2020 https://gmd.copernicus.org/preprints/gmd-2020-390/, using full range of CMIP6 tunings.
#
# Pre-industrial CO2 concentration uncertainty is from Jinho Ahn, chapter 2 AR6 WG1. SHOULD WE USE AR6 VALUE WHICH IS SLIGHTLY DIFFERENT?
# +
# Uniform draws over the CMIP6 tuning ranges: scipy's uniform uses
# loc = lower bound and scale = width, hence the (hi - lo) expressions.
r0 = st.uniform.rvs(loc=27.7, scale=41.3-27.7, random_state=SEEDS[10], size=SAMPLES)
rC = st.uniform.rvs(loc=-0.00712, scale=0.0482+0.00712, random_state=SEEDS[11], size=SAMPLES)
rT = st.uniform.rvs(loc=-0.0847, scale=4.52+0.0847, random_state=SEEDS[12], size=SAMPLES)
# Pre-industrial CO2 concentration (ppm), Gaussian.
pre_ind_co2 = st.norm.rvs(loc=277.147, scale=2.9, random_state=SEEDS[13], size=SAMPLES)
np.save('../data_input_large/fair-samples/r0_unconstrained.npy', r0)
np.save('../data_input_large/fair-samples/rC_unconstrained.npy', rC)
np.save('../data_input_large/fair-samples/rT_unconstrained.npy', rT)
np.save('../data_input_large/fair-samples/pre_ind_co2_unconstrained.npy', pre_ind_co2)
# -
# -
# # I now think we have everything we need to run FaIR with
#
# Before tackling the AR6-WG3 format, let's see how they look before we let this loose on the world.
#ssp_df = pd.read_csv(os.path.join(os.path.dirname(fair.__file__), 'SSPs/data/rcmip-emissions-annual-means-4-0-0-ssp-only.csv'))
ssp_df = pd.read_csv('../data_input_large/rcmip-emissions-annual-means-v5-1-0.csv')
# +
# Assemble the 40-column FaIR emissions array (year + 39 species) for ssp245:
# annual history to 2014, decadal future interpolated to annual.
years = np.arange(1750,2101)
startyear = 1750
first_scenyear = 2015
last_scenyear = 2100
first_row = int(first_scenyear-startyear)
last_row = int(last_scenyear-startyear)
species = [ # in fair 1.6, order is important
    '|CO2|MAGICC Fossil and Industrial',
    '|CO2|MAGICC AFOLU',
    '|CH4',
    '|N2O',
    '|Sulfur',
    '|CO',
    '|VOC',
    '|NOx',
    '|BC',
    '|OC',
    '|NH3',
    '|CF4',
    '|C2F6',
    '|C6F14',
    '|HFC23',
    '|HFC32',
    '|HFC4310mee',
    '|HFC125',
    '|HFC134a',
    '|HFC143a',
    '|HFC227ea',
    '|HFC245fa',
    '|SF6',
    '|CFC11',
    '|CFC12',
    '|CFC113',
    '|CFC114',
    '|CFC115',
    '|CCl4',
    '|CH3CCl3',
    '|HCFC22',
    '|HCFC141b',
    '|HCFC142b',
    '|Halon1211',
    '|Halon1202',
    '|Halon1301',
    '|Halon2402',
    '|CH3Br',
    '|CH3Cl',
]
# Assume that units coming out of aneris don't change. One day I'll do unit parsing
unit_convert = np.ones(40)
unit_convert[1] = 12/44/1000
unit_convert[2] = 12/44/1000
unit_convert[4] = 28/44/1000
unit_convert[5] = 32/64
unit_convert[8] = 14/46
data_out = {}
scens = ['ssp245']
for scen in scens:
    # Column 0 is the year; species fill columns 1..39.
    data_out[scen] = np.ones((351, 40)) * np.nan
    data_out[scen][:,0] = years
    years_future = [2015] + list(range(2020,2101,10))
    for i, specie in enumerate(species):
        data_out[scen][:first_row,i+1] = ssp_df.loc[(ssp_df['Model']=='MESSAGE-GLOBIOM')&(ssp_df['Region']=='World')&(ssp_df['Scenario']==scen)&(ssp_df['Variable'].str.endswith(specie)),str(startyear):'2014']*unit_convert[i+1]
        f = interp1d(years_future, ssp_df.loc[(ssp_df['Model']=='MESSAGE-GLOBIOM')&(ssp_df['Region']=='World')&(ssp_df['Scenario']==scen)&(ssp_df['Variable'].str.endswith(specie)),'2015':'2100'].dropna(axis=1))
        data_out[scen][first_row:(last_row+1), i+1] = f(np.arange(first_scenyear, last_scenyear+1))*unit_convert[i+1]
# -
data_out['ssp245'].shape
data_out['ssp245'][250,:]
# +
# add in natural emissions and natural forcing
ch4_n2o_df = pd.read_csv('../data_output/fair_wg3_natural_ch4_n2o.csv')
ch4_n2o = ch4_n2o_df.values[:351,1:]
df = pd.read_csv('../data_output/solar_erf.csv', index_col='year')
solar_forcing = df.solar_erf.loc[1750:2100].values
df = pd.read_csv('../data_output/volcanic_erf.csv', index_col='year')
# Volcanic ERF: observed to 2018 (rows 0-268), then zero for the future.
volcanic_forcing = np.zeros((351))
volcanic_forcing[:269] = df.volcanic_erf.loc[1750:2018].values
# ramp down last 10 years to zero according to https://www.geosci-model-dev.net/9/3461/2016/gmd-9-3461-2016.html
volcanic_forcing[268:279] = volcanic_forcing[268] * np.linspace(1,0,11)
volcanic_forcing[279:] = 0.
# -
# -
trend_solar.shape
# Visual check of the first 100 sampled solar forcing realisations:
# historical period (to index 270) gets a linear ramp of the sampled trend
# plus the amplitude-scaled solar ERF; the future holds the trend constant.
for i in range(100):
    F_solar = np.zeros(351)
    F_solar[:270] = np.linspace(0,trend_solar[i],270) + solar_forcing[:270]*scale_normals[i,10]
    F_solar[270:351] = trend_solar[i] + solar_forcing[270:351]*scale_normals[i,10]
    pl.plot(F_solar)
o3_df = pd.read_csv('../data_input/tunings/cmip6_ozone_skeie_fits.csv', index_col=0)
o3_df#.loc['NMVOC','mean']
# +
# Ozone: temperature feedback plus per-precursor forcing coefficients.
ozone_feedback = st.norm.rvs(loc=-0.037, scale=0.012, size=SAMPLES, random_state=SEEDS[51])
# these coefficients are AerChemMIP - don't delete for now
#beta_ch4 = st.norm.rvs(loc=0.00017380181731885394, scale=6.207207761387641e-05/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[52])
#beta_n2o = st.norm.rvs(loc=0.0007061379618263551, scale=0.0004707586412175701/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[53])
#beta_ods = st.norm.rvs(loc=-0.00012432206866394785, scale=0.00011302006242177078/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[54])
#beta_co = st.norm.rvs(loc=0.00015401820093350843, scale=0.00013103040974940267/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[55])
#beta_voc = st.norm.rvs(loc=0.00032758074092083077, scale=0.00032758074092083077/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[56])
#beta_nox = st.norm.rvs(loc=46/14*0.0017868021851642044, scale=46/14*0.0009827412018403123/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[57])
# these coefficients are Skeie fits
# NOTE(review): the ODS line negates its u90 to make the scale positive;
# the NOx lines convert N-based to NO2-based units via 46/14 — confirm.
beta_ch4 = st.norm.rvs(loc=o3_df.loc['CH4','mean'], scale=o3_df.loc['CH4','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[52])
beta_n2o = st.norm.rvs(loc=o3_df.loc['N2O','mean'], scale=o3_df.loc['N2O','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[53])
beta_ods = st.norm.rvs(loc=o3_df.loc['ODS','mean'], scale=-o3_df.loc['ODS','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[54])
beta_co = st.norm.rvs(loc=o3_df.loc['CO','mean'], scale=o3_df.loc['CO','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[55])
beta_voc = st.norm.rvs(loc=o3_df.loc['VOC','mean'], scale=o3_df.loc['VOC','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[56])
beta_nox = st.norm.rvs(loc=46/14*o3_df.loc['NOx','mean'], scale=46/14*o3_df.loc['NOx','u90']/NINETY_TO_ONESIGMA, size=SAMPLES, random_state=SEEDS[57])
np.save('../data_input_large/fair-samples/ozone_feedback_unconstrained.npy', ozone_feedback)
np.save('../data_input_large/fair-samples/beta_ch4_unconstrained.npy', beta_ch4)
np.save('../data_input_large/fair-samples/beta_n2o_unconstrained.npy', beta_n2o)
np.save('../data_input_large/fair-samples/beta_ods_unconstrained.npy', beta_ods)
np.save('../data_input_large/fair-samples/beta_co_unconstrained.npy', beta_co)
np.save('../data_input_large/fair-samples/beta_voc_unconstrained.npy', beta_voc)
np.save('../data_input_large/fair-samples/beta_nox_unconstrained.npy', beta_nox)
# -
# -
| notebooks/140_WG3_draw_fair_samples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Snowpack inputs
# ===========
#
# __Goal__:
# - run SMRT with multi-layer snowpack, adjusting various parameters (like wet snow)
# - using real data to drive SMRT
#
# __Learning__: make_snowpack
#
# The following imports are valid for both exercises:
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib notebook
from smrt import make_model, make_snowpack, sensor_list
from smrt.utils import dB
# -
# Multi-layer snowpack
# -----------------------------------
#
# Prepare a snowpack with a few layers. Variable density (e.g. 300, 350, 330, 400 kg m$^{-3}$), variable temperature (e.g. -15°C, -10°C, -5°C, 0°C) same radius (100$\mu$m) and same stickiness (0.15). Choose layer thickness (e.g. 10 cm, 30 cm, ...). The last layer must be very thick (as we'll work with soil later).
# N.B. if one argument is given as a scalar while at least another (such as the thickness) is given as a list or array, the scalar value is automatically applied to all layers.
# +
# prepare the multi-layer snowpack
# +
# prepare the sensor. Prepare the model and run it. Print or plot the results
# +
# Tips: we can draw the snowpack (a recently added function, maybe buggy) as follow:
from smrt.utils.mpl_plots import plot_snowpack
plt.figure()
plot_snowpack(sp, show_vars=['density', 'radius'])
# -
# Wet snow
# --------------
#
#
# The `make_snowpack` function can take several optional arguments for non-default behavior. One of them is "ice_permittivity_model". Currently, the default formulation is that from Mätzler 1987 for wet snow, so you can simply add a liquid_water argument.
#
from smrt import make_snowpack
# prepare the multi-layer snowpack
radius = 100e-6
density=300
temperature = 260
# Two layers: a thin 10 cm top layer with 1% liquid water over a very thick
# dry layer; the default ice permittivity model handles the wet snow.
sp = make_snowpack(thickness=[0.1, 10],
                   microstructure_model='sticky_hard_spheres',
                   density=density,
                   radius=radius,
                   stickiness=0.15,
                   temperature=temperature,
                   liquid_water=[0.01, 0])
# Inspect the permittivity of the (wet) top layer at 10 GHz.
sp.layers[0].permittivity(1, 10e9)
# To make explicit the permittivity formulation (which is needed for non default permittivity). For this: 1) import the function wetsnow_permittivity defined in the file permittivity/wetsnow.py and 2) make a snowpack similar to the previous one except set ice_permittivity_model. This can be list or scalar (yes, Python does accept list of functions!).
# +
from smrt.permittivity.wetsnow import wetsnow_permittivity
from smrt import make_snowpack
# prepare the multi-layer snowpack
radius = 100e-6
density=300
temperature = 260
# Same snowpack as above, but with the wet-snow permittivity formulation
# passed explicitly via ice_permittivity_model.
sp = make_snowpack(thickness=[0.1, 10],
                   microstructure_model='sticky_hard_spheres',
                   density=density,
                   radius=radius,
                   stickiness=0.15,
                   temperature=temperature,
                   ice_permittivity_model=wetsnow_permittivity,
                   liquid_water=[0.01, 0])
# Permittivity of the top layer at 10 GHz, for comparison with the default.
sp.layers[0].permittivity(1, 10e9)
# +
# prepare the sensor. Prepare the model and run it. Print or plot the results
# -
# Read snowpack data
# ------------------------
#
# Most of the time, the snowpack is defined in a file or several files. This does not change the way to run SMRT, only reading the data is different. A file called "data-domec-sp1-picard-2014.dat" is provided. Data has been acquired in Snowpit 1 at Dome C in 2012/13 (<NAME>, <NAME>, <NAME>, <NAME>. Influence of meter-scale wind-formed features on the variability of the microwave brightness temperature around Dome C in Antarctica, The Cryosphere, 8, 1105-1119, 2014, doi:10.5194/tc-8-1105-2014). You can open the file with your favorite editor to see how it looks or (under linux) use the magics of jupyter notebooks: put in a cell "!cat data-domec-sp1-picard-2014.dat".
thickness, density, radius, temperature = np.loadtxt("data-domec-sp1-picard-2014.dat", skiprows=2, unpack=True, delimiter=" ")
# check units in the file and you're ready to go.
# But wait! To check variable from within jupyter notebooks, just enter the variable name
# at the end of this cell (or another one) and SHIFT+ENTER to see the values.
# +
# make snowpack (without stickiness) and so on
# +
# (depending on time) you can also try to plot the simulate the impact of a +/-10% on density, either on all layers or just the first one.
# -
| 02_using_smrt/02_snow_inputs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Laboratorio 9
# +
import pandas as pd
import altair as alt
import matplotlib.pyplot as plt
from vega_datasets import data
alt.themes.enable('opaque')
# %matplotlib inline
# -
# En este laboratorio utilizaremos un conjunto de datos _famoso_, el GapMinder. Esta es una versión reducida que solo considera países, ingresos, salud y población.
#
# ¿Hay alguna forma natural de agrupar a estos países?
# Load the reduced GapMinder dataset (country, income, health, population).
gapminder = data.gapminder_health_income()
gapminder.head()
# ## Ejercicio 1
#
# (1 pto.)
#
# Realiza un Análisis exploratorio, como mínimo un `describe` del dataframe y una visualización adecuada, por ejemplo un _scatter matrix_ con los valores numéricos.
# Summary statistics: numeric columns only, then all columns.
gapminder.describe().T
gapminder.describe(include='all').T
# Scatter matrix over the three numeric variables via Altair's repeat.
alt.Chart(gapminder).mark_circle(opacity=0.5).encode(
    alt.X(alt.repeat("column"), type='quantitative'),
    alt.Y(alt.repeat("row"), type='quantitative'),
).properties(
    width=150,
    height=150
).repeat(
    row=['income', 'health', 'population'],
    column=['population', 'income', 'health']
)
# __Pregunta:__ ¿Hay alguna variable que te entregue indicios a simple vista donde se puedan separar países en grupos?
#
# __Respuesta:__ En las variables `income` y `health` se observa una separación leve entre dos grupos. En las variables `health` y `population` también se observa una separación.
# ## Ejercicio 2
#
# (1 pto.)
#
# Aplicar un escalamiento a los datos antes de aplicar nuestro algoritmo de clustering. Para ello, definir la variable `X_raw` que corresponde a un `numpy.array` con los valores del dataframe `gapminder` en las columnas _income_, _health_ y _population_. Luego, definir la variable `X` que deben ser los datos escalados de `X_raw`.
from sklearn.preprocessing import StandardScaler
# Standardize the three numeric features (zero mean, unit variance) so the
# k-means distance metric is not dominated by `population`, which is orders
# of magnitude larger than `income` and `health`.
X_raw = gapminder[['income','health','population']].to_numpy()
# fit_transform is the idiomatic single call for fit-then-transform on the
# same data (equivalent to .fit(X_raw).transform(X_raw)).
X = StandardScaler().fit_transform(X_raw)
# ## Ejercicio 3
#
# (1 pto.)
#
# Definir un _estimator_ `KMeans` con `k=3` y `random_state=42`, luego ajustar con `X` y finalmente, agregar los _labels_ obtenidos a una nueva columna del dataframe `gapminder` llamada `cluster`. Finalmente, realizar el mismo gráfico del principio pero coloreado por los clusters obtenidos.
#
from sklearn.cluster import KMeans
# Fit k-means with 3 clusters on the scaled features; a fixed random_state
# makes the cluster assignment reproducible.
k = 3
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(X)
clusters = kmeans.labels_
gapminder["cluster"] = clusters
# Same scatter matrix as before, now colored by cluster label (nominal).
alt.Chart(gapminder).mark_circle(opacity=0.5).encode(
    alt.X(alt.repeat("column"), type='quantitative'),
    alt.Y(alt.repeat("row"), type='quantitative'),
    color='cluster:N'
).properties(
    width=150,
    height=150
).repeat(
    row=['income', 'health', 'population'],
    column=['population', 'income', 'health']
)
# ## Ejercicio 4
#
# (1 pto.)
#
# __Regla del codo__
#
# __¿Cómo escoger la mejor cantidad de _clusters_?__
#
# En este ejercicio hemos utilizado que el número de clusters es igual a 3. El ajuste del modelo siempre será mejor al aumentar el número de clusters, pero ello no significa que el número de clusters sea el apropiado. De hecho, si tenemos que ajustar $n$ puntos, claramente tomar $n$ clusters generaría un ajuste perfecto, pero no permitiría representar si existen realmente agrupaciones de datos.
#
# Cuando no se conoce el número de clusters a priori, se utiliza la [regla del codo](https://jarroba.com/seleccion-del-numero-optimo-clusters/), que indica que el número más apropiado es aquel donde "cambia la pendiente" de decrecimiento de la suma de las distancias a los clusters para cada punto, en función del número de clusters.
#
# A continuación se provee el código para el caso de clustering sobre los datos estandarizados, leídos directamente de un archivo preparado especialmente. En la línea que se declara `kmeans` dentro del ciclo _for_ debes definir un estimador K-Means, con `k` clusters y `random_state` 42. Recuerda aprovechar de ajustar el modelo en una sola línea.
# Elbow rule: fit k-means for k = 1..9 and record the inertia of each fit.
elbow = pd.Series(name="inertia", dtype="float64").rename_axis(index="k")
for k in range(1, 10):
    kmeans = KMeans(n_clusters=k, random_state=42).fit(X)
    elbow.loc[k] = kmeans.inertia_  # Inertia: Sum of distances of samples to their closest cluster center
elbow = elbow.reset_index()
# Plot inertia vs k; the "elbow" marks the appropriate cluster count.
alt.Chart(elbow).mark_line(point=True).encode(
    x="k:O",
    y="inertia:Q"
).properties(
    height=600,
    width=800
)
# __Pregunta:__ Considerando los datos (países) y el gráfico anterior, ¿Cuántos clusters escogerías?
#
# __Respuesta:__ Escogería tres clusters, dado que por la regla del codo se observa que en el tres cambia la pendiente.
| labs/lab09.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
# %matplotlib inline
import matplotlib.pyplot as plt
# -
df = pd.read_csv("Bus_Breakdown_and_Delays.csv")
print(df.info())
df
# +
#for c in df.columns:
#    print("---",c,"---")
#    unq = df[c].value_counts()
#    print(unq)
# Target dtype for each column, applied in bulk further below.
dtype_structure = {
    "category":["School_Year", "Busbreakdown_ID", "Run_Type", "Bus_No", "Route_Number", "Reason",
               "Schools_Serviced", "Boro", "Bus_Company_Name", "Incident_Number",
               "Breakdown_or_Running_Late", "School_Age_or_PreK"],
    #"float": ["How_Long_Delayed"],  # This will need to be cleaned later
    "int": ["Number_Of_Students_On_The_Bus"],
    "datetime64":["Occurred_On", "Created_On", "Informed_On", "Last_Updated_On"],
    "bool": ["Has_Contractor_Notified_Schools", "Has_Contractor_Notified_Parents", "Have_You_Alerted_OPT"],
    "object": []
}
# -
# -
print(df["How_Long_Delayed"][0:10])
# How_Long_Delayed holds free-text durations; print a few examples containing
# each token to see which formats appear in the raw data.
delayed_str = df["How_Long_Delayed"].values
trial = ["-", "hr", "min", "/"]
for token in trial:
    hits = [entry for entry in delayed_str if token in str(entry)]
    print(hits[0:6])
# +
# Converting to a clean data frame: cast each column group to its target dtype.
for dtp, col in dtype_structure.items():
    df[col] = df[col].astype(dtp)
print(df.info())
# -
# Common types of delays: count reports per breakdown/delay reason.
rsn = df.groupby("Reason").size()
lth = range(len(rsn))
plt.bar(lth, rsn)
plt.xticks(lth, rsn.index, rotation=90)
# Save BEFORE show: with %matplotlib inline, plt.show() finalizes and clears
# the current figure, so calling savefig afterwards writes a blank image.
plt.savefig("Common_types_of_Delays.png")
plt.show()
# When delays occur most often: count reports per hour of the day.
delay_time = df.groupby(df["Occurred_On"].map(lambda t: t.hour)).size()
lth = range(len(delay_time))
plt.bar(lth, delay_time)
plt.xticks(lth, delay_time.index, rotation=90)
plt.title("Number of Delays by Hour of the Day")
plt.ylabel("Number of delays reported")
plt.xlabel("Hour of the day")
plt.show()
# Days worst for traffic: heavy-traffic reports grouped by weekday (0=Monday).
traff_day = df[df["Reason"] == "Heavy Traffic"].groupby(
    df["Occurred_On"].map(lambda t: t.weekday())).size()
# NOTE(review): wkd lists 7 labels but lth has one tick per weekday actually
# present in the data; if no weekend reports exist the label count won't match
# the tick count — verify against the data.
wkd = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
lth = range(len(traff_day))
plt.bar(lth, traff_day)
plt.xticks(lth, wkd, rotation=90)
plt.title("Occurances of Heavy Traffic by Day of the Week")
plt.ylabel("Heavy Traffic Reports")
plt.show()
# +
# One small bar chart per delay reason, each showing counts by weekday.
wkd = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"]#, "Saturday", "Sunday"]
nrows = 4
ncols = 3
fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(16, 6), dpi=80)
plt.suptitle("Occurance of Delay Reason by Weekday")
# Subplot index is 1-based for plt.subplot.
i = 1
for rsn in df["Reason"].unique():
    reason_day = df[df["Reason"] == rsn].groupby(
        df["Occurred_On"].map(lambda t: t.weekday())).size()
    lth = range(len(reason_day))
    plt.subplot(nrows,ncols,i)
    plt.bar(lth, reason_day)
    plt.xticks(lth, wkd, rotation=90)
    #plt.title("Occurances of {r} by Day of the Week".format(r=rsn))
    plt.ylabel(str(rsn))
    i+=1
# delete empty axes
# axes[5,0].set_axis_off()
plt.show()
# -
# -
| NYC_Bus_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot3d(pixels, colors_rgb,
           axis_labels=list("RGB"), axis_limits=((0, 255), (0, 255), (0, 255))):
    """Scatter an image's pixels in a 3D color-space cube.

    pixels: image array whose three channels give the point coordinates.
    colors_rgb: same-shaped RGB image scaled to [0, 1], used as point colors.
    axis_labels / axis_limits: one label and (lo, hi) pair per channel.
    Returns the Axes3D object for further manipulation by the caller.
    """
    figure = plt.figure(figsize=(8, 8))
    axes3d = Axes3D(figure)

    # Axis limits, tick styling and labels — one per color channel.
    axes3d.set_xlim(*axis_limits[0])
    axes3d.set_ylim(*axis_limits[1])
    axes3d.set_zlim(*axis_limits[2])
    axes3d.tick_params(axis='both', which='major', labelsize=14, pad=8)
    axes3d.set_xlabel(axis_labels[0], fontsize=16, labelpad=16)
    axes3d.set_ylabel(axis_labels[1], fontsize=16, labelpad=16)
    axes3d.set_zlabel(axis_labels[2], fontsize=16, labelpad=16)

    # Flatten each channel plane into a 1D coordinate vector and color the
    # points with the provided RGB values.
    coords = [pixels[:, :, k].ravel() for k in range(3)]
    axes3d.scatter(
        coords[0], coords[1], coords[2],
        c=colors_rgb.reshape((-1, 3)), edgecolors='none')
    return axes3d
# Read a color image
img = cv2.imread("000275.png")
# Select a small fraction of pixels to plot by subsampling it
scale = max(img.shape[0], img.shape[1], 64) / 64  # at most 64 rows and columns
# Use the builtin int(): the np.int alias was deprecated in NumPy 1.20 and
# removed in 1.24, so np.int(...) raises AttributeError on modern NumPy.
img_small = cv2.resize(img, (int(img.shape[1] / scale), int(img.shape[0] / scale)), interpolation=cv2.INTER_NEAREST)
# Convert subsampled image to desired color space(s)
img_small_RGB = cv2.cvtColor(img_small, cv2.COLOR_BGR2RGB)  # OpenCV uses BGR, matplotlib likes RGB
img_small_HSV = cv2.cvtColor(img_small, cv2.COLOR_BGR2HSV)
img_small_rgb = img_small_RGB / 255.  # scaled to [0, 1], only for plotting
# Plot and show the pixel cloud in both RGB and HSV space.
plot3d(img_small_RGB, img_small_rgb)
plt.show()
plot3d(img_small_HSV, img_small_rgb, axis_labels=list("HSV"))
plt.show()
| Object Detection/Color_Spaces.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convolutional variational autoencoder with PyMC3 and Keras
# In this document, I will show how autoencoding variational Bayes (AEVB) works in PyMC3's automatic differentiation variational inference (ADVI). The example here is borrowed from [Keras example](https://github.com/fchollet/keras/blob/master/examples/variational_autoencoder_deconv.py), where convolutional variational autoencoder is applied to the MNIST dataset. The network architecture of the encoder and decoder are the same. However, PyMC3 allows us to define a probabilistic model, which combines the encoder and decoder, in the same way as other probabilistic models (e.g., generalized linear models), rather than directly implementing of Monte Carlo sampling and the loss function, as is done in the Keras example. Thus the framework of AEVB in PyMC3 can be extended to more complex models such as [latent dirichlet allocation](https://taku-y.github.io/notebook/20160928/lda-advi-ae.html).
# - Notebook Written by <NAME> (c) 2016
# To use Keras with PyMC3, we need to choose [Theano](http://deeplearning.net/software/theano/) as the backend for Keras.
# +
# %autosave 0
# %env KERAS_BACKEND=theano
# %env THEANO_FLAGS=device=cuda3,floatX=float32,optimizer=fast_run
import os
import sys
from collections import OrderedDict
import arviz as az
import keras
import matplotlib
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pymc3 as pm
import theano.tensor as tt
from keras import backend as K
from keras.layers import (Activation, BatchNormalization, Conv2D, Deconv2D,
Dense, Flatten, InputLayer, Reshape)
from theano import clone, config, function, pp, shared
print(f'Running on PyMC3 v{pm.__version__}')
# -
# %config InlineBackend.figure_format = 'retina'
az.style.use('arviz-darkgrid')
K.set_image_dim_ordering('th')
# ## Load images
# MNIST dataset can be obtained by [scikit-learn API](http://scikit-learn.org/stable/datasets/) or from [Keras datasets](https://keras.io/datasets/). The dataset contains images of digits.
# +
from keras.datasets import mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Reshape to (N, channels, rows, cols) for the 'th' image ordering set above,
# and rescale pixel values to [0, 1].
data = pm.floatX(x_train.reshape(-1, 1, 28, 28))
data /= np.max(data)
# -
# ## Use Keras
# We define a utility function to get parameters from Keras models. Since we have set the backend to Theano, parameter objects are obtained as shared variables of Theano.
#
# In the code, 'updates' are expected to include update objects (dictionary of pairs of shared variables and update equation) of scaling parameters of batch normalization. While not using batch normalization in this example, if we want to use it, we need to pass these update objects as an argument of `theano.function()` inside the PyMC3 ADVI function. The current version of PyMC3 does not support it, it is easy to modify (I want to send PR in future).
#
# The learning phase below is used for Keras to known the learning phase, training or test. This information is important also for batch normalization.
# +
from keras.layers import BatchNormalization, Dense
from keras.models import Sequential
def get_params(model):
    """Collect trainable shared variables and update rules from a Keras model.

    Returns a (params, updates) pair: `params` is the list of Theano shared
    variables attached to the model's layers (excluding the model input and
    any variable that is the target of an update rule, e.g. batch-norm
    statistics), and `updates` is the dict of those update rules.
    """
    update_dict = dict()
    update_targets = []
    shared_vars = []
    for layer in model.layers:
        layer_attrs = dir(layer)
        # Gather batch-norm-style update rules and remember their targets.
        if 'updates' in layer_attrs:
            update_dict.update(layer.updates)
            update_targets += [u[0] for u in layer.updates]
        # Any attribute that is a Theano shared variable is a candidate
        # parameter, except the symbolic model input itself.
        for attr_name in layer_attrs:
            candidate = getattr(layer, attr_name)
            if isinstance(candidate, tt.compile.SharedVariable):
                if candidate is not model.get_input_at(0):
                    shared_vars.append(candidate)
    return list(set(shared_vars) - set(update_targets)), update_dict
# This code is required when using BatchNormalization layer.
# Keras consults this shared variable to distinguish the training phase (1)
# from the test phase (0).
keras.backend.theano_backend._LEARNING_PHASE = \
    shared(np.uint8(1), name='keras_learning_phase')
# -
# ## Encoder and decoder
# First, we define the convolutional neural network for encoder using the Keras API. This function returns a CNN model given the shared variable representing observations (images of digits), the dimension of latent space, and the parameters of the model architecture.
def cnn_enc(xs, latent_dim, nb_filters=64, nb_conv=3, intermediate_dim=128):
    """Build the convolutional encoder network as a Keras model.

    Parameters
    ----------
    xs : theano.TensorVariable
        Input tensor; its ``tag.test_value`` fixes the batch input shape.
    latent_dim : int
        Dimension of the latent vector. The final dense layer outputs
        ``2 * latent_dim`` units (means followed by rhos).
    nb_filters : int
        Number of filters in the convolutional layers.
    nb_conv : int
        Kernel size of the later convolutions.
    intermediate_dim : int
        Width of the penultimate dense layer.
    """
    model = Sequential()
    model.add(InputLayer(input_tensor=xs,
                         batch_input_shape=xs.tag.test_value.shape))
    same_relu = {'padding': 'same', 'activation': 'relu'}
    # First conv keeps the default strides=(1, 1); the second downsamples.
    model.add(Conv2D(1, (2, 2), **same_relu))
    model.add(Conv2D(nb_filters, (2, 2), strides=(2, 2), **same_relu))
    model.add(Conv2D(nb_filters, (nb_conv, nb_conv), strides=(1, 1), **same_relu))
    model.add(Conv2D(nb_filters, (nb_conv, nb_conv), strides=(1, 1), **same_relu))
    model.add(Flatten())
    model.add(Dense(intermediate_dim, activation='relu'))
    model.add(Dense(2 * latent_dim))
    return model
# Then we define a utility class for encoders. This class does not depend on the architecture of the encoder except for input shape (`tensor4` for images), so we can use this class for various encoding networks.
class Encoder:
    """Encode observed images to variational parameters (mean/rho of a Gaussian).

    Parameters
    ----------
    xs : theano.tensor.sharedvar.TensorSharedVariable
        Placeholder of input images.
    dim_hidden : int
        The number of hidden (latent) variables.
    net : Function
        Callable ``net(xs, dim_hidden)`` returning a Keras model whose final
        layer outputs ``2 * dim_hidden`` units (means followed by rhos).
    """
    def __init__(self, xs, dim_hidden, net):
        model = net(xs, dim_hidden)
        self.model = model
        self.xs = xs
        # Split the network output: first half are means, second half rhos.
        self.out = model.get_output_at(-1)
        self.means = self.out[:, :dim_hidden]
        self.rhos = self.out[:, dim_hidden:]
        self.params, self.updates = get_params(model)
        self.enc_func = None  # compiled lazily by _get_enc_func
        self.dim_hidden = dim_hidden
    def _get_enc_func(self):
        # Compile (once) and cache a Theano function from images to
        # (means, rhos), substituting the symbolic placeholder with a
        # fresh input tensor.
        if self.enc_func is None:
            xs = tt.tensor4()
            means = clone(self.means, {self.xs: xs})
            rhos = clone(self.rhos, {self.xs: xs})
            self.enc_func = function([xs], [means, rhos])
        return self.enc_func
    def encode(self, xs):
        """Return the variational means for the given images."""
        # Used in test phase: switch off Keras training behaviour globally.
        keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
        enc_func = self._get_enc_func()
        means, _ = enc_func(xs)
        return means
    def draw_samples(self, xs, n_samples=1):
        """Draw samples of hidden variables based on variational parameters encoded.

        Parameters
        ----------
        xs : numpy.ndarray, shape=(n_images, 1, height, width)
            Images.
        n_samples : int
            Number of latent samples drawn per image.
        """
        # Used in test phase
        keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
        enc_func = self._get_enc_func()
        means, rhos = enc_func(xs)
        # Reparameterization trick: z = mean + sd(rho) * standard normal,
        # with each image's parameters repeated n_samples times.
        means = np.repeat(means, n_samples, axis=0)
        rhos = np.repeat(rhos, n_samples, axis=0)
        ns = np.random.randn(len(xs) * n_samples, self.dim_hidden)
        zs = means + pm.distributions.dist_math.rho2sd(rhos) * ns
        return zs
# In a similar way, we define the decoding network and a utility class for decoders.
def cnn_dec(zs, nb_filters=64, nb_conv=3, output_shape=(1, 28, 28)):
    """Returns a deconvolutional CNN decoder model of Keras.

    Parameters
    ----------
    zs : theano.tensor.var.TensorVariable
        Input tensor of latent variables.
    nb_filters : int
        Number of filters in the (de)convolutional layers.
    nb_conv : int
        Kernel size of the first two deconvolutions.
    output_shape : tuple
        NOTE(review): currently unused in the body; the output shape is
        fixed by the layer stack below — confirm before relying on it.
    """
    minibatch_size, dim_hidden = zs.tag.test_value.shape
    input_layer = InputLayer(input_tensor=zs,
                             batch_input_shape=zs.tag.test_value.shape)
    model = Sequential()
    model.add(input_layer)
    model.add(Dense(dim_hidden, activation='relu'))
    model.add(Dense(nb_filters * 14 * 14, activation='relu'))
    cp1 = {'padding': 'same', 'activation': 'relu', 'strides': (1, 1)}
    cp2 = cp1
    cp3 = {'padding': 'valid', 'activation': 'relu', 'strides': (2, 2)}
    cp4 = {'padding': 'same', 'activation': 'sigmoid'}
    # Reshape the dense output into (nb_filters, 14, 14) feature maps
    # (Reshape takes the per-sample shape, hence the [1:] slice).
    output_shape_ = (minibatch_size, nb_filters, 14, 14)
    model.add(Reshape(output_shape_[1:]))
    model.add(Deconv2D(nb_filters, (nb_conv, nb_conv), data_format='channels_first', **cp1))
    model.add(Deconv2D(nb_filters, (nb_conv, nb_conv), data_format='channels_first', **cp2))
    # NOTE(review): this assignment is never used afterwards; it appears to
    # record the intended intermediate shape after the strided deconvolution.
    output_shape_ = (minibatch_size, nb_filters, 29, 29)
    model.add(Deconv2D(nb_filters, (2, 2), data_format='channels_first', **cp3))
    model.add(Conv2D(1, (2, 2), **cp4))
    return model
class Decoder:
    """Decode hidden (latent) variables into images.

    Parameters
    ----------
    zs : Theano tensor
        Hidden variables fed to the decoding network.
    net : Function
        Callable ``net(zs)`` returning the Keras decoder model.
    """
    def __init__(self, zs, net):
        self.model = net(zs)
        self.zs = zs
        self.out = self.model.get_output_at(-1)
        self.params, self.updates = get_params(self.model)
        self.dec_func = None  # compiled lazily by _get_dec_func
    def _get_dec_func(self):
        """Compile (once) and cache a function mapping latents to images."""
        if self.dec_func is None:
            latent_in = tt.matrix()
            # Substitute the symbolic latent placeholder with a fresh input.
            decoded = clone(self.out, {self.zs: latent_in})
            self.dec_func = function([latent_in], decoded)
        return self.dec_func
    def decode(self, zs):
        """Decode hidden variables to images.

        An image consists of the mean parameters of the observation noise.

        Parameters
        ----------
        zs : numpy.ndarray, shape=(n_samples, dim_hidden)
            Hidden variables.
        """
        # Used in test phase: switch off Keras training behaviour globally.
        keras.backend.theano_backend._LEARNING_PHASE.set_value(np.uint8(0))
        return self._get_dec_func()(zs)
# ## Generative model
# We can construct the generative model with the PyMC3 API and the functions and classes defined above. We set the size of mini-batches to 100 and the dimension of the latent space to 2 for visualization.
# Constants
minibatch_size = 100
dim_hidden = 2  # 2-D latent space so the results can be visualized directly
# We require a placeholder for images, into which mini-batches of images will be placed during ADVI inference. It is also the input for the encoder. Below, `enc.model` is a Keras model of the encoder network and we can check the model architecture using the method `summary()`.
# Placeholder of images; the test_value gives the symbolic tensor a concrete
# shape so the Keras models can be constructed.
xs_t = tt.tensor4(name='xs_t')
xs_t.tag.test_value = np.zeros((minibatch_size, 1, 28, 28)).astype('float32')
# Encoder
enc = Encoder(xs_t, dim_hidden, net=cnn_enc)
enc.model.summary()
# The probabilistic model involves only two random variables; latent variable $\mathbf{z}$ and observation $\mathbf{x}$. We put a Normal prior on $\mathbf{z}$, decode the variational parameters of $q(\mathbf{z}|\mathbf{x})$ and define the likelihood of the observation $\mathbf{x}$.
with pm.Model() as model:
    # Hidden variables; total_size rescales the log-probability for minibatching
    zs = pm.Normal('zs', mu=0, sigma=1, shape=(minibatch_size, dim_hidden), dtype='float32', total_size=len(data))
    # Decoder and its parameters
    dec = Decoder(zs, net=cnn_dec)
    # Observation model
    xs_ = pm.Normal('xs_', mu=dec.out, sigma=0.1, observed=xs_t, dtype='float32', total_size=len(data))
# In the generative model above, we do not know how the decoded variational parameters are passed to $q(\mathbf{z}|\mathbf{x})$. To do this, we will set the argument `local_RVs` in the ADVI function of PyMC3.
local_RVs = OrderedDict({zs: dict(mu=enc.means, rho=enc.rhos)})
# This argument is an `OrderedDict` whose keys are random variables to which the decoded variational parameters are set (`zs` in this model). Each value of the dictionary contains two Theano expressions representing variational mean (`enc.means`) and rhos (`enc.rhos`). A scaling constant (`len(data) / float(minibatch_size)`) is set automatically (as we specified it in the model saying what's the `total_size`) to compensate for the size of mini-batches of the corresponding log probability terms in the evidence lower bound (ELBO), the objective of the variational inference.
#
# The scaling constant for the observed random variables is set in the same way.
# We can also check the architecture of the decoding network, as we did for the encoding network.
dec.model.summary()
# ## Inference
# Let's use ADVI to fit the model.
# +
# In memory Minibatches for better speed
xs_t_minibatch = pm.Minibatch(data, minibatch_size)
with model:
    # Fit with ADVI; the encoder/decoder network weights are optimized
    # jointly with the variational parameters via more_obj_params.
    approx = pm.fit(
        15000,
        local_rv=local_RVs,
        more_obj_params=enc.params + dec.params,
        obj_optimizer=pm.rmsprop(learning_rate=0.001),
        more_replacements={xs_t:xs_t_minibatch},
    )
# -
# ## Results
# We can plot the trace of the negative ELBO obtained during optimization, to verify convergence.
plt.plot(approx.hist);
# Finally, we can plot the distribution of the images in the latent space. To do this, we make a 2-dimensional grid of points and feed them into the decoding network. The mean of $p(\mathbf{x}|\mathbf{z})$ is the image corresponding to the samples on the grid.
nn = 10  # grid resolution per latent dimension
zs = np.array([(z1, z2)
               for z1 in np.linspace(-2, 2, nn)
               for z2 in np.linspace(-2, 2, nn)]).astype('float32')
# Decode the grid and tile the nn x nn decoded images into one big image.
xs = dec.decode(zs)[:, 0, :, :]
xs = np.bmat([[xs[i + j * nn] for i in range(nn)] for j in range(nn)])
matplotlib.rc('axes', **{'grid': False})
plt.figure(figsize=(10, 10))
plt.imshow(xs, interpolation='none', cmap='gray')
plt.show()
# %load_ext watermark
# %watermark -n -u -v -iv -w
| docs/source/notebooks/convolutional_vae_keras_advi.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table width = "100%">
# <tr style="background-color:white;">
# <!-- QWorld Logo -->
# <td style="text-align:left;width:200px;">
# <img src="../images/QWorld.png"> </td>
# <td style="text-align:right;vertical-align:bottom;font-size:16px;">
# Prepared by <a href="https://gitlab.com/pjr1363" target="_blank"> <NAME> </a></td>
# </tr>
# </table>
#
# <hr>
# ## Main Header
# Some text
# +
#Some code
# -
# ### Sub header
#
#
# Some text
# ### Task 1
#
# Task 1 content
# [click for our solution](Template_Notebook_Solutions.ipynb#task1)
| notebooks/.ipynb_checkpoints/Template_Notebook-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 (''.venv'': venv)'
# name: pythonjvsc74a57bd0d11f71130a82a6d116270efeb273ed9978c03859f680f986652fa78eeb835ba9
# ---
from keras.models import model_from_json
import numpy as np
import os
import cv2
# Load the model architecture from JSON; a context manager guarantees the
# file handle is closed even if reading or parsing raises.
with open('model.json', 'r') as json_file:
    loaded_model_json = json_file.read()
loaded_model = model_from_json(loaded_model_json)
# load weights into new model
loaded_model.load_weights("model.h5")
print("Loaded model from disk")
# Sanity-check a single image: add a batch dimension, predict, and inspect
# the argmax class and the number of output classes.
img = cv2.imread('./NoFilterModeDataSet/Aboard1.png')
img = np.expand_dims(img, axis=0)
prediction = loaded_model.predict(img)
np.argmax(prediction)
len(prediction[0])
# Predict the class of the first 100 "Aboard" images and tally the results.
predictions = []
for i in range(1,101):
    img = cv2.imread(f"./NoFilterModeDataSet/Aboard{i}.png")
    img = np.expand_dims(img, axis=0)
    predictions.append(np.argmax(loaded_model.predict(img)))
from collections import Counter
Counter(predictions)
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from scipy.stats import norm
from time import sleep
import os
# -
# ## Ler arquivo .csv(excel)
# Load the vessel (embarcacoes) dataset from a local CSV file.
filepath = 'C:/Users/victo/Desktop/university/Courses/reconhecimento_de_padrao/Markov_Python/content/embarcacoes/'
df = pd.read_csv(filepath+'embacacoes.csv')
# ## Select the trajectory and the variable to be analysed
N = 20 # selected trajectory (vessel id)
M = df.values # matrix containing all the information
pos = np.argwhere(M[:,12] == N)# row indices belonging to vessel N
#M = M[pos[:,0],:] # (disabled) restrict the matrix to vessel N only
# NOTE(review): the slice below takes ALL rows, not just vessel N — confirm
# whether the commented line above was meant to be active.
s = M[0:,7] # speed column
s = np.array([s]).T
# +
k = 2 # number of Gaussian components
[N, Dim] = s.shape
# k-means initialization: pick k random points as initial centroids, then
# run 5 assignment/update iterations.
aux=np.random.choice(N, N, replace=False)
w = s[aux[0:k],:]
for j in range(0,5):
    rot = np.array([])
    for n in range(0, N):
        # assign each sample to the nearest centroid (squared distance)
        rot = np.append(rot, np.argmin(sum(pow(np.ones((k,1))*s[n, :]-w, 2).T)) )
    for m in range(0, k):
        w[m,:] = np.mean(s[np.where(rot == m), :], axis=1)
del aux, j, n, m
# -
# Per-cluster (scatter) covariance estimates around each centroid.
c = np.zeros((k,Dim,Dim))
for i in range(0, k):
    aux = s[np.where(rot == i)[0],:] - w[i, :]
    c[i,:,:] = np.dot(aux.T, aux)/N
del aux, i
# +
# %matplotlib notebook
# EM iterations for the 2-component Gaussian mixture, with live plotting
# of the current cluster assignment.
fig = plt.figure()
ax = fig.add_subplot(111)
plt.ion()
likehood = []
fig.show()
fig.canvas.draw()
for l in range(0,1):
    # E-step: evaluate the (weighted) Gaussian density of each sample under
    # each component. NOTE(review): pi is 1/4 for both components, so the
    # mixture weights do not sum to 1 — confirm this is intended.
    P = np.zeros((N,k))
    for j in range(0,N):
        for i in range(0,k):
            temp1 = 1/(pow(2*np.pi,Dim/2)*np.linalg.det(c[i,:, :]))
            temp2 = np.dot(s[j,:]-w[i,:], np.dot(np.linalg.inv(c[i,:,:]),(s[j,:]-w[i,:])))
            pi = 1/4*np.ones((k,1));
            P[j,i] = pi[i,0]*temp1*np.exp(-1/2*temp2)
    del temp1, temp2, i, j
    # NOTE(review): L aliases P (no copy) — the normalization below also
    # overwrites P in place, so the likelihood computed further down from
    # np.sum(L, axis=1) sums rows that are already normalized to 1.
    L = P
    den = np.sum(L, axis = 1)
    for j in range(0, k):
        for n in range(0, N):
            L[n,j] = P[n,j]/den[n]
    del n, j
    # M-step: update means and covariances with the responsibilities L.
    Nk = np.sum(L, axis = 0)
    uknovo = np.zeros((w.shape))
    uknovo[0,:] = 1/Nk[0]*np.sum(L[:,0]*s.T,axis = 1)
    uknovo[1,:] = 1/Nk[1]*np.sum(L[:,1]*s.T,axis = 1)
    # update the covariances
    cknovo = np.zeros((k,Dim,Dim))
    for i in range(0, k):#len(u)):
        aux = s - uknovo[i, :]
        cknovo[i,:,:] = np.dot(L[:,i]*aux.T, aux)/Nk[i]
    del aux
    w = uknovo
    c = cknovo
    likehood = np.append(likehood,sum(np.log(np.sum(L, axis=1))))
    # Hard assignment (1-based labels) for plotting.
    rot = np.zeros((N))
    for n in range(0, N):
        rot[n] = int(np.argmax(L[n,:])+1)
    ax.clear()
    ax.plot(s[np.where(rot == 1)[0], 0], '*b')
    ax.plot(s[np.where(rot == 2)[0], 0], '*r')
    fig.canvas.draw()
# -
# %matplotlib notebook
# Plot the positions (columns 10/9: presumably lon/lat — confirm) colored
# by the hard cluster assignment.
plt.plot(M[np.where(rot == 1),10].T, M[np.where(rot == 1),9].T, 'b*')
plt.plot(M[np.where(rot == 2),10].T, M[np.where(rot == 2),9].T, 'r*')
# +
# Estimate empirical next-state frequencies from the label sequence: for
# each state, count which label follows it.
pos_f = np.argwhere(rot == 1)
pos_c = np.argwhere(rot == 2)
Uc_t , Nc_t= np.unique(rot[pos_c[:-1,0]+1], return_counts = True)
Uf_t , Nf_t= np.unique(rot[pos_f[:-2,0]+1], return_counts = True)
# %matplotlib widget
plt.plot(Uc_t, Nc_t/(np.sum(Nf_t)+np.sum(Nc_t)), '*r')
plt.plot(Uf_t, Nf_t/(np.sum(Nf_t)+np.sum(Nc_t)), '*b')
Nc_t/(+np.sum(Nc_t)),Nf_t/(np.sum(Nf_t))
# Row-normalized transition-probability estimates (state 1 row, state 2 row).
a = np.array([Nf_t/(np.sum(Nf_t)),Nc_t/(+np.sum(Nc_t))])
# +
# %matplotlib widget
# Evaluate a 1-D Gaussian per component over a speed grid and normalize
# to obtain discrete emission distributions.
t = np.linspace(0,21,211)
g1 = 1/(c[0]*np.sqrt(2*np.pi))*np.exp(-1/2*pow((t-w[0])/c[0], 2))
g2 = 1/(c[1]*np.sqrt(2*np.pi))*np.exp(-1/2*pow((t-w[1])/c[1], 2))
plt.plot(t,g1.T/np.sum(g1),'r')
plt.plot(t,g2.T/np.sum(g2),'b')
# -
# Persist the emission (b) and transition (a) estimates as CSV files.
b = np.array([g1[0,:].T/np.sum(g1),g2[0,:].T/np.sum(g2)])
b.shape
b = pd.DataFrame(b.T,columns=["cruzeiro","pesca" ])
b.to_csv (r'C:/Users/victo/Desktop/university/Courses/reconhecimento_de_padrao/projeto/fishing_finder/content/distribution/b.csv', index = False, header=True, encoding='utf-8')
a = pd.DataFrame(a)
a.to_csv (r'C:/Users/victo/Desktop/university/Courses/reconhecimento_de_padrao/projeto/fishing_finder/content/distribution/a.csv', index = False, header=True, encoding='utf-8')
# # Ler distribuição de probabilidade inicial das velocidades
filepath = 'C:/Users/victo/Desktop/university/Courses/reconhecimento_de_padrao/projeto/fishing_finder/content/distribution/'
a = pd.read_csv(filepath+'a.csv')# state-transition probabilities
b = pd.read_csv(filepath+'b.csv')# speed-event (emission) probabilities
a = a.values
b = b.values
pi = np.array([0,1])# initial state distribution
# ### initial distribution of the speeds
# %matplotlib notebook
# NOTE(review): `x` is not defined yet at this point in the file — it is
# only created inside the Baum-Welch loop below. This cell relies on
# out-of-order notebook execution; confirm before running top-to-bottom.
plt.plot(x,b[:,1])
plt.plot(x,b[:,0])
len(b)
# +
# %matplotlib notebook
# Baum-Welch (EM for HMMs): repeatedly run forward-backward over the speed
# sequence s, then re-estimate the transition matrix a, the emission
# distributions b, and the initial distribution pi. Speeds are discretized
# via int(speed * 10) to index rows of b.
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax2 = fig.add_subplot(212)
plt.ion()
fig.show()
fig.canvas.draw()
erro = np.array([])
for z in range(0,10):
    ###################################### Forward algorithm #########################
    # alpha[:, t] is the scaled forward probability; eta holds the per-step
    # normalization constants.
    alpha = np.zeros((2,len(s)))
    eta = np.zeros(len(s))
    alpha[:,0] = pi*b[int(s[0]*10),:]
    eta[0] = np.sum(alpha[:,0])
    alpha[:,0] = alpha[:,0]/eta[0]
    #print(alpha[:,0])
    for T in range(0, len(s)-1):
        for j in range(0,2):
            for i in range(0,2):
                alpha[j,T+1] = alpha[j,T+1] + alpha[i,T]*a[i,j]*b[int(s[T+1]*10),j]#
        eta[T+1] = np.sum(alpha[:,T+1])
        alpha[:,T+1] = alpha[:,T+1]/eta[T+1]
    ############################ Backward algorithm ########################
    # beta[:, t] is the scaled backward probability, reusing the forward
    # normalizers eta. NOTE(review): only beta[1, -1] is initialized to 1
    # (not both states) — confirm this asymmetric initialization is intended.
    beta = np.zeros((2,len(s)))
    beta[1,-1] = 1
    del i, j
    for T in range(len(s)-2, -1, -1):
        for i in range(0,2):
            for j in range(0,2):
                beta[i,T] = beta[i,T] + beta[j,T+1]*a[i,j]*b[int(s[T+1]*10),j]#
        beta[:,T] = beta[:,T]/eta[T+1]
    del i, j
    #################################### Gamma computation ##############################################
    # gamma[i, t]: posterior probability of being in state i at time t.
    gamma = np.zeros((2,len(s)))
    for t in range(0,len(s)):
        den = 0
        den = np.sum(alpha[:,t]*beta[:,t])
        for i in range(0,2):
            gamma[i, t] = alpha[i, t]*beta[i, t]/den
    ################################### Zeta computation ####################################
    # zeta[i, j, t]: posterior probability of transitioning i -> j at time t.
    zeta = np.zeros((2,2, len(s)-1 ))
    for t in range(0, len(s)-1):
        for i in range(0,2):
            for j in range(0,2):
                zeta[i, j, t] = alpha[i,t]*a[i,j]*b[int(s[t+1]*10),j]*beta[j, t+1]/(eta[t+1]*np.sum(alpha[:,t]*beta[:,t]))
    # M-step: re-estimate the transition matrix and track the change (erro).
    temp1 = np.sum(zeta, axis = 2)
    temp2 = np.sum(zeta, axis = 1)
    a_new = np.zeros((2,2))
    for i in range(0,2):
        for j in range(0,2):
            a_new[i, j] = temp1[i, j]/np.sum(temp1, axis = 1)[i]
    erro = np.append(erro, np.sum(np.abs(a-a_new)))
    a = np.copy(a_new)
    ######### Re-estimate the emission distributions #################################################
    # Gaussian emissions refit from gamma-weighted moments, then discretized
    # on the grid x and renormalized.
    u1 = np.sum(gamma[0,:]*s[:,0])/np.sum(gamma[0,:])
    u2 = np.sum(gamma[1,:]*s[:,0])/np.sum(gamma[1,:])
    c1 = np.sum(gamma[0,:]*abs(s[:,0]-u1))/np.sum(gamma[0,:])
    c2 = np.sum(gamma[1,:]*abs(s[:,0]-u2))/np.sum(gamma[1,:])
    x = np.arange(0,21.1,0.1)
    g1 = 1/(c1*np.sqrt(2*np.pi))*np.exp(-1/2*pow((x-u1)/c1, 2))
    g2 = 1/(c2*np.sqrt(2*np.pi))*np.exp(-1/2*pow((x-u2)/c2, 2))
    b = np.array([g1/np.sum(g1),g2/np.sum(g2)]).T
    pi = gamma[:,0]
    ################################################# Plot ######################################################
    ax1.clear()
    ax2.clear()
    ax1.plot(x,b[:,0], 'r')
    ax1.plot(x,b[:,1], 'b')
    ax2.plot(erro)
    fig.canvas.draw()
# -
a
# Hard-decode the state sequence from the smoothed posteriors gamma.
rot = np.zeros(len(gamma.T))
for i in range(0,len(gamma.T)):
    if(gamma[0,i] > gamma[1,i]) :
        rot[i] = 1;
    else:
        rot[i] = 0
# +
# %matplotlib notebook
# Plot positions colored by the decoded state (1 vs 0).
plt.plot(M[np.where(rot == 1),10].T, M[np.where(rot == 1),9].T, 'b*')
plt.plot(M[np.where(rot == 0),10].T, M[np.where(rot == 0),9].T, 'r*')
# -
rot = np.array([rot])
rot.shape
# +
# Stack (lat, lon, state) rows, then restrict to the vessel whose id in the
# last column equals 19, and plot its track by decoded state.
v = np.array([M[:,9], M[:,10], rot[0,:]])
pd.DataFrame(v[:,pos[:,0]])
pos = np.argwhere(M[:, -1] == 19)
v = v[:,pos[:,0]]
# %matplotlib notebook
plt.plot(v[0,np.where(v[2,:] == 1)].T, v[1,np.where(v[2,:] == 1)].T, '*b')
plt.plot(v[0,np.where(v[2,:] == 0)].T, v[1,np.where(v[2,:] == 0)].T, '*r')
plt.savefig("filepath.svg", format = 'svg', dpi=1000)
# +
# %matplotlib notebook
# Plot the speeds of the selected vessel, colored by decoded state.
s = M[0:,7] # speed column
s = np.array([s]).T
s = s[pos[:,0]]
plt.plot(s[np.where(v[2,:] == 1)], '*r')
plt.plot(s[np.where(v[2,:] == 0)], '*b')
# -
| jupyter/.ipynb_checkpoints/final_file-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Adaptive step-size selection for ODE filters
#
#
# In ProbNum the step size adaptation scheme from [Schober et al., 2019](https://arxiv.org/pdf/1610.05261.pdf) respectively [Bosch et al., 2021](https://arxiv.org/pdf/2012.08202.pdf) is implemented.
#
# +
# Make inline plots vector graphics instead of raster graphics
# %matplotlib inline
from IPython.display import set_matplotlib_formats
set_matplotlib_formats('pdf', 'svg')
# Plotting
import matplotlib.pyplot as plt
plt.style.use('../../probnum.mplstyle')
# -
import numpy as np
from probnum import randvars
from probnum.diffeq import probsolve_ivp
# We begin by defining an ODE: the usual Lotka-Volterra problem.
# +
def f(t, y):
    """Lotka-Volterra vector field: intrinsic growth/decay plus interaction."""
    prey, predators = y
    d_prey = 0.5 * prey - 0.05 * prey * predators
    d_predators = -0.5 * predators + 0.05 * prey * predators
    return np.array([d_prey, d_predators])
def df(t, y):
    """Jacobian of the Lotka-Volterra vector field with respect to y."""
    prey, predators = y
    row_prey = [0.5 - 0.05 * predators, -0.05 * prey]
    row_predators = [0.05 * predators, -0.5 + 0.05 * prey]
    return np.array([row_prey, row_predators])
t0 = 0.  # integration start time
tmax = 20.  # integration end time
y0 = np.array([20, 20])  # initial populations (prey, predators)
# -
# ### Order-4 prior with EK1
#
# We start by showing how solving an IVP with adaptive steps works.
# + tags=["nbsphinx-thumbnail"]
# Solve with adaptive steps, then evaluate the posterior mean on a dense grid.
sol = probsolve_ivp(f, t0, tmax, y0, df=df, algo_order=4, method="EK1")
evalgrid = np.linspace(t0, tmax, 200)
means = sol(evalgrid).mean
plt.plot(evalgrid, means[:, 0], label="Prey")
plt.plot(evalgrid, means[:, 1], label="Predators")
plt.legend(loc="upper right")
plt.show()
# -
# Let's visualise the individual steps.
plt.plot(evalgrid, means[:, 0], label="Prey")
plt.plot(evalgrid, means[:, 1], label="Predators")
# Mark every time point the adaptive solver actually stepped to.
for t in sol.locations:
    plt.axvline(t, linewidth=0.5, color="gray")
plt.legend(loc="upper right")
plt.show()
# Note how more steps are taken near the peaks.
# ## The same, for a few priors
#
# Let's consider how other priors fare in this setting.
# +
# Compare solver orders (rows) against linearization strategies (columns),
# plotting each solution with its adaptive step locations.
algo_orders = [2, 3]
filters = ["EK0", "EK1"]
fig, ax = plt.subplots(
    nrows=len(algo_orders), ncols=len(filters), sharex=True, sharey=True, figsize=(12, 12)
)
evalgrid = np.linspace(t0, tmax, 100)
for idx in range(len(algo_orders)): # algo_orders in rows
    for jdx in range(len(filters)): # filters in cols
        sol = probsolve_ivp(f, t0, tmax, y0, df=df,
                            algo_order=algo_orders[idx],
                            method=filters[jdx],
                            )
        solution = sol(evalgrid)
        ts, means, stds = evalgrid, solution.mean, solution.std
        ax[idx][jdx].plot(ts, means)
        # Vertical lines show where the adaptive controller placed steps.
        for t in sol.locations:
            ax[idx][jdx].axvline(t, linewidth=0.2, color="black")
        ax[idx][jdx].set_title(f"Order {algo_orders[idx]} w/{filters[jdx]}")
        ax[idx][jdx].set_xlim((t0, tmax))
        ax[idx][jdx].set_ylim((-10, 35))
plt.show()
# -
# They all seem to capture the true solution fairly well. The higher order takes significantly fewer steps than the lower order.
| docs/source/tutorials/odes/adaptive_steps_odefilter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day_26. Nested Logic
#
#
# <br>
#
# 
# +
# HackerRank "Nested Logic": compute a library fine from the actual return
# date (d m y) and the expected due date (d m y), both read from stdin.
ad, am, ay = map(int, input().split())
ed, em, ey = map(int, input().split())
d = ad - ed
m = am - em
y = ay - ey
# Fine rules: returned in a later year -> flat 10000; later month of the
# same year -> 500 per month; later day of the same month -> 15 per day;
# returned on time or early -> 0.
if y > 0: print(10000)
elif y < 0: print(0)
elif m > 0: print(500 * m)
elif m < 0: print(0)
elif d > 0: print(15 * d)
else: print(0)  # fix: d <= 0 (including d == 0, same-day return) is no fine
# +
# Alternative solution: lexicographic tuple comparison (year, month, day)
# cleanly handles the on-time-or-early case first.
ad, am, ay = map(int, input().split())
ed, em, ey = map(int, input().split())
if (ay, am, ad) <= (ey, em, ed):
    print(0)
elif ay > ey:
    print(10000)
elif am > em:
    print(500 * (am - em))
else:
    print(15 * (ad - ed))
| Learn to code in 30 days/Day_26. Nested Logic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import json
import h5py
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# -
# Open the FCNet naval-propulsion benchmark; the HDF5 keys are JSON-encoded
# hyperparameter configurations.
data = h5py.File("../datasets/fcnet_tabular_benchmarks/fcnet_naval_propulsion_data.hdf5", 'r')
configs = []
for k_str in data.keys():
    configs.append(json.loads(k_str))
df = pd.DataFrame(configs)
df
# NOTE(review): this rebinds `data` from the h5py File to a stacked Series,
# so the `data[k_str]` accesses further down only work if the cells were run
# out of order — confirm the intended execution order.
data = df.stack()
data.name = "test"
data.reset_index()  # NOTE(review): result is discarded; reset_index is not in-place
g = sns.pairplot(df)
# Inspect the metrics stored for the last configuration key from the loop.
data[k_str].keys()
data[k_str]['final_test_error']
data[k_str]['valid_mse']
| scratch/FCNet Tabular Benchmarks-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/adholmgren/tensorflow_playground/blob/master/tf_dataset.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="RIhbCtak8CYS" colab_type="text"
# Copyright 2020, <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# + [markdown] id="skZshN7Y85YR" colab_type="text"
# # A practical guide to the Dataset class in tensorflow
# The tensorflow Dataset class can streamline your machine learning application, but I've personally found many times when the documentation could benefit from some more examples or some more exposition. This notebook will hopefully serve as a guide, or at the very least a supplement, to the tensorflow documentation. An intermediate level of Python knowledge is assumed but not necessarily required, some concepts are best communicated with intermediate Python knowledge. Basic Python is required.
# + id="csllqN8Jpjar" colab_type="code" colab={}
try:
    # # %tensorflow_version only exists in Colab.
    # %tensorflow_version 2.x
    # `pass` keeps the suite non-empty: jupytext renders the IPython magic
    # above as a comment, which would otherwise leave an empty block and a
    # SyntaxError when this notebook runs as a plain .py script.
    pass
except Exception:
    # !pip install --quiet tensorflow-gpu>=2.0.0
    pass
# + id="6D8PWnkzlE7u" colab_type="code" colab={}
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import tensorflow.keras as K
# tf.enable_eager_execution()
# + [markdown] id="cW6XGcP6nSWy" colab_type="text"
# # Build dataset from arrays
# + [markdown] id="sD-rO2aUGZwS" colab_type="text"
# If the data comes in as arrays it's pretty simple to get the data into a tensorflow dataset using the `from_tensor_slices` method.
# + id="xbAEiXD_lz12" colab_type="code" colab={}
# a (7,) array: from_tensor_slices slices along axis 0, giving 7 scalar elements
arr = np.array([7, 2, 1, 6, 3, 5, 9])
dataset = tf.data.Dataset.from_tensor_slices(arr)
# + [markdown] id="uPMpOJcg4V1a" colab_type="text"
# The tensor slices assumes the first dimension of the array is the dimension of new instances. For example, in the last code block it consumed the (7,) array and generated 7 instances of () shaped Tensors. As another example, the following code will consume 2 instances of 2x2 arrays.
# + id="8C1H8HnD4WWb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="3e2b51ae-f20a-4742-f428-c939d9a49a6e"
# a (2, 2, 2) array sliced along its first axis -> two 2x2 tensor elements
dataset_2D = tf.data.Dataset.from_tensor_slices(np.reshape(np.arange(2**3), (2, 2, 2)))
for elem in dataset_2D:
    print(elem)
# + [markdown] id="L8FUm9JnEAbp" colab_type="text"
# In comparison, this code will give just one instance of a 2x2x2 array.
# + id="I0izNSFjD5d9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="16ac13fd-ddfc-4911-8804-f8517151ff84"
# leading axis of length 1 -> a single 2x2x2 tensor element
dataset_3D = tf.data.Dataset.from_tensor_slices(np.reshape(np.arange(2**3), (1, 2, 2, 2)))
for elem in dataset_3D:
    print(elem)
# + [markdown] id="jg8kpRN0EGSh" colab_type="text"
# **Test yourself**: build a dataset with 3 instances of a 3x3 array. Build 1 instance of a 3x10 array.
# + id="JUl7LP1wQifP" colab_type="code" colab={}
# 3 instances of 3x3 array
# code here
# 1 instance of a 3x10 array
# code here
# + [markdown] id="2dicUb1SQzP4" colab_type="text"
# Personally, I feel this next method is a bit redundant (I haven't looked into the source code, there could be some slightly different optimization), but there's also a `from_tensors` method that creates a single instance. I think the context that makes the most sense for this method is testing at inference.
# + id="wqIVSGELQyUX" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 126} outputId="295d971c-9f27-431d-ac68-d5d75e182465"
# from_tensors wraps the whole array as ONE element (no slicing along axis 0)
dataset_3D = tf.data.Dataset.from_tensors(np.reshape(np.arange(2**3), (2, 2, 2)))
for elem in dataset_3D:
    print(elem)
# + [markdown] id="C0LdGDFtUwaB" colab_type="text"
# You can see that this is the same thing as using
# ```Python
# dataset_3D = tf.data.Dataset.from_tensor_slices(np.reshape(np.arange(2**3), (1, 2, 2, 2)))
# ```
# Basically, if your array is a single instance this is a way for the Dataset class to consume the single instance.
# + [markdown] id="bPeOb7imIPUt" colab_type="text"
# # Using methods in the dataset class
# + [markdown] id="kqAg3cZPRfZ8" colab_type="text"
# ### Dataset as an iterator
# + [markdown] id="VcISuxtRu19b" colab_type="text"
# Just like most Python objects, the dataset class has an iterator. In fact, the fundamental point of the dataset class is that its very essence is to be an iterator. All the methods within the dataset class either instantiate the iterator (e.g. `from_tensor_slices`), modify the iterator (e.g. `map`), or control the iterator (e.g. `batch`). The Dataset class gives users flexibility to control the memory and processing in flowing data inputs to Tensorflow's machine learning models. Most people can, and probably want to, stop there -- the dataset is fundamentally an iterator similar to Python's range, or numpy arrays, or a thousand other Python objects that iterate.
#
# For anyone interested in peaking a bit more into the nitty gritties read on. If you're not familiar with Python iterators and how they're built, here's a pretty good guide ([link](https://www.ics.uci.edu/~pattis/ICS-33/lectures/iterators.txt)). To really find out what's happening with the dataset iterator you have to go to the source code. The first thing that the source code lets you know is that it implements the Python iterator protocol and therefore can only be used in eager mode. This comment actually has the potential to cause confusion because the Dataset class can be used with a static graph, it will just function differently. An important consequence of eager vs. static execution, is that in static graph the entirety of arrays are placed into the static graph as Variables (potentially taking up a lot of memory and running into byte limits in graph serialization).
# ```Python
# def __iter__(self):
# """Creates an `Iterator` for enumerating the elements of this dataset.
# The returned iterator implements the Python iterator protocol and therefore
# can only be used in eager mode.
# Returns:
# An `Iterator` over the elements of this dataset.
# Raises:
# RuntimeError: If not inside of tf.function and not executing eagerly.
# """
# if (context.executing_eagerly()
# or ops.get_default_graph()._building_function): # pylint: disable=protected-access
# return iterator_ops.OwnedIterator(self)
# else:
# raise RuntimeError("__iter__() is only supported inside of tf.function "
# "or when eager execution is enabled.")
# ```
# We then see that it uses the tensorflow iterator_ops for its iterator, so we can go to that source code. That source code points us to the [iterator_ops](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/data/ops/iterator_ops.py). Of which, the main elements are `_create_iterator` and `_next_internal`. These are effective the `__iter__` and `__next__` methods for the class. Those functions themselves have some more nitty gritties, but suffice to say that you can trace it far enough to reassure yourself that the iterator is moving through nests of the Tensor class. The gist of `_create_iterator` is that it has some pretty thorough robustness so you don't break it, but ultimately it is assigning pieces that fundamentally want to work with tensors. The `_next_internal` checks to see whether the backend is in eager_execution or static graph execution. In eager execution, where you'll most easily see the results, the next method is primarily concerned with returning tensor elements. There's a lot more tracking and integrating the core fundamentals than described thus far, but hopefully this at least provides a shallow insight into the class.
# ```Python
# def _create_iterator(self, dataset):
# # pylint: disable=protected-access
# dataset = dataset._apply_options()
#
# # Store dataset reference to ensure that dataset is alive when this iterator
# # is being used. For example, `tf.data.Dataset.from_generator` registers
# # a few py_funcs that are needed in `self._next_internal`. If the dataset
# # is deleted, this iterator crashes on `self.__next__(...)` call.
# self._dataset = dataset
#
# ds_variant = dataset._variant_tensor
# self._element_spec = dataset.element_spec
# self._flat_output_types = structure.get_flat_tensor_types(
# self._element_spec)
# self._flat_output_shapes = structure.get_flat_tensor_shapes(
# self._element_spec)
# with ops.colocate_with(ds_variant):
# self._iterator_resource, self._deleter = (
# gen_dataset_ops.anonymous_iterator_v2(
# output_types=self._flat_output_types,
# output_shapes=self._flat_output_shapes))
# gen_dataset_ops.make_iterator(ds_variant, self._iterator_resource)
# # Delete the resource when this object is deleted
# self._resource_deleter = IteratorResourceDeleter(
# handle=self._iterator_resource,
# device=self._device,
# deleter=self._deleter)
#
# def _next_internal(self):
# """Returns a nested structure of `tf.Tensor`s containing the next element.
# """
# if not context.executing_eagerly():
# with ops.device(self._device):
# ret = gen_dataset_ops.iterator_get_next(
# self._iterator_resource,
# output_types=self._flat_output_types,
# output_shapes=self._flat_output_shapes)
# return structure.from_compatible_tensor_list(self._element_spec, ret)
#
# # This runs in sync mode as iterators use an error status to communicate
# # that there is no more data to iterate over.
# # TODO(b/77291417): Fix
# with context.execution_mode(context.SYNC):
# with ops.device(self._device):
# # TODO(ashankar): Consider removing this ops.device() contextmanager
# # and instead mimic ops placement in graphs: Operations on resource
# # handles execute on the same device as where the resource is placed.
# ret = gen_dataset_ops.iterator_get_next(
# self._iterator_resource,
# output_types=self._flat_output_types,
# output_shapes=self._flat_output_shapes)
#
# try:
# # Fast path for the case `self._structure` is not a nested structure.
# return self._element_spec._from_compatible_tensor_list(ret) # pylint: disable=protected-access
# except AttributeError:
# return structure.from_compatible_tensor_list(self._element_spec, ret)
# ```
# + [markdown] id="oN_ZeO66Zz4F" colab_type="text"
# Here's the most common way Dataset iterator is used
# + id="wH3Ju6JQ8i96" colab_type="code" colab={}
# create dataset
arr = np.array([7, 2, 1, 6, 3, 5, 9])
dataset = tf.data.Dataset.from_tensor_slices(arr)
# + id="X22Omo5ktP1E" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 144} outputId="6bb2d589-36fe-44fa-c75c-0350b38bf805"
# loop through dataset elements (eager mode: each elem is a scalar tf.Tensor)
for elem in dataset:
    print(elem)
# + [markdown] id="f-M2JnN9aET1" colab_type="text"
# If you only wanted a certain number of elements, you could even set conditional break in there.
# + id="zpyJP4GLaS-s" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="07c2529f-d45e-49b0-ec87-ac94fca490a6"
# stop iterating after the first n_elem elements
count = 0
n_elem = 3
for elem in dataset:
    print(elem)
    count += 1
    if count >= n_elem:
        break
# + [markdown] id="VvXu5tSW-diS" colab_type="text"
# Another way to see just the first few elements is to work with the bare iterator.
# + id="UQ3-rVgg-ZQF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="a7c51cff-9b5f-4144-879b-741cb9701e5e"
# pull elements one at a time from the bare iterator
ds_iter = iter(dataset)
print(f'first element: {next(ds_iter)}')
print(f'second element: {next(ds_iter)}')
print(f'third element: {next(ds_iter)}')
del ds_iter  # drop the iterator once done
# + [markdown] id="5xDKViKc9aH5" colab_type="text"
# If you want to know what the for loop is really doing, we could do the functionally same operation written as below. The key is that the for loop goes until it hits a StopIteration exception that is built into the class's `next` method.
# + id="GwkwEnyU9Y_6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 144} outputId="0c2f54db-912b-4958-85ec-eaad0b41cf4e"
# manual desugaring of the for loop: call next() until StopIteration is raised
ds_iter = iter(dataset)
try:
    while True:
        elem = next(ds_iter)
        print(elem)
except StopIteration:
    pass
finally:
    del ds_iter
# + [markdown] id="Ll95DKJQZt8L" colab_type="text"
# Not as concise, is it?
# + [markdown] id="aRvsKP79D1vA" colab_type="text"
# ## Take
# + [markdown] id="C7jElLnusN4g" colab_type="text"
# Take, grab, give, this could have been called a lot of things but, alas, it is called `take`. This method takes a count (integer) and creates a new dataset that will iterate through tensors from at most count elements. The statement to reiterate is take builds a new dataset, it does not extract elements. Another tip worth noting is that you can overcount the take count and when the dataset iterates it will just go to max number of elements in the dataset. Similarly, any negative number simply takes the entire dataset.
# + id="tmh7HNljsMZA" colab_type="code" colab={}
arr = np.array([7, 2, 1, 6, 3, 5, 9])
dataset = tf.data.Dataset.from_tensor_slices(arr)
# + id="4nI1q6O0trqB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="ea3fdc73-ff7c-40f0-edd3-87332a9b38da"
# take(3): a NEW dataset that yields at most the first 3 elements
ds_take = dataset.take(3)
print(list(ds_take.as_numpy_iterator()), "\n")
# over-counting is safe: iteration just stops at the dataset's real length
ds_take_more = dataset.take(1000)
print("take over count: ", list(ds_take_more.as_numpy_iterator()))
# any negative count takes the entire dataset
ds_take_neg = dataset.take(-8)
print("take negative count (take all): ", list(ds_take_neg.as_numpy_iterator()))
ds_take_none = dataset.take(0)
print("take zero count (none): ", list(ds_take_none.as_numpy_iterator()))
# + [markdown] id="YL5qQ_UivWCv" colab_type="text"
# A decent analogy for `take` would be a wrapper on a generator with a stop condition that makes the first generator stop earlier if the count is reached and keep going if not. Since the [true class](https://github.com/tensorflow/tensorflow/blob/e26286914b6d3cf7ed0c9a47f50c07391c2174a6/tensorflow/core/kernels/data/take_dataset_op.cc) is defined in C++, this analogy is merely illustrative.
# + id="COwMHtrQvUev" colab_type="code" colab={}
# Similar concept to Dataset.take
def foo_generator():  # base iterator like Dataset
    """Yield the integers 0..9, standing in for a source Dataset."""
    for i in range(10):
        yield i

def foo_take(count):
    """Yield at most `count` items from foo_generator(), mirroring Dataset.take.

    A negative `count` means "take everything"; a `count` larger than the
    source just exhausts the source; 0 yields nothing.
    """
    # NOTE: the original raised StopIteration inside the generator for control
    # flow, which PEP 479 discourages; a plain return/loop exit does the same job.
    taken = 0
    for value in foo_generator():
        if count >= 0 and taken >= count:
            return  # reached the requested count (negative counts never stop early)
        yield value
        taken += 1
# + id="Karmjjc9wsyu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 90} outputId="e6cc36c3-4dd0-40ff-b85e-2750e3c007cf"
# expected: [0..4], [0..9] (over-count), [0..9] (negative = all), [] (zero)
print(list(foo_take(5)))
print(list(foo_take(1000)))
print(list(foo_take(-1)))
print(list(foo_take(0)))
# + [markdown] id="ZFJMJMGzQMnu" colab_type="text"
# ## Map dataset
# + id="SNkftOO-8kyR" colab_type="code" colab={}
arr = np.array([7, 2, 1, 6, 3, 5, 9])
dataset = tf.data.Dataset.from_tensor_slices(arr)
# + [markdown] id="g72VAcA9QWtV" colab_type="text"
# You can use map to apply a function to the dataset. For example, if you want to add 1 to every element of the dataset you could do
# + id="jZ1iIrvVmCR6" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 162} outputId="7b449f2f-3222-4ff6-dd06-2fe391a94d70"
def add_one(x): # note: easily done anonymously lambda x: x + 1
    """Element-wise mapping function: add 1 to a tensor element."""
    return x + 1
dp1 = dataset.map(add_one)
print("dataset ", "mapped dataset")
for ds_elem, elem in zip(dataset, dp1):
    print(ds_elem.numpy(), "\t", elem.numpy())
# + [markdown] id="vyV1gQn_Qng0" colab_type="text"
# As another example of a mapping function, we can make a function that one-hot encodes values corresponding to a class index.
# + id="MR0jPfHLszlV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 144} outputId="7cd79766-2d87-4d3f-ab81-d2b4fccbce08"
def one_hot_encode(x, max_index):
    """Return a dense int32 one-hot vector of length max_index with a 1 at index x."""
    tensor_zeros = tf.zeros(max_index, dtype=tf.int32)
    tensor_ones = tf.ones(max_index, dtype=tf.int32)
    tensor_range = tf.range(max_index, dtype=tf.int32)
    # dataset defaulted to int64 type, hence the casting, types may vary by context
    one_hot_array = tf.where(tensor_range == tf.cast(x, tf.int32), tensor_ones, tensor_zeros)
    return one_hot_array
ds_one_hot = dataset.map(lambda x: one_hot_encode(x, tf.constant(10, dtype=tf.int32)))
for elem in ds_one_hot:
    print(elem)
# + [markdown] id="gWjBfpr4pW88" colab_type="text"
# Alternatively, rather than manually making a one hot encoding function you could just use tensorflow's built-in one_hot function.
# + id="BHXuwuGCjcyh" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 144} outputId="bc43e8ee-5469-4f61-d047-c5ba628e719e"
ds_one_hot = dataset.map(lambda x: tf.one_hot(x, tf.constant(10, dtype=tf.int32), dtype=tf.int32))
for elem, one_hot_elem in zip(dataset, ds_one_hot):
    print(f'category index {elem.numpy()}, one_hot_encoding {one_hot_elem.numpy()}')
# + [markdown] id="h_Wyy0xdAUgN" colab_type="text"
# To really do the one-hot encoding to death, note that the tensors could also return as a sparse type.
# + id="wnZLPDlNAJui" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 291} outputId="40a7fe62-da13-4409-e411-596065a95e67"
def one_hot_encode(x, max_index):
    """Sparse variant: a SparseTensor with a single 1 at index x (redefines the dense version above)."""
    return tf.sparse.SparseTensor(indices=[[x]], values=[tf.cast(1, dtype=tf.int64)], dense_shape=[max_index])
ds_one_hot = dataset.map(lambda x: one_hot_encode(x, tf.constant(10, dtype=tf.int64)))
for elem in ds_one_hot:
    print(elem)
    print(tf.sparse.to_dense(elem))
# + [markdown] id="5tDCdbYpDBVV" colab_type="text"
# ## Filter dataset
# + [markdown] id="pQULfD0eIKwy" colab_type="text"
# Both map and filter apply a function to each element in a dataset. Whereas map uses a function to alter the dataset elements in some way, filter works on a conditional return to delete dataset elements that fail the conditional test. The filter method is good for deleting bad dataset inputs.
#
# Let's imagine a dataset of images that vary in length and width. Most pretrained networks are trained for images of sizes 299x299, 256x256, or 224x224. So if images are coming in the general size of 300x300, some more (say 350x350) and some less (say 250x250) then for the most part I can safely crop down to the right size. However, if the data augmentation involves rotation or other affine transforms then extra support around the input size will be desired, and as such some images may need to get thrown out. The following example goes through such a case.
# + id="fg7gTTw7M_cZ" colab_type="code" colab={}
# generate random array of random size
def gen_series(n_samps):
    """Yield `n_samps` (shape, arr) pairs where arr is a random square array.

    Each pair is (k, k-by-k random array) with k drawn uniformly from {2, 3, 4}.
    Yielding the shape explicitly matters downstream: a symbolic tensor coming
    out of Dataset.from_generator does not carry a static shape.
    """
    # exactly n_samps samples -- the original post-increment check
    # (`if i > n_samps: break`) yielded one extra sample
    for _ in range(n_samps):
        rand_shape = np.random.randint(2, 5)
        yield (rand_shape, np.random.random(size=(rand_shape, rand_shape)))
# + [markdown] id="faqKiLJW83q5" colab_type="text"
# Now set the random seed for reproducibility and make 5 random numpy arrays.
# + id="fMjtD0N1JvIZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 380} outputId="0aee30b7-d45a-4315-a663-220bebe9c2be"
np.random.seed(314)  # fixed seed so the generated shapes/values are reproducible
for elem in gen_series(5):
    print(elem)
# + [markdown] id="4mlISeCvKvAR" colab_type="text"
# Turn the arrays into a dataset
# + id="itYcFMCtKuGT" colab_type="code" colab={}
np.random.seed(314)  # reseed so the dataset sees the same sequence as the loop above
dataset_shapes = tf.data.Dataset.from_generator(gen_series,
                                                output_types=(tf.int32, tf.float32),
                                                args = [5]
                                                )
# + [markdown] id="nA-sGx0o9Dsm" colab_type="text"
# Filter out any arrays that have a shape less than 3 (or conversely keep arrays larger than 2)
# + id="_6E99bIQgg7K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 344} outputId="ffb75228-05c9-4c11-c052-47391df9337a"
def filter_shape(shape, arr):
    """Predicate for Dataset.filter: keep samples whose side length exceeds 2.

    The array itself is ignored; only the shape scalar is inspected.
    """
    min_side_exclusive = 2
    return shape > min_side_exclusive
np.random.seed(314)
# filter keeps only elements for which filter_shape(...) is True (shape > 2)
ds_filter = dataset_shapes.filter(filter_shape)
list(ds_filter.as_numpy_iterator())
# + [markdown] id="oAtrDF3c0PUm" colab_type="text"
# Another case where a filter could be helpful is if the data hits some value then you know it's corrupt (maybe a CRC check, or just physical constraints). For example, it could be that you know it's impossible for a speedometer to register a speed less than 0.
# + id="YHLrrOxp0oiu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 199} outputId="bbfd26f5-b6a3-434a-b183-9110ce790c20"
np.random.seed(272)
# shift uniform [0, 1) samples down by 0.3 so some become negative ("corrupt")
dataset_threshold = tf.data.Dataset.from_tensor_slices(np.random.rand(10)).map(lambda x: x - 0.3)
list(dataset_threshold.as_numpy_iterator())
# + [markdown] id="hEvqimxT-wsm" colab_type="text"
# Keep any values that are above 0
# + id="tGm6FzAC-hMV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 108} outputId="f21b0266-df51-4806-bd07-d009faab57e9"
list(dataset_threshold.filter(lambda x: x > 0).as_numpy_iterator())
# + [markdown] id="oo6shlT1-9k_" colab_type="text"
# Try it out for yourself. Make an array with normally distributed data and filter out an events more than 2*sigma.
# + [markdown] id="2CiKDE7gWolw" colab_type="text"
# ## Interleave
# + [markdown] id="XIpjB0EPW7TJ" colab_type="text"
# Building from the concepts of `map`, `interleave` is a `map` operation. What is different, though, is that `interleave` is a map of a dataset. Whereas `map` operates on tensor elements and typically involves processing those elements, the `interleave` method operates on a dataset itself and typically involves creating more samples. In this sense interleave often builds on a map.
# + [markdown] id="nQIfQGLSa07g" colab_type="text"
# Personally, I found this function was really difficult to wrap my head around.
#
# Interleave has two main controls, `cycle_length` and `block_length`.
# * `cycle_length`
# * `block_length`
#
# Cycle length is how many times to cycle through the datasets before starting to interleave, and block length is the length of the interleave.
# + id="Mywyost_LyDZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 562} outputId="a6bf980f-642d-41d6-db7c-ab23772ea28d"
dataset = tf.data.Dataset.range(1, 6) # ==> [ 1, 2, 3, 4, 5 ]
# NOTE: New lines indicate "block" boundaries.
# cycle through 3 source datasets at a time, emitting 4 elements from each
# before moving to the next
dataset = dataset.interleave(
    lambda x: tf.data.Dataset.from_tensors(x).repeat(6),
    cycle_length=3, block_length=4)
list(dataset.as_numpy_iterator())
# + id="lA9skUBgXPHT" colab_type="code" colab={}
base_arr = [['a'], ['b']]
num_array = [np.arange(4), np.arange(6, 10)]
# + id="yNRWuqeIez2m" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="f38e1d6c-9dab-43b6-9b7a-b531b087a9b8"
transform = lambda x: -x  # NOTE(review): defined but never used below
dataset = tf.data.Dataset.from_tensor_slices(base_arr)
print(list(dataset.as_numpy_iterator()))
# + id="PWTJogF5Y8QQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 794} outputId="517c7f06-51e8-4eb3-b31e-41a66ae0a635"
# each base element maps to the same 2-row source, interleaved one at a time
interleaved_set = dataset.interleave(lambda x: tf.data.Dataset.from_tensor_slices(num_array),
                                     cycle_length=1,
                                     block_length=1,
                                     )
list(interleaved_set.as_numpy_iterator())
# + [markdown] id="TkPLZBDlW7zQ" colab_type="text"
# ## Shuffle
# + [markdown] id="chl095YwxMJ9" colab_type="text"
# Shuffle is pretty much what you imagine, it takes your dataset and will spit out a random permutation of your dataset. The main nuance to the shuffle function is the buffer size argument. That being said, the buffer size argument is itself pretty nuanced.
# + [markdown] id="H7EaryvYzvYG" colab_type="text"
# Take the following example: there's a dataset with numbers one through 9.
# With a buffer size of 2, the first shuffle picks from
# [0, 1]
# let's say that it picks 1, now the buffer has
# [0, 2]
# let's say that it picks 2 this time around, now the buffer has
# [0, 3]
# and the shuffled array, to date, has
# [1, 2, 3]
# elements. As such, the placement of 0 in later spots of the array follows a geometric distribution. On the other end, 9 will only ever be in the 8th or 9th index (zero-indexing), or more generally for a shuffle size of 2 a value in the nth index can only appear in the n-1 index or the nth index. Even more generally, for shuffle size of s, a value in the nth index (zero-indexing) can appear in the max(0, n-s+1) index, e.g. with a shuffle size of 10 the 10th element or 9 index can appear at position 9-10+1=0 index.
# + id="pwiqEFWNqkDl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 399} outputId="674b418d-b4b2-436e-89e2-b66f89c1005a"
ds = tf.data.Dataset.from_tensor_slices(np.arange(10))
print([x.numpy() for x in ds])
# buffer size 2: each draw picks uniformly from a 2-element buffer, so the
# shuffle is only "locally" random (see the discussion above)
ds_shuffle = ds.shuffle(2, reshuffle_each_iteration=True)
for _ in range(20):
    print([x.numpy() for x in ds_shuffle])
# + [markdown] id="dvHPeW3O-_wV" colab_type="text"
# Another example of shuffle, but with a tuple input
# + id="uPaA5q7b_EMy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 199} outputId="b8d0da82-f80e-46fa-d8c7-510f9be95e53"
rand_ds = tf.data.Dataset.from_tensor_slices((np.random.randn(10, 5), np.arange(10)))
for arr, label in rand_ds:
    print(arr.numpy(), '\t', label.numpy())
# + id="L630KzEw_Z8Q" colab_type="code" colab={}
for i in range(10):
    # fresh shuffled iterator each pass; inspect the first two (features, label) pairs
    ds_iter = iter(rand_ds.shuffle(2, reshuffle_each_iteration=True))
    arr, label = next(ds_iter)
    print(arr.numpy(), '\t', label.numpy())
    arr, label = next(ds_iter)
    print(arr.numpy(), '\t', label.numpy())
    print('\n')
# + [markdown] id="pa4v-SjpnYIr" colab_type="text"
# # Datasets with x, y
# + id="T-XFtZiyncz1" colab_type="code" colab={}
train, test = tf.keras.datasets.fashion_mnist.load_data()
# + id="Ka7zRUBW5C0m" colab_type="code" colab={}
images, labels = train
images = images/255.0  # scale pixel values to [0, 1]
labels = labels.astype(np.int32)
# + id="abrtBhLU5gbX" colab_type="code" colab={}
# NOTE(review): `dataset` here is still the small 7-element array dataset from
# earlier cells, not the fashion-mnist data -- presumably this was meant to use
# (images, labels) as fmnist_train_ds does below; confirm intent.
mnist_ds = dataset.shuffle(5000).batch(32)
# + id="quxNWfXan6pq" colab_type="code" colab={}
mnist_ds
# + id="u_yww7s26L4k" colab_type="code" colab={}
images_reshaped = images.reshape((60000, -1))  # flatten each 28x28 image into a 784 vector
# + id="_gvtS2cD_gPj" colab_type="code" colab={}
type(images_reshaped)
# + id="jL8j1V3F6Xaj" colab_type="code" colab={}
images_reshaped.shape
# + id="K-LzTcvY6ZXT" colab_type="code" colab={}
ds_linear = tf.data.Dataset.from_tensor_slices((images_reshaped))
# + id="fAjf3Rtq6guU" colab_type="code" colab={}
mnist_ds_linear = ds_linear.shuffle(10).batch(1)
# + id="538UZyRu6oqj" colab_type="code" colab={}
mnist_ds_linear
# + id="3oHp9DSh6q3T" colab_type="code" colab={}
# un-flatten the first couple of images and display them
for num_now in ds_linear.take(2):
    plt.figure(); plt.imshow(np.reshape(num_now, (28, 28)), cmap='gray')
# + id="D1uwEbjl7QAo" colab_type="code" colab={}
for num_now in ds_linear.shuffle(5000).take(2):
    plt.figure(); plt.imshow(np.reshape(num_now, (28, 28)), cmap='gray')
# + id="Bvx04TsR5ZxX" colab_type="code" colab={}
fmnist_train_ds = tf.data.Dataset.from_tensor_slices((images, labels))
fmnist_train_ds = fmnist_train_ds.shuffle(5000).batch(32)
# + id="6e4SxXpmOgzw" colab_type="code" colab={}
model = tf.keras.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(10)
])
# + id="vV1BmXWBO_Ti" colab_type="code" colab={}
# logits output (no softmax layer), hence from_logits=True in the loss
model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
              metrics=['accuracy'])
# + id="4mbdR45XPARV" colab_type="code" colab={}
model.fit(fmnist_train_ds, epochs=10)
# + id="jSAORFjzPSYr" colab_type="code" colab={}
result = model.predict(fmnist_train_ds, steps = 1)  # predictions for one batch only
# + id="Fljyfl1BPU-D" colab_type="code" colab={}
test_ds = tf.data.Dataset.from_tensor_slices((images, labels))
# + id="B9dcQwHlRX3B" colab_type="code" colab={}
# unzip one 5-element batch into x (image arrays) and y (label arrays)
x, y = list(zip(*list(test_ds.batch(5).take(1).as_numpy_iterator())))
# + id="ff7DBvU_RZsJ" colab_type="code" colab={}
y
# + id="Nsv_NPJzQBZc" colab_type="code" colab={}
x, y = list(zip(*list(test_ds.batch(5).take(1).as_numpy_iterator())))
y_predict_all = model.predict(test_ds.batch(5), steps=1)
y_top = np.argmax(y_predict_all, axis=1)  # most likely class per sample
print(y, y_top)
# + id="MaCEj-jIQS01" colab_type="code" colab={}
for image_now in x[0]:
    plt.figure()
    plt.imshow(image_now.squeeze(), cmap='gray')
# + id="CUImNHuL2sGk" colab_type="code" colab={}
x_lin = tf.range(0, 100, dtype=tf.float32)
y_lin = tf.range(0, 100, dtype=tf.float32)
# + id="MUMJKSAL3A0Z" colab_type="code" colab={}
x_ds = tf.data.Dataset.from_tensor_slices(x_lin)
y_ds = tf.data.Dataset.from_tensor_slices(y_lin)
# + id="qgRtWDOkA-WG" colab_type="code" colab={}
def add_noise(x):
    """Map fn: replace x with a sample from a unit-std normal centered at x."""
    return tf.random.normal(x.shape, mean=x)
# + id="fArSTK363LJ8" colab_type="code" colab={}
x_noisy = x_ds.map(add_noise)
# + id="J8bJ4NTb4jOh" colab_type="code" colab={}
xy_ds = tf.data.Dataset.zip((x_noisy, y_ds))  # pair noisy inputs with clean targets
# + id="zo-VPie33w0C" colab_type="code" colab={}
model = tf.keras.Sequential([
    tf.keras.layers.Dense(10, input_shape=[1]),
    tf.keras.layers.Dense(1)]
)
# + id="NzpuWJbZ4Ugj" colab_type="code" colab={}
model.compile(loss="mse")
# + id="B11CCqfjAdC9" colab_type="code" colab={}
xy_ds.take(1)
# + id="p3zUJMrf4btG" colab_type="code" colab={}
# repeat() so the dataset can feed 100 epochs x 10 steps of batches of 5
model.fit(xy_ds.batch(5).repeat(), epochs=100, steps_per_epoch=10)
# + id="90R79fP-NEpj" colab_type="code" colab={}
model.predict(xy_ds.take(1), steps=1)
# + id="5nmlVfZXxDjc" colab_type="code" colab={}
# + [markdown] id="82VJJrMKClxf" colab_type="text"
# # Numpy (python) wrap
# + [markdown] id="aP7_kHAcFyec" colab_type="text"
# Sometimes it will be easier to repurpose previously written Python code, most likely a function using numpy or scipy, than rewriting it with tensorflow methods. The dataset objects are fundamentally built for graph mode, even if there are ways to expose pieces with eager execution, and as such datasets want to operate and use tensors.
# + id="yWQHpCydCoSq" colab_type="code" colab={}
def add_random_noise(x, mu=0.0, std=1.0):
    """Return x plus Gaussian noise N(mu, std**2), cast to float32.

    Accepts either a Python/NumPy float scalar or an ndarray; for arrays the
    noise is drawn element-wise.
    """
    # np.float was removed in NumPy 1.24 (it was only an alias for the builtin
    # float), so test for real float scalars explicitly instead.
    if isinstance(x, (float, np.floating)):
        x_noisy = x + std * np.random.randn(1) + mu
    else:
        x_noisy = x + std * np.random.randn(*x.shape) + mu
    return np.float32(x_noisy)
# + id="a82VTaOpIPfJ" colab_type="code" colab={}
arr = np.array([7, 2, 1, 6, 3, 5, 9], dtype=np.float32)
dataset = tf.data.Dataset.from_tensor_slices(arr)
# + id="Bmj3PQFLI9tc" colab_type="code" colab={}
# tf.numpy_function lets graph-mode code call back into the numpy helper above;
# the input_signature pins the traced spec to a float32 tensor of any shape
@tf.function(input_signature=[tf.TensorSpec(None, tf.float32)])
def tf_add_random(input):
    """Graph-compatible wrapper around add_random_noise with mu=0, sigma=1."""
    mu = 0.
    sigma = 1.
    np_random = lambda x: add_random_noise(x, mu, sigma)
    y = tf.numpy_function(np_random, [input], tf.float32)
    return y
# + id="0dD5GTBSJNN3" colab_type="code" colab={}
list(dataset.map(tf_add_random).as_numpy_iterator())
| tf_dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Publishing and sharing visualizations
#
# We're going to walk through how to publish a visualization and get the final URL to share it. For that purpose, we'll need to use the "CARTO custom visualizations", also known as **Kuviz**.
#
# Let's start by creating default credentials.
# ## Set the Credentials
# +
from cartoframes.auth import set_default_credentials
set_default_credentials(
    username='your_carto_user',  # placeholder: replace with your CARTO user name
    api_key='your_api_key'       # placeholder: replace with your API key
)
# -
# ## Publish
# ### Case 1: using a synchronized and public table
# +
from cartoframes.viz import Map, Layer
tmap = Map(Layer('public_table_name')) # -> Set here a table name from your account
# -
tmap.publish('cf_publish_case_1')
# The 'publish' method uses 'default_public' by default. Therefore, I don't need to use my API Key in this case. Additionally, it's possible to publish a visualization with **password**.
tmap.publish('cf_publish_case_1_password', password="<PASSWORD>")
# ### Case 2: using a synchronized and private table
# +
from cartoframes.viz import Map, Layer
# NOTE(review): PRIVATE_TABLE is an undefined placeholder -- substitute your own
# private table name before running this cell.
tmap = Map(Layer(PRIVATE_TABLE))
# -
# In this case it's mandatory to add the `maps_api_key` parameter in the publish method. You can get more info at https://carto.com/developers/auth-api/guides/types-of-API-Keys/. This is because the `publish` method uses `default_public` by default, and the dataset is private.
tmap.publish('cf_publish_case_2', maps_api_key='your_maps_api_key')
# ### Case 3: using a non-synchronized (or using local data)
# +
from cartoframes.viz import Map, Layer
from cartoframes.data import Dataset
ds = Dataset('private_table_name')
df = ds.download()
# do some changes in the DataFrame
# and recreate the Dataset
ds = Dataset(df)
tmap = Map(Layer(ds))
# -
# In this case, if you try to publish a non synchronized dataset, you will get an error:
#
# > 'The map layers are not synchronized with CARTO. Please, use the `sync_data` before publishing the map'
tmap.publish('cf_publish_case_3')  # expected to raise: local data is not synchronized yet
# As the error message says, we will need to make a previous step synchronizing the data. After that as your new table will be private, you will need to create a Maps API key with permissions for your new private table from your CARTO dashboard or Auth API. And finally, we will be ready to publish the visualization.
tmap.sync_data('private_table_name_sync')
tmap.publish('cf_publish_case_3', maps_api_key='your_maps_api_key')
# # Updating a Kuviz
# +
from cartoframes.viz import Map, Layer
tmap = Map(Layer('public_table_name'))
tmap.publish('cf_publish_update_1')
# -
# Changing name
tmap.update_publication('cf_publish_update_2', password=None)
# Adding password
tmap.update_publication('cf_publish_update_3', password="<PASSWORD>")
# Removing password
tmap.update_publication('cf_publish_update_4', password=None)
# # Delete a Kuviz
# +
from cartoframes.viz import Map, Layer, basemaps
# NOTE(review): PUBLIC_TABLE is an undefined placeholder -- substitute a real
# public table name before running this cell.
tmap = Map(Layer(PUBLIC_TABLE))
tmap.publish('cf_publish_delete_1')
# -
tmap.delete_publication()
# # Get all the visualizations
from cartoframes.viz import Map
Map.all_publications()
| examples/05_publishing/01_publish_visualization_using_kuviz.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 01 - Introduction to Machine Learning
#
# by [<NAME>](albahnsen.com/)
#
# version 0.2, May 2016
#
# ## Part of the class [Machine Learning for Security Informatics](https://github.com/albahnsen/ML_SecurityInformatics)
#
#
#
# This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [<NAME>](http://www.vanderplas.com)
# ## What is Machine Learning?
#
# In this section we will begin to explore the basic principles of machine learning.
# Machine Learning is about building programs with **tunable parameters** (typically an
# array of floating point values) that are adjusted automatically so as to improve
# their behavior by **adapting to previously seen data.**
#
# Machine Learning can be considered a subfield of **Artificial Intelligence** since those
# algorithms can be seen as building blocks to make computers learn to behave more
# intelligently by somehow **generalizing** rather than just storing and retrieving data items
# like a database system would do.
#
# We'll take a look at two very simple machine learning tasks here.
# The first is a **classification** task: the figure shows a
# collection of two-dimensional data, colored according to two different class
# labels.
# Import libraries
# %matplotlib inline
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# Use ggplot-like styling for every figure in this notebook
plt.style.use('ggplot')
# +
# Create a random set of examples
from sklearn.datasets.samples_generator import make_blobs
X, Y = make_blobs(n_samples=50, centers=2,random_state=23, cluster_std=2.90)
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.show()
# -
# A classification algorithm may be used to draw a dividing boundary
# between the two clusters of points:
from sklearn.linear_model import SGDClassifier
# Train a linear classifier (hinge loss = linear SVM) by stochastic gradient
# descent. NOTE: the `n_iter` parameter was renamed `max_iter` in scikit-learn
# 0.19 and `n_iter` was removed in 0.21; `max_iter` bounds the epochs the same way.
clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=200, fit_intercept=True)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, .05), np.arange(y_min, y_max, .05))
# Predict the class of every grid point, then draw the separating contour
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contour(xx, yy, Z)
plt.scatter(X[:, 0], X[:, 1], c=Y)
plt.show()
# This may seem like a trivial task, but it is a simple version of a very important concept.
# By drawing this separating line, we have learned a model which can **generalize** to new
# data: if you were to drop another point onto the plane which is unlabeled, this algorithm
# could now **predict** whether it's a blue or a red point.
# The next simple task we'll look at is a **regression** task: a simple best-fit line
# to a set of data:
# +
# True model parameters: slope and intercept
a = 0.5
b = 1.0
# x: 20 points drawn uniformly from [0, 30)
x = 30 * np.random.random(20)
# y = a*x + b with additive standard-normal noise
y = a * x + b + np.random.normal(size=x.shape)
plt.scatter(x, y)
# -
from sklearn.linear_model import LinearRegression
clf = LinearRegression()
# scikit-learn expects X as a 2-D [n_samples, n_features] array;
# x[:, None] adds the (single) feature axis
clf.fit(x[:, None], y)
# underscore at the end indicates a fit parameter (learned from the data)
print(clf.coef_)
print(clf.intercept_)
# Predict on a dense grid to draw the fitted line over the scatter
x_new = np.linspace(0, 30, 100)
y_new = clf.predict(x_new[:, None])
plt.scatter(x, y)
plt.plot(x_new, y_new)
# Again, this is an example of fitting a model to data, such that the model can make
# generalizations about new data. The model has been **learned** from the training
# data, and can be used to predict the result of test data:
# here, we might be given an x-value, and the model would
# allow us to predict the y value. Again, this might seem like a trivial problem,
# but it is a basic example of a type of operation that is fundamental to
# machine learning tasks.
# ## Representation of Data in Scikit-learn
#
# Machine learning is about creating models from data: for that reason, we'll start by
# discussing how data can be represented in order to be understood by the computer. Along
# with this, we'll build on our matplotlib examples from the previous section and show some
# examples of how to visualize data.
#
# Most machine learning algorithms implemented in scikit-learn expect data to be stored in a
# **two-dimensional array or matrix**. The arrays can be
# either ``numpy`` arrays, or in some cases ``scipy.sparse`` matrices.
# The size of the array is expected to be `[n_samples, n_features]`
#
# - **n_samples:** The number of samples: each sample is an item to process (e.g. classify).
# A sample can be a document, a picture, a sound, a video, an astronomical object,
# a row in database or CSV file,
# or whatever you can describe with a fixed set of quantitative traits.
# - **n_features:** The number of features or distinct traits that can be used to describe each
# item in a quantitative manner. Features are generally real-valued, but may be boolean or
# discrete-valued in some cases.
#
# The number of features must be fixed in advance. However it can be very high dimensional
# (e.g. millions of features) with most of them being zeros for a given sample. This is a case
# where `scipy.sparse` matrices can be useful, in that they are
# much more memory-efficient than numpy arrays.
# ## A Simple Example: the Iris Dataset
#
# As an example of a simple dataset, we're going to take a look at the
# iris data stored by scikit-learn.
# The data consists of measurements of three different species of irises.
# There are three species of iris in the dataset, which we can picture here:
# +
from IPython.core.display import Image, display
# Show reference photos of the three iris species plus a labeled diagram
display(Image(url='images/iris_setosa.jpg'))
print("Iris Setosa\n")
display(Image(url='images/iris_versicolor.jpg'))
print("Iris Versicolor\n")
display(Image(url='images/iris_virginica.jpg'))
print("Iris Virginica")
display(Image(url='images/iris_with_length.png'))
print('Iris versicolor and the petal and sepal width and length')
print('From, Python Data Analytics, Apress, 2015.')
# -
# ### Quick Question:
#
# **If we want to design an algorithm to recognize iris species, what might the data be?**
#
# Remember: we need a 2D array of size `[n_samples x n_features]`.
#
# - What would the `n_samples` refer to?
#
# - What might the `n_features` refer to?
#
# Remember that there must be a **fixed** number of features for each sample, and feature
# number ``i`` must be a similar kind of quantity for each sample.
# ### Loading the Iris Data with Scikit-Learn
#
# Scikit-learn has a very straightforward set of data on these iris species. The data consist of
# the following:
#
# - Features in the Iris dataset:
#
# 1. sepal length in cm
# 2. sepal width in cm
# 3. petal length in cm
# 4. petal width in cm
#
# - Target classes to predict:
#
# 1. Iris Setosa
# 2. Iris Versicolour
# 3. Iris Virginica
#
# ``scikit-learn`` embeds a copy of the iris CSV file along with a helper function to load it into numpy arrays:
from sklearn.datasets import load_iris
iris = load_iris()
# Bunch object; keys include 'data', 'target', 'target_names', 'DESCR', ...
iris.keys()
# data has shape [n_samples, n_features]: 150 flowers x 4 measurements
n_samples, n_features = iris.data.shape
print((n_samples, n_features))
print(iris.data[0])
print(iris.data.shape)
print(iris.target.shape)
print(iris.target)
print(iris.target_names)
# ### Dimensionality Reduction: PCA
#
# Principal Component Analysis (PCA) is a dimension reduction technique that can find the combinations of variables that explain the most variance.
#
# Consider the iris dataset. It cannot be visualized in a single 2D plot, as it has 4 features. We are going to extract 2 combinations of sepal and petal dimensions to visualize it:
X, y = iris.data, iris.target
from sklearn.decomposition import PCA
# Three components are kept: the first two are plotted here,
# and the third is used by the 3-D plot further down
pca = PCA(n_components=3)
pca.fit(X)
X_reduced = pca.transform(X)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y)
# Same idea with a non-linear embedding: Isomap preserves geodesic (manifold)
# distances instead of maximizing explained variance like PCA does.
X, y = iris.data, iris.target
from sklearn.manifold import Isomap
embedder = Isomap(n_components=3)
embedder.fit(X)
X_reduced2 = embedder.transform(X)
plt.scatter(X_reduced2[:, 0], X_reduced2[:, 1], c=y)
from mpl_toolkits.mplot3d import Axes3D
# Create the 3-D axes through the figure API.
# NOTE: instantiating `Axes3D(fig)` directly no longer attaches the axes to the
# figure in modern matplotlib (auto-adding was deprecated in 3.4 and removed in
# 3.7, leaving a blank figure); `add_subplot(projection='3d')` is the supported way.
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.set_title('Iris Dataset by PCA', size=14)
# One point per flower in the space of the first three principal components
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=y)
ax.set_xlabel('First eigenvector')
ax.set_ylabel('Second eigenvector')
ax.set_zlabel('Third eigenvector')
# Hide the tick labels (component coordinates are not individually meaningful).
# NOTE: the `w_xaxis` aliases used originally were removed in matplotlib 3.8.
ax.xaxis.set_ticklabels(())
ax.yaxis.set_ticklabels(())
ax.zaxis.set_ticklabels(())
plt.show()
# ### Clustering: K-means
#
# Clustering groups together observations that are homogeneous with respect to a given criterion, finding ''clusters'' in the data.
#
# Note that these clusters will uncover relevant hidden structure of the data only if the criterion used highlights it.
# +
from sklearn.cluster import KMeans
k_means = KMeans(n_clusters=3, random_state=0) # Fixing the RNG in kmeans
k_means.fit(X)
y_pred = k_means.predict(X)
# Color the PCA projection by *cluster assignment* (not the true class)
plt.scatter(X_reduced[:, 0], X_reduced[:, 1], c=y_pred);
# -
# Let's now evaluate the performance of the clustering versus the ground truth
# +
from sklearn.metrics import confusion_matrix
# Compute confusion matrix
# NOTE(review): k-means numbers its clusters arbitrarily (0/1/2), so this
# matrix compares true classes against clusters only up to a permutation of
# the cluster labels — a diagonal is not guaranteed even for a good clustering.
cm = confusion_matrix(y, y_pred)
np.set_printoptions(precision=2)
print(cm)
# -
def plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):
    """Render a confusion matrix as a heatmap with iris class tick labels.

    Parameters
    ----------
    cm : array-like of shape (n_classes, n_classes)
        Confusion matrix, e.g. from ``sklearn.metrics.confusion_matrix``.
    title : str
        Title drawn above the heatmap.
    cmap : matplotlib colormap
        Colormap used for the cells.

    Note: reads the global ``iris`` dataset for the class names, so it is
    specific to this notebook.
    """
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title)
    plt.colorbar()
    tick_marks = np.arange(len(iris.target_names))
    plt.xticks(tick_marks, iris.target_names, rotation=45)
    plt.yticks(tick_marks, iris.target_names)
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
plt.figure()
plot_confusion_matrix(cm)
# ### Classification Logistic Regression
# +
from sklearn.linear_model import LogisticRegression
# NOTE(review): `sklearn.cross_validation` was removed in scikit-learn 0.20;
# new code should use `from sklearn.model_selection import train_test_split`.
# The old import is kept here because the next cell also relies on it.
from sklearn import cross_validation
# Estimate the distribution of the test error over 1000 random 60/40 splits
errors = []
for i in range(1000):
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.4, random_state=i)
    clf = LogisticRegression()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Number of correct predictions on the *test* set
    acc = (y_pred == y_test).sum()
    # BUG FIX: the error rate must be normalized by the test-set size.
    # The original divided by n_samples (the full dataset, 150), which
    # overstated the error by a factor of 1/test_size.
    err = 1 - acc / len(y_test)
    errors.append(err)
plt.plot(list(range(1000)), errors)
errors = np.array(errors)
print(errors.max(), errors.min(), errors.mean(), errors.std())
# +
from sklearn.ensemble import RandomForestClassifier
# Same experiment with a random forest (uses `cross_validation` imported above)
errors = []
for i in range(1000):
    X_train, X_test, y_train, y_test = cross_validation.train_test_split(iris.data, iris.target, test_size=0.4, random_state=i)
    clf = RandomForestClassifier()
    clf.fit(X_train, y_train)
    y_pred = clf.predict(X_test)
    # Number of correct predictions on the *test* set
    acc = (y_pred == y_test).sum()
    # BUG FIX: normalize by the test-set size, not the full dataset size
    # (the original `acc / n_samples` overstated the error rate).
    err = 1 - acc / len(y_test)
    errors.append(err)
plt.plot(list(range(1000)), errors)
errors = np.array(errors)
print(errors.max(), errors.min(), errors.mean(), errors.std())
# -
# ### Recap: Scikit-learn's estimator interface
#
# Scikit-learn strives to have a uniform interface across all methods,
# and we'll see examples of these below. Given a scikit-learn *estimator*
# object named `model`, the following methods are available:
#
# - Available in **all Estimators**
# + `model.fit()` : fit training data. For supervised learning applications,
# this accepts two arguments: the data `X` and the labels `y` (e.g. `model.fit(X, y)`).
# For unsupervised learning applications, this accepts only a single argument,
# the data `X` (e.g. `model.fit(X)`).
# - Available in **supervised estimators**
# + `model.predict()` : given a trained model, predict the label of a new set of data.
# This method accepts one argument, the new data `X_new` (e.g. `model.predict(X_new)`),
# and returns the learned label for each object in the array.
# + `model.predict_proba()` : For classification problems, some estimators also provide
# this method, which returns the probability that a new observation has each categorical label.
# In this case, the label with the highest probability is returned by `model.predict()`.
# + `model.score()` : for classification or regression problems, most (all?) estimators implement
# a score method. Scores are between 0 and 1, with a larger score indicating a better fit.
# - Available in **unsupervised estimators**
# + `model.predict()` : predict labels in clustering algorithms.
# + `model.transform()` : given an unsupervised model, transform new data into the new basis.
# This also accepts one argument `X_new`, and returns the new representation of the data based
# on the unsupervised model.
# + `model.fit_transform()` : some estimators implement this method,
# which more efficiently performs a fit and a transform on the same input data.
# ## Flow Chart: How to Choose your Estimator
#
# This is a flow chart created by scikit-learn super-contributor [Andreas Mueller](https://github.com/amueller) which gives a nice summary of which algorithms to choose in various situations. Keep it around as a handy reference!
from IPython.display import Image
# Embed scikit-learn's estimator-selection flow chart from the project site
Image(url="http://scikit-learn.org/dev/_static/ml_map.png")
# Original source on the [scikit-learn website](http://scikit-learn.org/stable/tutorial/machine_learning_map/)
# # Machine Learning for Security Informatics
#
# There are several applications of machine learning for security informatics
#
#
# ## Intrusion Detection
#
# 
# An Intrusion Detection System (IDS) is a software that monitors a single or a
# network of computers for malicious activities (attacks) that are aimed at stealing
# or censoring information or corrupting network protocols. Most techniques used
# in today’s IDS are not able to deal with the dynamic and complex nature of cyber
# attacks on computer networks. Hence, efficient adaptive methods like various
# techniques of machine learning can result in higher detection rates, lower false
# alarm rates and reasonable computation and communication costs.
#
# 
# ## Fraud Detection
# Fraud detection is one of the earliest industrial applications of data mining and machine learning.
# Fraud detection is typically handled as a binary classification problem, but the class population is unbalanced because instances of fraud are usually very rare compared to the overall volume of transactions. Moreover, when fraudulent transactions are discovered, the business typically takes measures to block the accounts from transacting to prevent further losses. Therefore, model performance is measured by using account-level metrics, which will be discussed in detail later.
# 
# ## Phishing Detection
#
# Phishing, by definition, is the
# act of defrauding an online user in order to obtain personal information by posing as
# a trustworthy institution or entity. Users usually have a hard time differentiating
# between legitimate and malicious sites because they are made to look exactly the
# same. Therefore, there is a need to create better tools to combat attackers.
#
# 
# ## Malware Classification
#
# In recent years, the malware industry has become a well organized market involving large amounts of money. Well funded, multi-player syndicates invest heavily in technologies and capabilities built to evade traditional protection, requiring anti-malware vendors to develop counter mechanisms for finding and deactivating them. In the meantime, they inflict real financial and emotional pain to users of computer systems.
#
# One of the major challenges that anti-malware faces today is the vast amounts of data and files which need to be evaluated for potential malicious intent. For example, Microsoft's real-time detection anti-malware products are present on over 160M computers worldwide and inspect over 700M computers monthly. This generates tens of millions of daily data points to be analyzed as potential malware. One of the main reasons for these high volumes of different files is the fact that, in order to evade detection, malware authors introduce polymorphism to the malicious components. This means that malicious files belonging to the same malware "family", with the same forms of malicious behavior, are constantly modified and/or obfuscated using various tactics, such that they look like many different files.
# 
# ## Man-in-the-Browser Attacks
#
# Man-in-the-Browser (MITB) attacks are the most destructive threat on the Internet stealing money from customer accounts right
# now. These attacks infect a webpage by taking advantage of security vulnerabilities in browsers and common web plugins,
# modifying web pages and transactions as they are happening in real time. Cybercriminals launching an MITB attack can intercept
# and change the content on a website by injecting new HTML code and then perform unauthorized transactions while a customer
# has an online banking session open, but the client will only see the transaction performed as they intended on their screen. If the
# customer checks the URL or SSL certificates of the transactional site, they will be the same. Even the most sophisticated security
# professional may not know an incident is happening, because the entire point of an MITB attack is to mimic the page that malicious
# code is being injected into as much as possible, so that the banking customer doesn't realize that something is amiss.
# 
| notebooks/01-IntroMachineLearning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ¿Por qué computación cuántica?
# ## ¿Qué es una computadora?
#
# Dado que has logrado acceder a esta página web, ya deberías saber qué es una computadora. Hoy en día, las computadoras toman muchas formas: desde computadoras portátiles y teléfonos móviles hasta los sistemas que controlan los semáforos. ¡Parece que las computadoras pueden hacer cualquier cosa! Estos sistemas pueden ser muy complejos y especializados, pero todos tienen una cosa en común: una computadora lleva a cabo un conjunto de instrucciones sobre cierta información de entrada para darnos nueva información (de salida).
#
# Las instrucciones que damos a las computadoras deben ser muy específicas e inequívocas. Llamamos a estos conjuntos de instrucciones *algoritmos,* y gran parte de la investigación en computadoras se centra en el comportamiento de diferentes algoritmos. En este curso, solo consideraremos las computadoras en su forma más simple; sin teclados, ratones o pantallas, solo información y algoritmos.
#
# 
# + [markdown] formulas={"T": {"meaning": "This is the average time our search algorithm takes to run.", "type": "Locally defined variable"}, "exp": {"meaning": "This is the number of digits in our secret number. Because this is a superscript, this means we are doing 10 to the power of d.", "type": "Locally defined variable"}, "prop_to": {"meaning": "'Proportional to': Everything to the left of this symbol is <a href='https://en.wikipedia.org/wiki/Proportionality_(mathematics)'>proportional to</a> the things on the right.", "type": "Universal notation"}} gloss={"resources": {"text": "A resource is anything the algorithm needs to run. In computer science, this usually means either the time needed by the algorithm, or the space (e.g. computer memory).", "title": "Resources"}}
# ## Clasificación de algoritmos de computadora
#
# Para comprender el papel de las computadoras cuánticas entre las computadoras tradicionales modernas, primero debemos aprender cómo medimos el rendimiento de los diferentes algoritmos.
#
# En ciencias de la computación, clasificamos los algoritmos según cómo crecen los [recursos](gloss:resources) que utilizan con el tamaño de la entrada. A esto lo llamamos la *complejidad* del algoritmo. Por ejemplo, un algoritmo que decide si un número es par solo necesita mirar el último dígito de ese número. En este caso, la ‘entrada’ es un número y la salida es ‘par’ o ‘impar’. Llamamos a esto un algoritmo de *tiempo constante*, porque el tiempo que tarda el algoritmo en completarse no depende del tamaño del número de entrada. Es posible que a diferentes computadoras les tome diferentes cantidades de tiempo obtener este resultado, pero eso se debe a otros factores y no a la longitud de la entrada.
#
# 
#
# Veamos un ejemplo diferente. Esta vez, la entrada son dos números de igual longitud y el problema es sumarlos. En este caso, la salida será un nuevo número. Al sumar dos números de varios dígitos, un algoritmo común que probablemente aprendiste en la escuela comienza con el dígito más a la derecha de cada número y los suma. Luego se mueve un dígito a la izquierda (llevando un ‘1’ si el resultado fue mayor que 9) y repite el proceso. La computadora repite esto hasta que no hay más dígitos para agregar y el algoritmo finaliza.
#
# 
#
# <!-- ::: q-block.exercise -->
#
# ### ¿Cómo de compleja es la suma?
#
# <!-- ::: q-quiz(goal="intro-why-qc-0") -->
#
# <!-- ::: .question -->
#
# El tiempo que tarda este algoritmo de suma en completarse...
#
# <!-- ::: -->
#
# <!-- ::: .option(correct) -->
#
# 1. ...crece linealmente (proporcionalmente) con la longitud del número de entrada (tiempo lineal).
#
# <!-- ::: -->
#
# <!-- ::: .option -->
#
# 1. ...no se ve afectado por la longitud del número de entrada (tiempo constante)
#
# <!-- ::: -->
#
# <!-- ::: .option -->
#
# 1. ...crece con el cuadrado de la longitud del número de entrada (tiempo cuadrático)
#
# <!-- ::: -->
#
# <!-- ::: -->
#
# <!-- ::: -->
#
# Nuevamente, diferentes computadoras ejecutarán este algoritmo a diferentes velocidades; una laptop puede realizar sumas millones de veces más rápido que un ser humano. Pero ya sea que pueda hacer un millón de operaciones por segundo o solo una, el ritmo de crecimiento será el mismo.
#
# 
#
# He aquí un último ejemplo que nos interesa especialmente. Digamos que tengo un número secreto (como un PIN), y el problema es adivinarlo. En este caso, el tamaño del problema es la longitud del número.
#
# Digamos que la única forma en la que podemos verificar si nuestra respuesta es correcta es introduciéndola en un teclado. Dado que no tenemos información sobre cuál podría ser ese número, el mejor algoritmo para encontrar este número secreto utiliza un método de ‘fuerza bruta’, lo que significa que no hace nada inteligente y simplemente prueba todos los números posibles.
#
# ¿Cuánto tiempo llevaría esto? En teoría, podríamos tener suerte y adivinar la respuesta de una sola vez, pero esto es muy poco probable. En promedio, tendríamos que probar alrededor de la mitad de las entradas posibles, por lo que el tiempo de ejecución de nuestro algoritmo es proporcional al número de combinaciones posibles. La pregunta ahora es: ¿Cómo crece el número de combinaciones posibles con la longitud del número secreto?
#
# 
#
# Cada dígito que agregamos a nuestro número secreto multiplica el número de combinaciones posibles por 10. Por ejemplo, un número secreto con 1 dígito tiene 10 valores posibles (0, 1, 2, 3, 4, 5, 6, 7, 8 y 9), y un número secreto de 2 dígitos tiene 100 valores posibles. Suponiendo que el tiempo necesario para adivinar cada dígito es similar (independientemente de la longitud), podemos representar esto matemáticamente así:
#
# $$ \cssId{T}{T} \cssId{prop_to}{\propto} 10^\cssId{exp}{d}$$
#
# Notarás que el número de dígitos (d) es el exponente en esta ecuación y, como tal, decimos que este es un algoritmo de *tiempo exponencial* y que el tiempo de ejecución crece exponencialmente con la longitud de la entrada.
#
# 
# + [markdown] gloss={"intractable": {"text": "An intractable problem is one which can be solved in theory, but requires too many resources in practice.", "title": "Intractable"}}
# ## ¿Por qué medimos algoritmos como este?
#
# Diferentes computadoras tienen diferentes fortalezas; ciertas operaciones pueden ser más rápidas en una computadora que en otra. Al estudiar el crecimiento frente al tamaño de entrada, podemos ignorar los detalles específicos del dispositivo y, de hecho, medir el *algoritmo*, en lugar de la combinación específica de algoritmo y computadora. Es importante tener en cuenta que saber cómo escala un algoritmo con el tamaño de entrada también nos dice si el algoritmo crecerá de manera manejable o no.
#
# Pensemos en el algoritmo de suma de tiempo lineal que vimos arriba. Si pudiéramos sumar dos números de 10 dígitos en un segundo, debido a la tasa de crecimiento lineal, deberíamos poder sumar dos números de 20 dígitos en dos segundos. Cada 10 dígitos adicionales deberían añadir aproximadamente un segundo más a nuestro tiempo de cálculo.
#
# Por el contrario, imagina que puedes encontrar un PIN de 10 dígitos en 1 segundo utilizando el algoritmo de búsqueda de tiempo exponencial anterior. Esto significa que tu computadora es lo suficientemente rápida para intentar ~5,000,000,000 combinaciones por segundo. Esperaríamos que esta computadora que usa este algoritmo tarde aproximadamente 5,000,000,000 segundos (~150 años) para encontrar un PIN de 20 dígitos. Añadir otros 10 dígitos aumenta esto a alrededor de 150,000,000,000 años (~120 veces la edad del universo). Los algoritmos de tiempo exponencial con incluso una entrada de tamaño modesto (en este caso, ~30 dígitos) pueden volverse no solo difíciles, sino literalmente imposibles de llevar a cabo.
#
# Si bien este problema de búsqueda de PIN es un ejemplo artificial que pretendemos que sea lo más simple posible, hay muchos problemas reales en informática para los que solo tenemos algoritmos ineficientes. A pesar de la impresionante velocidad de las computadoras actuales, estos problemas [intratables](gloss:intractable) pueden ser demasiado difíciles incluso para las supercomputadoras más potentes.
#
# Pero si podemos encontrar algoritmos que crezcan de manera más eficiente, estos problemas intratables pueden volverse manejables de repente, incluso con computadoras relativamente lentas o poco fiables. Aquí es dónde la computación cuántica entra en juego.
#
# ## ¿Cómo puede ayudar la computación cuántica?
#
# Hasta ahora, hemos pensado en los algoritmos de una manera muy abstracta, pero las computadoras que ejecutan estos algoritmos deben existir en el mundo real. Ya sean estas computadoras microchips de alta potencia o humanos con bolígrafos y papel, todas las computadoras se rigen en última instancia por las leyes de la física y las operaciones que pueden realizar limitan los algoritmos que podemos crear.
#
# La física es un intento de desentrañar el conjunto de reglas que rigen en el universo. A principios del siglo XX, a través de delicados experimentos en laboratorios, los físicos vieron comportamientos extraños que la física no podía explicar en aquel momento. Esto significaba que las reglas no eran del todo precisas, por lo que desarrollaron de manera más completa la física ‘cuántica’, que describe muy bien este comportamiento.
#
# Los físicos crearon la física cuántica para explicar un comportamiento que nunca antes habían visto, y los informáticos descubrieron que podían (en teoría) explotar este comportamiento recién descubierto para crear algoritmos más eficientes. Como resultado, hay ciertos problemas que creemos que son intratables para las computadoras convencionales, pero que son manejables para una computadora ‘cuántica’ que puede explotar este comportamiento. Uno de estos problemas es la *factorización de enteros*.
#
# Supongamos que tenemos un número entero al que llamaremos '$x$'. Un algoritmo de factorización encuentra los enteros $p$ y $q$ tales que $p×q = x$. Esto a veces es fácil; puedes decir de un vistazo que $2000 = 2 × 1000$, pero si $x$ es el producto de dos números primos grandes, este problema se vuelve muy difícil. Cuando hablamos de factorización de enteros, vamos a asumir el escenario más difícil (el peor de los casos). En la siguiente celda de código, estamos asignando un número de 250 dígitos a la variable <code>x</code> :
# -
# The 250-digit challenge number discussed above (a product of two large primes)
x = 2140324650240744961264423072839333563008614715144755017797754920881418023447140136643345519095804679610992851872470914587687396261921557363047454770520805119056493106687691590019759405693457452230589325976697471681738069364894699871578494975937497937
# + [markdown] gloss={"coreyears": {"text": "Conventional computer chips are often made from processors called <a href=\"https://en.wikipedia.org/wiki/Multi-core_processor\">\"cores\"</a>. A <i>core-year</i> is the equivalent of using one of these cores continuously for a year. For reference, a modern laptops have around 2-4 cores. The meaning of this number depends on how powerful the core is, but this should give you a rough idea of the computing power involved.", "title": "Core Year"}}
# En 2020, los investigadores factorizaron este número utilizando una supercomputadora clásica y ~2700 [años de núcleo](gloss:coreyears) de potencia de procesamiento. Este fue un gran esfuerzo y un récord en el momento de escribir este artículo. Podemos verificar sus resultados en la celda de código a continuación (¡afortunadamente, tenemos algoritmos eficientes para la multiplicación!):
# +
# The two prime factors reported by the 2020 factorization effort
p = 64135289477071580278790190170577389084825014742943447208116859632024532344630238623598752668347708737661925585694639798853367
q = 33372027594978156556226010605355114227940760344767554666784520987023841729210037080257448673296881877565718986258036932062711
p*q == x # Evaluates to 'True', confirming the factorization
# + [markdown] gloss={"RSA": {"text": "RSA numbers are numbers taken from the RSA factoring challenge. These numbers are intentionally chosen to be difficult to factor.<p>'RSA' are the initials of three of the people that invented the protocol that uses these large numbers to encrypt information.", "title": "RSA Number"}}
# El resultado que se muestra es el valor de la última línea de la celda. En este caso, podemos ver que <code>p*q == x</code> se evalúa como <code>True</code> . Aunque no está probado matemáticamente, estamos bastante seguros de que no existe un algoritmo eficiente para factorizar tales números en las computadoras tradicionales. De hecho, gran parte del cifrado de Internet se basa en la suposición de que este problema es intratable y que factorizar un número [RSA](gloss:RSA) de 617 dígitos es imposible. Por el contrario, conocemos algoritmos de factorización eficientes para computadoras cuánticas que, una vez que tengamos computadoras cuánticas lo suficientemente grandes, estimamos que podrían factorizar estos números en menos de un día.
# + [markdown] gloss={"noise": {"text": "Noise is useless information that's difficult to distinguish from useful information. For example, it's hard to hear someone talking to you if there are lots of other people talking loudly nearby.", "title": "Noise"}, "qiskit": {"text": "Qiskit is a software development kit for working with quantum computers.", "title": "Qiskit"}, "qubits": {"text": "A 'qubit' is a 'quantum bit'. We will study these later in this course.", "title": "Qubit"}, "transistor": {"text": "A transistor is an electronic device. They can be used to switch electric currents on and off, and can be used to build a computer processor.", "title": "Transistor"}}
# ## ¿Dónde nos encontramos ahora?
#
# Ahora sabemos que las computadoras cuánticas pueden llevar a cabo algoritmos más eficientes, pero las computadoras cuánticas que tenemos hoy en día son demasiado pequeñas e inestables para dar una ventaja sobre las computadoras tradicionales.
#
# En un nivel muy simple, hay dos factores que limitan el tamaño de los problemas que pueden resolver nuestras computadoras cuánticas. El primero es la cantidad de datos que pueden almacenar y sobre los cuales trabajar, que solemos medir en [*qubits*](gloss:qubits). Si no tenemos suficientes qubits, simplemente no podemos almacenar ni operar en problemas por encima de cierto tamaño. El segundo es la tasa de error de nuestra computadora cuántica; dado que solo vemos el comportamiento cuántico en experimentos de laboratorio delicados, crear computadoras cuánticas es un proceso delicado. Las computadoras cuánticas que tenemos ahora son ruidosas, lo que significa que a menudo se equivocan e introducen ‘[ruido](gloss:noise)’ en nuestros resultados. ¡Demasiado ruido y nuestros resultados no tendrán sentido!
#
# Por el momento, las computadoras cuánticas que tenemos son experimentales. Están limitadas por el número de qubits y las tasas de error, por lo que los problemas más grandes que pueden resolver actualmente aún son fácilmente manejables para las computadoras convencionales.
#
# En algún momento en el futuro, esto cambiará. Llegaremos a la ‘ventaja cuántica’, en la que realmente tendrá sentido desde el punto de vista económico resolver un problema utilizando una computadora cuántica en lugar de una computadora convencional. ¿Cómo lo sabemos? *¡Porque medimos los algoritmos por su tasa de crecimiento!* Sabemos que, mientras las computadoras cuánticas sigan desarrollándose de manera constante, eventualmente tomarán el relevo de las computadoras clásicas.
#
# 
#
# La estimación para factorizar un número RSA de 617 dígitos en menos de un día supuso ~20 millones de qubits ruidosos. En el momento de escribir este artículo, IBM tiene actualmente una computadora cuántica de 65 qubits y tiene como objetivo crear un sistema con más de 1000 qubits para 2023. Hay otros algoritmos que creemos que nos darán una ventaja cuántica mucho antes de este hito, pero parece que aún estamos muy lejos.
#
# Deberíamos recordar de dónde vienen las computadoras convencionales. A continuación se muestra una imagen del primer [transistor](gloss:transistor), creado en 1947. Los transistores son los componentes básicos de los procesadores de las computadoras modernas.
#
#  Crédito de la imagen: Empleado federal <a href="https://clintonwhitehouse4.archives.gov/Initiatives/Millennium/capsule/mayo.html">Enlace</a>, <a href="https://commons.wikimedia.org/w/index.php?curid=554340">Dominio Público</a> .
#
# 70 años después, los chips de nuestras computadoras modernas pueden contener miles de millones de transistores.
#
# En el resto de este curso, exploraremos los efectos cuánticos que nos permiten crear algoritmos más eficientes. Al final de este curso, podrás utilizar el paquete de software, [Qiskit](gloss:qiskit), para programar una computadora cuántica y ejecutar uno de estos algoritmos.
# -
# <!-- ::: q-block.exercise -->
#
# ### Test rápido
#
# <!-- ::: q-quiz(goal="intro-why-qc-1") -->
#
# <!-- ::: .question -->
#
# Las computadoras cuánticas finalmente...
#
# <!-- ::: -->
#
# <!-- ::: .option(correct) -->
#
# 1. ...harán cálculos que son demasiado difíciles para las computadoras convencionales.
#
# <!-- ::: -->
#
# <!-- ::: .option -->
#
# 1. ...reemplazarán a las computadoras convencionales.
#
# <!-- ::: -->
#
# <!-- ::: .option -->
#
# 1. ...aumentarán la velocidad de las computadoras convencionales.
#
# <!-- ::: -->
#
# <!-- ::: -->
#
# <!-- ::: -->
| translations/es/intro/why-quantum-computing.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from rapidz import Stream
from shed import FromEventStream, ToEventStream
from bluesky import RunEngine
from ophyd.sim import hw
hw = hw()
import operator as op
RE = RunEngine()
import bluesky.plans as bp
from pprint import pprint
from databroker import Broker
db = Broker.named('temp')
# +
# Build the streaming analysis pipeline.
source = Stream()
# pull data out of the start document
my_number = FromEventStream('start', 'my_number', upstream=source)
# pull data out of the events
data = FromEventStream('event', ('data', 'motor1'), upstream=source, principle=True)
# starmap is the same as map(lambda x: f(*x))
output = data.combine_latest(my_number, emit_on=0).starmap(op.add).map(op.mul, 5)
finish = ToEventStream(output, 'answer')
# store pipeline provenance and results in the databroker
dbf = finish.DBFriendly()
dbf.starsink(db.insert)
dbf.sink(pprint)
finish.visualize()
# -
# Run a scan, feeding every document to both the pipeline and the broker.
token = RE.subscribe(lambda *x: source.emit(x))
t2 = RE.subscribe(db.insert)
RE(bp.scan([hw.motor1], hw.motor1, 0, 10, 2, md={'my_number': 5}))
RE.unsubscribe(token)
RE.unsubscribe(t2)
# Rebuild the pipeline graph from stored provenance and replay it.
from shed.replay import replay
from rapidz.graph import _clean_text, readable_graph
# Use distinct names so we do not shadow the imported `readable_graph`
# helper or the pipeline's `data` node defined above.
graph, parents, replay_data, vs = replay(db, db[-1])
for v in graph.nodes.values():
    v.update(label=_clean_text(str(v['stream'])).strip())
labeled_graph = readable_graph(graph)
print(list(labeled_graph.nodes.keys()))
# Tweak the replayed pipeline: multiply by 10 instead of the original 5.
labeled_graph.nodes['map; mul']['stream'].args = (10,)
labeled_graph.nodes['answer ToEventStream']['stream'].sink(print)
# Push the stored documents back through the (modified) pipeline.
for v in vs:
    d = replay_data[v['uid']]
    parents[v["node"]].update(d)
labeled_graph.nodes['map; mul']['stream'].visualize()
| examples/provenance_shed.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Machine Learning with H2O - Tutorial 2: Basic Data Manipulation
#
# <hr>
#
# **Objective**:
#
# - This tutorial demonstrates basic data manipulation with H2O.
#
# <hr>
#
# **Titanic Dataset:**
#
# - Source: https://www.kaggle.com/c/titanic/data
#
# <hr>
#
# **Full Technical Reference:**
#
# - http://docs.h2o.ai/h2o/latest-stable/h2o-r/h2o_package.pdf
#
# <br>
#
# Start and connect to a local H2O cluster
# Load H2O and start / connect to a local cluster using all available cores
suppressPackageStartupMessages(library(h2o))
h2o.init(nthreads = -1)
# Import the Titanic data from a local CSV file
titanic <- h2o.importFile("kaggle_titanic.csv")
# Peek at the first ten rows
head(titanic, 10)
# <br>
#
# Explain why we need to transform
#
# <br>
# Summary statistics for the 'Survived' column
h2o.describe(titanic[, 'Survived'])
# Histogram of 'Survived'
h2o.hist(titanic[, 'Survived'])
# Frequency table of the 0s and 1s
h2o.table(titanic[, 'Survived'])
# 'Survived' is really a label, so convert it to a categorical (factor)
titanic[, 'Survived'] <- as.factor(titanic[, 'Survived'])
# The column type is now 'enum' (H2O/Java's name for a categorical)
h2o.describe(titanic[, 'Survived'])
# <br>
#
# Doing the same for 'Pclass'
#
# <br>
# Summary statistics for the 'Pclass' column
h2o.describe(titanic[, 'Pclass'])
# Histogram of 'Pclass'
h2o.hist(titanic[, 'Pclass'])
# Frequency table of classes 1, 2 and 3
h2o.table(titanic[, 'Pclass'])
# 'Pclass' is a class label, so treat it as a categorical (factor) too
titanic[, 'Pclass'] <- as.factor(titanic[, 'Pclass'])
# The column type is now 'enum'
h2o.describe(titanic[, 'Pclass'])
| introduction_to_machine_learning/r_02_data_manipulation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Elastic wave equation implementation on a staggered grid
#
# This is a first attempt at implemenenting the elastic wave equation as described in:
#
# [1] <NAME> (1986). ”P-SV wave propagation in heterogeneous media: Velocity‐stress finite‐difference method.” GEOPHYSICS, 51(4), 889-901. https://doi.org/10.1190/1.1442147
#
# The current version actually attempts to mirror the FDELMODC implementation by <NAME>:
#
# [2] https://janth.home.xs4all.nl/Software/fdelmodcManual.pdf
#
# ## Explosive source
#
# We will first attempt to replicate the explosive source test case described in [1], Figure 4. We start by defining the source signature $g(t)$, the derivative of a Gaussian pulse, given by Eq 4:
#
# $$g(t) = -2 \alpha(t - t_0)e^{-\alpha(t-t_0)^2}$$
# +
from devito import *
from examples.seismic.source import WaveletSource, RickerSource, GaborSource, TimeAxis
from examples.seismic import plot_image
import numpy as np
from sympy import init_printing, latex
init_printing(use_latex=True)
# -
# Grid: 2km x 2km (extent = 2000 m per side), 201 points per axis -> 10 m spacing
# (the original comment claimed 1km x 1km with 100 m spacing, which the numbers
# below contradict)
extent = (2000., 2000.)
shape = (201, 201)
# spacing constants h_x, h_z are derived as extent / (points - 1)
x = SpaceDimension(name='x', spacing=Constant(name='h_x', value=extent[0]/(shape[0]-1)))
z = SpaceDimension(name='z', spacing=Constant(name='h_z', value=extent[1]/(shape[1]-1)))
grid = Grid(extent=extent, shape=shape, dimensions=(x, z))
# +
class DGaussSource(WaveletSource):
    """Source wavelet g(t) = -2*a*(t - t0) * exp(-a*(t - t0)**2) with t0 = 1/f0."""

    def wavelet(self, f0, t):
        """Evaluate the derivative-of-Gaussian pulse at times *t* for peak frequency *f0*."""
        a = 0.004  # fixed width parameter
        shifted = t - 1/f0
        return -2. * a * shifted * np.exp(-a * shifted ** 2)
# Timestep size from Eq. 7 with V_p=6000. and dx=100
# (dt hard-coded from those values -- TODO confirm against Eq. 7 of the reference)
t0, tn = 0., 600.
dt = (10. / np.sqrt(2.)) / 6.
time_range = TimeAxis(start=t0, stop=tn, step=dt)
# NOTE(review): DGaussSource is defined above but a RickerSource is used here --
# confirm which wavelet the experiment intends.
src = RickerSource(name='src', grid=grid, f0=0.01, time_range=time_range)
# place the source at the centre of the 2km x 2km domain
src.coordinates.data[:] = [1000., 1000.]
src.show()
# +
# Now we create the velocity and pressure fields
# staggered=(...) flags select shifted grid positions for each field --
# presumably (time, x, z) half-cell shifts; TODO confirm against Devito docs
vx= TimeFunction(name='vx', grid=grid, staggered=(0, 1, 0))
vz = TimeFunction(name='vz', grid=grid, staggered=(0, 0, 1))
txx = TimeFunction(name='txx', grid=grid)
tzz = TimeFunction(name='tzz', grid=grid)
txz = TimeFunction(name='txz', grid=grid, staggered=(0, 1, 1))
# And finally we create the discretized/indexed symbols
# (raw indexed accessors; defined here but not used in the update equations below)
VX = vx.indexed
VZ = vz.indexed
TXX = txx.indexed
TZZ = tzz.indexed
TXZ = txz.indexed
# -
def derivative(f, dim, order, stagger=centered):
    """
    Build a staggered finite-difference approximation of df/d(dim).

    `stagger` selects a half-cell shift of the evaluation point:
    `left` -> -0.5 cells, `right` -> +0.5 cells, otherwise no shift.
    """
    spacing = dim.spacing
    if stagger == left:
        offset = -.5
    elif stagger == right:
        offset = .5
    else:
        offset = 0.
    half = int(order / 2)
    # stencil points: `order` grid locations around `dim`, shifted by `offset`
    points = [dim + int(i + .5 + offset) * spacing for i in range(-half, half)]
    return f.diff(dim).as_finite_difference(points, x0=dim + offset * spacing)
# +
# Now let's try and create the staggered updates
t = grid.stepping_dim
time = grid.time_dim
# We need some initial conditions
# homogeneous medium: P velocity, S velocity and density (model units)
V_p = 4.0
V_s = 1.0
density = 3.
# The source injection term
# (explosive source: injected equally into both normal stresses)
src_xx = src.inject(field=txx.forward, expr=src)
src_zz = src.inject(field=tzz.forward, expr=src)
#c1 = 9.0/8.0;
#c2 = -1.0/24.0;
# Thorbecke's parameter notation
# cp2/cs2: squared velocities; ro: buoyancy (1/density); mu, l: density-scaled
# moduli in Thorbecke's formulation -- TODO confirm against the fdelmodc manual
cp2 = V_p*V_p
cs2 = V_s*V_s
ro = 1/density
mu = cs2*ro
l = (cp2*ro - 2*mu)
# fdelmodc reference implementation
# first-order velocity-stress update equations, 4th-order spatial stencils
u_vx = Eq(vx.forward, vx - dt*ro*(derivative(txx, dim=x, order=4, stagger=left)
                                  + derivative(txz, dim=z, order=4, stagger=right)))
u_vz = Eq(vz.forward, vz - ro*dt*(derivative(txz, dim=x, order=4, stagger=right)
                                  + derivative(tzz, dim=z, order=4, stagger=left)))
u_txx = Eq(txx.forward, txx - (l+2*mu)*dt * derivative(vx.forward, dim=x, order=4, stagger=right)
                            - l*dt * derivative(vz.forward, dim=z, order=4, stagger=right))
u_tzz = Eq(tzz.forward, tzz - (l+2*mu)*dt * derivative(vz.forward, dim=z, order=4, stagger=right)
                            - l*dt * derivative(vx.forward, dim=x, order=4, stagger=right))
u_txz = Eq(txz.forward, txz - mu*dt * (derivative(vx.forward, dim=z, order=4, stagger=left)
                                       + derivative(vz.forward, dim=x, order=4, stagger=left)))
# -
# build the operator applying all five updates plus the source injection
op = Operator([u_vx, u_vz, u_txx, u_tzz, u_txz] + src_xx + src_zz)
# +
# Reset the fields
vx.data[:] = 0.
vz.data[:] = 0.
txx.data[:] = 0.
tzz.data[:] = 0.
txz.data[:] = 0.
# run the operator over the full time range
op()
# -
# Let's see what we got....
plot_image(vx.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(vz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(txx.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(tzz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(txz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
# +
# Now that looks pretty! But let's do it again with a higher order...
# same scheme as above but with 12th-order spatial stencils
order = 12
u_vx = Eq(vx.forward, vx - dt*ro*(derivative(txx, dim=x, order=order, stagger=left)
                                  + derivative(txz, dim=z, order=order, stagger=right)))
u_vz = Eq(vz.forward, vz - ro*dt*(derivative(txz, dim=x, order=order, stagger=right)
                                  + derivative(tzz, dim=z, order=order, stagger=left)))
u_txx = Eq(txx.forward, txx - (l+2*mu)*dt * derivative(vx.forward, dim=x, order=order, stagger=right)
                            - l*dt * derivative(vz.forward, dim=z, order=order, stagger=right))
u_tzz = Eq(tzz.forward, tzz - (l+2*mu)*dt * derivative(vz.forward, dim=z, order=order, stagger=right)
                            - l*dt * derivative(vx.forward, dim=x, order=order, stagger=right))
u_txz = Eq(txz.forward, txz - mu*dt * (derivative(vx.forward, dim=z, order=order, stagger=left)
                                       + derivative(vz.forward, dim=x, order=order, stagger=left)))
op = Operator([u_vx, u_vz, u_txx, u_tzz, u_txz] + src_xx + src_zz)
# Reset the fields
vx.data[:] = 0.
vz.data[:] = 0.
txx.data[:] = 0.
tzz.data[:] = 0.
txz.data[:] = 0.
op()
# plot the higher-order results for comparison with the 4th-order run above
plot_image(vx.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(vz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(txx.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(tzz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
plot_image(txz.data[0], vmin=-.5*1e-2, vmax=.5*1e-2, cmap="seismic")
# -
| examples/seismic/tutorials/07_elastic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Style Transfer with Deep Neural Networks
#
#
# In this notebook, we’ll *recreate* a style transfer method that is outlined in the paper, [Image Style Transfer Using Convolutional Neural Networks, by Gatys](https://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Gatys_Image_Style_Transfer_CVPR_2016_paper.pdf) in PyTorch.
#
# In this paper, style transfer uses the features found in the 19-layer VGG Network, which is comprised of a series of convolutional and pooling layers, and a few fully-connected layers. In the image below, the convolutional layers are named by stack and their order in the stack. Conv_1_1 is the first convolutional layer that an image is passed through, in the first stack. Conv_2_1 is the first convolutional layer in the *second* stack. The deepest convolutional layer in the network is conv_5_4.
#
# <img src='notebook_ims/vgg19_convlayers.png' width=80% />
#
# ### Separating Style and Content
#
# Style transfer relies on separating the content and style of an image. Given one content image and one style image, we aim to create a new, _target_ image which should contain our desired content and style components:
# * objects and their arrangement are similar to that of the **content image**
# * style, colors, and textures are similar to that of the **style image**
#
# An example is shown below, where the content image is of a cat, and the style image is of [Hokusai's Great Wave](https://en.wikipedia.org/wiki/The_Great_Wave_off_Kanagawa). The generated target image still contains the cat but is stylized with the waves, blue and beige colors, and block print textures of the style image!
#
# <img src='notebook_ims/style_tx_cat.png' width=80% />
#
# In this notebook, we'll use a pre-trained VGG19 Net to extract content or style features from a passed in image. We'll then formalize the idea of content and style _losses_ and use those to iteratively update our target image until we get a result that we want. You are encouraged to use a style and content image of your own and share your work on Twitter with @udacity; we'd love to see what you come up with!
# +
# import resources
# %matplotlib inline
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms, models
# -
# ## Load in VGG19 (features)
#
# VGG19 is split into two portions:
# * `vgg19.features`, which are all the convolutional and pooling layers
# * `vgg19.classifier`, which are the three linear, classifier layers at the end
#
# We only need the `features` portion, which we're going to load in and "freeze" the weights of, below.
# +
# get the "features" portion of VGG19 (we will not need the "classifier" portion)
# NOTE: this downloads the pretrained weights on first use
vgg = models.vgg19(pretrained=True).features
# freeze all VGG parameters since we're only optimizing the target image
for param in vgg.parameters():
    param.requires_grad_(False)
# +
# move the model to GPU, if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
vgg.to(device)
# -
# ### Load in Content and Style Images
#
# You can load in any images you want! Below, we've provided a helper function for loading in any type and size of image. The `load_image` function also converts images to normalized Tensors.
#
# Additionally, it will be easier to have smaller images and to squish the content and style images so that they are of the same size.
def load_image(img_path, max_size=400, shape=None):
    '''Load an image from disk and return it as a normalized 4-D tensor,
    capping the larger x-y dimension at max_size pixels (unless `shape`
    explicitly overrides the resize target).'''
    image = Image.open(img_path).convert('RGB')

    # large images will slow down processing, so cap the larger dimension
    size = min(max(image.size), max_size)
    if shape is not None:
        size = shape

    transform = transforms.Compose([
        transforms.Resize(size),
        transforms.ToTensor(),
        transforms.Normalize((0.485, 0.456, 0.406),
                             (0.229, 0.224, 0.225))])

    # drop any alpha channel (keep the first 3 channels) and add a batch axis
    return transform(image)[:3, :, :].unsqueeze(0)
# Next, I'm loading in images by file name and forcing the style image to be the same size as the content image.
# load in content and style image
content = load_image('images/octopus.jpg').to(device)
# Resize style to match content (same H x W), makes later feature comparison easier
style = load_image('images/hockney.jpg', shape=content.shape[-2:]).to(device)
# helper function for un-normalizing an image
# and converting it from a Tensor image to a NumPy image for display
def im_convert(tensor):
    """Un-normalize a (1, C, H, W) tensor and return it as an (H, W, C) NumPy image."""
    mean = np.array((0.485, 0.456, 0.406))
    std = np.array((0.229, 0.224, 0.225))
    arr = tensor.to("cpu").clone().detach().numpy().squeeze()
    arr = arr.transpose(1, 2, 0)   # (C, H, W) -> (H, W, C)
    arr = arr * std + mean         # undo the VGG normalization
    return arr.clip(0, 1)
# display the content and style images side-by-side for a visual sanity check
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
# content and style ims side-by-side
ax1.imshow(im_convert(content))
ax2.imshow(im_convert(style))
# ---
# ## VGG19 Layers
#
# To get the content and style representations of an image, we have to pass an image forward through the VGG19 network until we get to the desired layer(s) and then get the output from that layer.
# print out VGG19 structure so you can see the names of various layers
print(vgg)
# ## Content and Style Features
#
# #### TODO: complete the mapping of layer names to the names found in the paper for the _content representation_ and the _style representation_.
#
# The first layer (0) to `conv1_1` has been done for you, below.
def get_features(image, model, layers=None):
    """Forward *image* through *model*, capturing the activations of the
    requested layers.

    `layers` maps module index (as a string) to a paper-style name; the
    default matches the Gatys et al. (2016) content/style layers of VGG19.
    """
    if layers is None:
        layers = {'0': 'conv1_1',
                  '5': 'conv2_1',
                  '10': 'conv3_1',
                  '19': 'conv4_1',
                  '21': 'conv4_2',
                  '28': 'conv5_1'}

    captured = {}
    activation = image
    # walk the model layer by layer, recording the requested intermediate outputs
    for name, module in model._modules.items():
        activation = module(activation)
        if name in layers:
            captured[layers[name]] = activation
    return captured
# ---
# ## Gram Matrix
#
# The output of every convolutional layer is a Tensor with dimensions associated with the `batch_size`, a depth, `d` and some height and width (`h`, `w`). The Gram matrix of a convolutional layer can be calculated as follows:
# * Get the depth, height, and width of a tensor using `batch_size, d, h, w = tensor.size`
# * Reshape that tensor so that the spatial dimensions are flattened
# * Calculate the gram matrix by multiplying the reshaped tensor by it's transpose
#
# *Note: You can multiply two matrices using `torch.mm(matrix1, matrix2)`.*
#
# #### TODO: Complete the `gram_matrix` function.
def gram_matrix(tensor):
    """ Calculate the Gram Matrix of a given tensor
        Gram Matrix: https://en.wikipedia.org/wiki/Gramian_matrix
    """
    _, depth, height, width = tensor.size()
    # flatten each channel's spatial map into one row (assumes batch size 1)
    flat = tensor.view(depth, -1)
    # channel-by-channel correlations: F @ F^T
    return torch.mm(flat, flat.t())
# ## Putting it all Together
#
# Now that we've written functions for extracting features and computing the gram matrix of a given convolutional layer; let's put all these pieces together! We'll extract our features from our images and calculate the gram matrices for each layer in our style representation.
# +
# get content and style features only once before forming the target image
content_features = get_features(content, vgg)
style_features = get_features(style, vgg)
# calculate the gram matrices for each layer of our style representation
style_grams = {layer: gram_matrix(style_features[layer]) for layer in style_features}
# create a third "target" image and prep it for change
# it is a good idea to start off with the target as a copy of our *content* image
# then iteratively change its style (requires_grad lets the optimizer update pixels)
target = content.clone().requires_grad_(True).to(device)
# -
# ---
# ## Loss and Weights
#
# #### Individual Layer Style Weights
#
# Below, you are given the option to weight the style representation at each relevant layer. It's suggested that you use a range between 0-1 to weight these layers. By weighting earlier layers (`conv1_1` and `conv2_1`) more, you can expect to get _larger_ style artifacts in your resulting, target image. Should you choose to weight later layers, you'll get more emphasis on smaller features. This is because each layer is a different size and together they create a multi-scale style representation!
#
# #### Content and Style Weight
#
# Just like in the paper, we define an alpha (`content_weight`) and a beta (`style_weight`). This ratio will affect how _stylized_ your final image is. It's recommended that you leave the content_weight = 1 and set the style_weight to achieve the ratio you want.
# +
# weights for each style layer
# weighting earlier layers more will result in *larger* style artifacts
# notice we are excluding `conv4_2` our content representation
style_weights = {'conv1_1': 1.,
                 'conv2_1': 0.8,
                 'conv3_1': 0.5,
                 'conv4_1': 0.3,
                 'conv5_1': 0.1}
# you may choose to leave these as is
# alpha/beta ratio controls how stylized the final image becomes
content_weight = 1  # alpha
style_weight = 1e6  # beta
# -
# ## Updating the Target & Calculating Losses
#
# You'll decide on a number of steps for which to update your image, this is similar to the training loop that you've seen before, only we are changing our _target_ image and nothing else about VGG19 or any other image. Therefore, the number of steps is really up to you to set! **I recommend using at least 2000 steps for good results.** But, you may want to start out with fewer steps if you are just testing out different weight values or experimenting with different images.
#
# Inside the iteration loop, you'll calculate the content and style losses and update your target image, accordingly.
#
# #### Content Loss
#
# The content loss will be the mean squared difference between the target and content features at layer `conv4_2`. This can be calculated as follows:
# ```
# content_loss = torch.mean((target_features['conv4_2'] - content_features['conv4_2'])**2)
# ```
#
# #### Style Loss
#
# The style loss is calculated in a similar way, only you have to iterate through a number of layers, specified by name in our dictionary `style_weights`.
# > You'll calculate the gram matrix for the target image, `target_gram` and style image `style_gram` at each of these layers and compare those gram matrices, calculating the `layer_style_loss`.
# > Later, you'll see that this value is normalized by the size of the layer.
#
# #### Total Loss
#
# Finally, you'll create the total loss by adding up the style and content losses and weighting them with your specified alpha and beta!
#
# Intermittently, we'll print out this loss; don't be alarmed if the loss is very large. It takes some time for an image's style to change and you should focus on the appearance of your target image rather than any loss value. Still, you should see that this loss decreases over some number of iterations.
#
# #### TODO: Define content, style, and total losses.
# +
# for displaying the target image, intermittently
show_every = 400

# iteration hyperparameters
# Adam optimizes the *pixels* of the target image directly
optimizer = optim.Adam([target], lr=0.003)
steps = 2000  # decide how many iterations to update your image (5000)

# NOTE(review): this is the exercise template -- the `None` placeholders below
# are TODOs and the loop will raise until they are filled in.
for ii in range(1, steps+1):
    ## TODO: get the features from your target image
    ## Then calculate the content loss
    target_features = None
    content_loss = None
    # the style loss
    # initialize the style loss to 0
    style_loss = 0
    # iterate through each style layer and add to the style loss
    for layer in style_weights:
        # get the "target" style representation for the layer
        target_feature = target_features[layer]
        _, d, h, w = target_feature.shape
        ## TODO: Calculate the target gram matrix
        target_gram = None
        ## TODO: get the "style" style representation
        style_gram = None
        ## TODO: Calculate the style loss for one layer, weighted appropriately
        layer_style_loss = None
        # add to the style loss (normalized by the layer size)
        style_loss += layer_style_loss / (d * h * w)
    ## TODO: calculate the *total* loss
    total_loss = None
    ## -- do not need to change code, below -- ##
    # update your target image
    optimizer.zero_grad()
    total_loss.backward()
    optimizer.step()
    # display intermediate images and print the loss
    if ii % show_every == 0:
        print('Total loss: ', total_loss.item())
        plt.imshow(im_convert(target))
        plt.show()
# -
# ## Display the Target Image
# display content and final, target image side-by-side
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(20, 10))
ax1.imshow(im_convert(content))
ax2.imshow(im_convert(target))
| style-transfer/.ipynb_checkpoints/Style_Transfer_Exercise-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import sys
# Input workbook: local Excel file with the order details ("测试数据" = test data)
# NOTE(review): file_name starts with a literal backslash, relying on Windows
# path separators -- verify on the target machine.
file_name="\测试数据.xlsx"
file_path=r"F:\OneDrive\华莱士\电商"
file=file_path+file_name
# read selected columns from the order-detail sheet ('订单明细')
df = pd.read_excel(file,
                   usecols = "D,M,R:AD",
                   sheet_name= '订单明细')
df.head(10)
# +
# Configured minimum fee ("保底") and commission rate ("费率") per the contract
设定保底=4.5
设定费率=15
# Alternative minimum/rate used for the what-if calculation ("测算")
测算保底=0 # this scenario has no minimum fee
测算费率=20
# Reference table of delivery channels and their rates (runtime string kept as-is)
print(
"""
配送方式 费率 保底
美团快送 15.0% 4.5
美团专送 15.0% 4.5
到店取餐 5.0% 0.5
津贴联盟 -
代理商配送 16.0% -按原价
商家自配 16.0% -
只测算美团的
""")
df.dtypes
# +
# pandas 0.21.0 introduced infer_objects() for converting object-dtype columns
# to more specific types; here the columns are coerced manually instead.
# Replace the '-' placeholder with 0 before casting to float.
df['美团活动补贴']=df['美团活动补贴'].replace('-',0).astype(float)
df['用户支付配送费']=df['用户支付配送费'].replace('-',0).astype(float)
df['用户线上支付金额']=df['用户线上支付金额'].replace('-',0).astype(float)
try:
    df['费率']=df['费率'].str.strip("%").astype(float) # convert "xx%" strings to numbers
except AttributeError as err:
    # column is already numeric (no .str accessor) -- leave it unchanged
    print(err)
df['保底']=df['保底'].replace('-',0).astype(float) # force numeric dtype
df美团待测算=df
df美团待测算.dtypes
print("当前最大费率(代理商费率16,美团专送15):",df['费率'].max(),"当前最大保底:",df['保底'].max())
print("""
费率 计数
""",df美团待测算['费率'].value_counts())
# +
# Descriptive statistics; goals: count of minimum-fee orders, total order count,
# and average order values (overall / minimum-fee / commission-based, Meituan only)
print("描述性统计,数据收集目标:保底订单计数,总订单计数,全部客单价、保底客单价、抽点客单价(筛选美团外卖)")
# An order hit the minimum fee when its platform fee equals -保底
df美团待测算.loc[df美团待测算['平台服务费']== -df美团待测算['保底'],'是否保底']=True # flag minimum-fee orders
df美团待测算['是否保底']=df美团待测算['是否保底'].fillna(False)
df美团待测算.head(10)
# -
# Descriptive statistics of the platform service fee ('平台服务费'), grouped by
# transaction type, delivery method, and whether the minimum fee applied.
# The three near-identical groupby blocks are folded into one loop
# (count / mean / sum), producing the same concatenated output as before.
group_cols = ['交易类型', '配送方式', '是否保底']
grouped = df美团待测算.groupby(group_cols)['平台服务费']
summaries = []
for agg_name, label in (('count', '计数'), ('mean', '平均值'), ('sum', '求和')):
    frame = grouped.agg(agg_name).reset_index()
    frame['统计'] = label
    frame['文件名'] = file_name
    summaries.append(frame)
# concatenate in the original order (count, mean, sum) and export to CSV
pd.concat(summaries, axis=0).to_csv(file_path + file_name + '输出' + '.csv')
# 服务费计算方法
# df美团专送.loc[df美团专送['校验']>=1] 这样会出来一堆代理商配送,这种代理商配送的,是原价*费率,我们一定要谈掉,太坑爹了
# df美团待测算['无保底服务费']=(df美团待测算['用户线上支付金额'] - df美团待测算['用户支付配送费'] + df美团待测算['美团活动补贴'])*设定费率/100
| jupyter_notebook/HLS-temp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Load the salary dataset (columns: YearsExperience, Salary)
df1 = pd.read_csv('data/Salary_Data.csv')
df1.head()
df1.shape
# quick look at the relationship before modelling
sns.scatterplot(data=df1,x='YearsExperience',y='Salary')
plt.show()
# target: salary (second column)
y = df1.iloc[:,1].values
y
# feature matrix: years of experience (first column, kept 2-D for sklearn)
x = df1.iloc[:,:1].values
x
# # Splitting data for Training and Testing
from sklearn.model_selection import train_test_split
# 70/30 split; fixed random_state for reproducibility
X_train,X_test,Y_train,Y_test = train_test_split(x,y,train_size=0.7,random_state=0)
X_train.shape
X_test.shape
Y_train.shape
Y_test.shape
# # Creating a ML Model
from sklearn.linear_model import LinearRegression
regressor = LinearRegression()
regressor.fit(X_train,Y_train)
# predict on the held-out test set
y_predict = regressor.predict(X_test)
y_predict
Y_test
# # Model Coefficients
regressor.intercept_
regressor.coef_
# # Equation of Line --> y = 9360.26* x + 26777.39
# # Model Evaluation
from sklearn import metrics
# mean absolute error
MAE = metrics.mean_absolute_error(Y_test,y_predict)
MAE
# mean squared error
MSE = metrics.mean_squared_error(Y_test,y_predict)
MSE
# root mean squared error
RMSE = np.sqrt(MSE)
RMSE
# coefficient of determination
R2 = metrics.r2_score(Y_test,y_predict)
R2
# # Hands on Task
# second dataset for the hands-on exercise below
df1 = pd.read_csv('data/auto-mpg.csv')
df1.head()
# # HandsOn - TASK
#
# ### 1. Create a Machine Learning Model for predicting Mpg from all the input features in the data.
#
# 1. EDA
# 2. Data Clearning & Pre-Processing
# 3. Build ML Model
# 4. Model Evaluation.
#
| MLDC MAY'21 - Day 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
from math import pi
from scipy.special import hankel2, jv
# basic periodic scatter information
from novice_stakes.periodic_scatter import Bragg
# complete reflection coefficent calculation modules to check results
from novice_stakes.periodic_scatter import CosineRs, QuadRs
# +
# acoustic parameters
theta_inc = 35. * pi / 180  # incidence angle, degrees -> radians
fc = 500.  # monofrequency source
c = 1500.  # sound speed, m/s
kc = 2 * pi * fc / c  # acoustic wavenumber
# source and receiver parameters (z negative: below the surface)
xsrc = 0
zsrc = -10.
xrcr = 200.
zrcr = -20.
# +
# setup xaxis centered around receiver
decimation = 8  # integration samples per acoustic wavelength
# sample spacing: one wavelength (c / fc) divided into `decimation` pieces.
# BUG FIX: was `fc / (8 * c)` = 1 / (8 * wavelength), which is not a length and
# left `decimation` unused; the comment's intent is wavelength / decimation.
dx = c / (decimation * fc)
ilength = 100000
# compute xaxis
numx = int(np.ceil(ilength / dx))
xaxis = np.arange(numx) * dx + (xrcr - ilength / 2)
# +
# Periodic surface formulation
# Sinusoidal surface
H = 2.   # surface height scale (profile is (H/2)*cos(K*x)), m
L = 45.  # surface period, m
K = 2 * pi / L  # surface wavenumber
# Periodic length determines the Bragg scatter angles
numeva = 10  # number of extra orders retained -- presumably evanescent; TODO confirm
bragg = Bragg(L)
qvec = bragg.qvec(theta_inc, numeva, fc)
a0, aq, b0, bq = bragg.bragg_angles(theta_inc, qvec, fc)
# surface specifications for one period
num_per = int(np.ceil(L / dx))
x_per = np.arange(num_per) * dx
# -
# Use far field approximation for hankel function for scatter pressure integral
# sanity check: compare H0^(2)(z) with its large-argument asymptotic form at z = 25
ztest = 25.
hexact = hankel2(0,ztest)
happx = np.sqrt(2 / (pi * ztest)) * np.exp(-1j * (ztest - pi / 4))
# relative error of the approximation (displayed by the notebook)
np.abs(hexact - happx) / np.abs(hexact)
# +
# Assume no structure for source or surface
# recover the image source for a flat surface
# Kirchhoff-approximation integrand from the incident plane wave
dpinc_KA = (kc * np.sin(theta_inc) / 2) \
    * np.exp(-1j * kc * (np.cos(theta_inc) * xaxis + np.sin(theta_inc) * np.abs(zsrc)))
# far-field Green's function from each surface point to the receiver
rra = np.sqrt((xrcr - xaxis) ** 2 + zrcr ** 2)
gra = np.sqrt(2 / (pi * kc * rra)) * np.exp(-1j * (kc * rra - pi / 4))
# negative sign is consistent with other integrals that include hankel of 2nd kind
pKA = -np.sum(dpinc_KA * gra) * dx
# exact image-source solution for comparison
pimg = -np.exp(-1j * kc * (np.cos(theta_inc) * xrcr + np.sin(theta_inc) * np.abs(zrcr + zsrc)))
# relative error (displayed by the notebook)
np.abs(pKA - pimg) / np.abs(pimg)
# +
# Assume periodic source and surface, flat surface
# source term
projection = b0
KA_per = -2j * projection * np.exp(-1j * b0 * -zsrc)
# receiver term using grating greens function
gra = np.exp(-1j * (bq[:, None] * -zrcr + qvec[:, None] * K * (xrcr - x_per))) / bq[:, None]
gra = (1j / (2 * L)) * np.sum(gra, axis=0)
# surface integral for scattered pressure
p_sca_per = -np.exp(-1j * a0 * xrcr) * np.sum(KA_per * gra) * dx
# relative error against the image solution (displayed by the notebook)
np.abs(p_sca_per - pimg) / np.abs(pimg)
# +
# non-structured KA surface integral for a sinusoidal surface
# surface profile and slope over the full integration axis
eta = (H / 2) * np.cos(K * xaxis)
eta_p = -(H * K / 2) * np.sin(K * xaxis)
# projection of the incident direction onto the (unnormalized) surface normal
projection = np.dot(np.array([np.cos(theta_inc), np.sin(theta_inc)]), np.array([-eta_p, np.ones_like(xaxis)]))
dpinc_KA = (kc * projection / 2) \
    * np.exp(-1j * kc * (np.cos(theta_inc) * xaxis + np.sin(theta_inc) * np.abs(eta - zsrc)))
rra = np.sqrt((xrcr - xaxis) ** 2 + (zrcr - eta) ** 2)
gra = np.sqrt(2 / (pi * kc * rra)) * np.exp(-1j * (kc * rra - pi / 4))
# negative sign is consistent with other integrals that include hankel of 2nd kind
pKA = -np.sum(dpinc_KA * gra) * dx
pKA
# +
# Integrate KA using periodic greens function, sinusoidal surface
# one period of the surface profile and slope
eta = (H / 2) * np.cos(K * x_per)
eta_p = -(H * K / 2) * np.sin(K * x_per)
# source term
projection = np.dot(np.array([a0, b0]),
                    np.array([-eta_p, np.ones_like(x_per)]))
KA_per = -2j * projection * np.exp(-1j * b0 * (eta - zsrc))
# receiver term
phase = bq[:, None] * (eta - zrcr) + qvec[:, None] * K * (xrcr - x_per)
gra = np.exp(-1j * phase) / bq[:, None]
gra = (1j / (2 * L)) * np.sum(gra, axis=0)
# surface integral for scattered pressure
p_sca_per = -np.exp(-1j * a0 * xrcr) * np.sum(KA_per * gra) * dx
p_sca_per
# +
# Reflection coefficient formulation for scatter pressure
# source term
projection = np.dot(np.array([a0, b0]),
                    np.array([-eta_p, np.ones_like(x_per)]))
KA_per = -2j * projection * np.exp(-1j * b0 * eta)
# receiver term
gra = (1j / (2 * L)) * np.exp(-1j * (bq[:, None] * eta - qvec[:, None] * K * x_per)) / bq[:, None]
# integration for reflection coefficients, one per Bragg order
R_int = -np.sum(KA_per * gra, axis=1) * dx
# assemble the scattered pressure from the per-order reflection coefficients
p_sca_r = np.dot(R_int, np.exp(-1j * (-b0 * zsrc + aq * xrcr - bq * zrcr)))
# should agree with the direct periodic integration above
np.abs(p_sca_r - p_sca_per)
# -
# Analytic integration for KA reflection coefficients specific to a sinusoidal surface
r_analytic = 1j ** qvec * jv(qvec, -H * (b0 + bq) / 2) \
    * (a0 * qvec * K / (bq * (b0 + bq)) - b0 / bq)
np.max(np.abs(R_int - r_analytic))
# confirm agreement with module calculations (analytic cosine-surface R)
r_cos = CosineRs(H, L, c=c)
r_KA_ana = r_cos.ka(theta_inc, qvec, fc)
p_KA_ana = bragg.p_sca(theta_inc, qvec, fc, r_KA_ana, xsrc, zsrc, xrcr, zrcr)
np.abs(p_sca_r - p_KA_ana)
# confirm agreement with module calculations (quadrature R for a general surface)
r_quad = QuadRs(x_per, eta, eta_p, c=c)
r_KA_quad = r_quad.ka(theta_inc, qvec, fc)
p_KA_quad = bragg.p_sca(theta_inc, qvec, fc, r_KA_quad, xsrc, zsrc, xrcr, zrcr)
np.abs(p_sca_r - p_KA_quad)
np.max(np.abs(r_KA_ana - r_KA_quad))
| ipynb/ka_plane_wave_sin.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="80xnUmoI7fBX"
# ##### Copyright 2020 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="8nvTnfs6Q692"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="WmfcMK5P5C1G"
# # Introduction to the TensorFlow Models NLP library
# + [markdown] colab_type="text" id="cH-oJ8R6AHMK"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/official_models/nlp/nlp_modeling_library_intro"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/models/blob/master/official/colab/nlp/nlp_modeling_library_intro.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/models/blob/master/official/colab/nlp/nlp_modeling_library_intro.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/models/official/colab/nlp/nlp_modeling_library_intro.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="0H_EFIhq4-MJ"
# ## Learning objectives
#
# In this Colab notebook, you will learn how to build transformer-based models for common NLP tasks including pretraining, span labelling and classification using the building blocks from [NLP modeling library](https://github.com/tensorflow/models/tree/master/official/nlp/modeling).
# + [markdown] colab_type="text" id="2N97-dps_nUk"
# ## Install and import
# + [markdown] colab_type="text" id="459ygAVl_rg0"
# ### Install the TensorFlow Model Garden pip package
#
# * `tf-models-official` is the stable Model Garden package. Note that it may not include the latest changes in the `tensorflow_models` github repo. To include latest changes, you may install `tf-models-nightly`,
# which is the nightly Model Garden package created daily automatically.
# * `pip` will install all models and dependencies automatically.
# + colab={} colab_type="code" id="Y-qGkdh6_sZc"
# !pip install -q tf-models-official==2.3.0
# + [markdown] colab_type="text" id="e4huSSwyAG_5"
# ### Import Tensorflow and other libraries
# + colab={} colab_type="code" id="jqYXqtjBAJd9"
import numpy as np
import tensorflow as tf
from official.nlp import modeling
from official.nlp.modeling import layers, losses, models, networks
# + [markdown] colab_type="text" id="djBQWjvy-60Y"
# ## BERT pretraining model
#
# BERT ([Pre-training of Deep Bidirectional Transformers for Language Understanding](https://arxiv.org/abs/1810.04805)) introduced the method of pre-training language representations on a large text corpus and then using that model for downstream NLP tasks.
#
# In this section, we will learn how to build a model to pretrain BERT on the masked language modeling task and next sentence prediction task. For simplicity, we only show the minimum example and use dummy data.
# + [markdown] colab_type="text" id="MKuHVlsCHmiq"
# ### Build a `BertPretrainer` model wrapping `TransformerEncoder`
#
# The [TransformerEncoder](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/networks/transformer_encoder.py) implements the Transformer-based encoder as described in [BERT paper](https://arxiv.org/abs/1810.04805). It includes the embedding lookups and transformer layers, but not the masked language model or classification task networks.
#
# The [BertPretrainer](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_pretrainer.py) allows a user to pass in a transformer stack, and instantiates the masked language model and classification networks that are used to create the training objectives.
# + colab={} colab_type="code" id="EXkcXz-9BwB3"
# Build a small transformer network.
vocab_size = 100
sequence_length = 16
# Reuse the sequence_length variable instead of repeating the literal 16,
# so the encoder and the dummy data below cannot drift apart.
network = modeling.networks.TransformerEncoder(
    vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)
# + [markdown] colab_type="text" id="0NH5irV5KTMS"
# Inspecting the encoder, we see that it contains a few embedding layers and stacked `Transformer` layers, which are connected to three input layers:
#
# `input_word_ids`, `input_type_ids` and `input_mask`.
#
# + colab={} colab_type="code" id="lZNoZkBrIoff"
tf.keras.utils.plot_model(network, show_shapes=True, dpi=48)
# + colab={} colab_type="code" id="o7eFOZXiIl-b"
# Create a BERT pretrainer with the created network.
num_token_predictions = 8
bert_pretrainer = modeling.models.BertPretrainer(
network, num_classes=2, num_token_predictions=num_token_predictions, output='predictions')
# + [markdown] colab_type="text" id="d5h5HT7gNHx_"
# Inspecting the `bert_pretrainer`, we see it wraps the `encoder` with additional `MaskedLM` and `Classification` heads.
# + colab={} colab_type="code" id="2tcNfm03IBF7"
tf.keras.utils.plot_model(bert_pretrainer, show_shapes=True, dpi=48)
# + colab={} colab_type="code" id="F2oHrXGUIS0M"
# We can feed some dummy data to get masked language model and sentence output.
batch_size = 2
word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
masked_lm_positions_data = np.random.randint(2, size=(batch_size, num_token_predictions))
outputs = bert_pretrainer(
[word_id_data, mask_data, type_id_data, masked_lm_positions_data])
lm_output = outputs["masked_lm"]
sentence_output = outputs["classification"]
print(lm_output)
print(sentence_output)
# + [markdown] colab_type="text" id="bnx3UCHniCS5"
# ### Compute loss
# Next, we can use `lm_output` and `sentence_output` to compute `loss`.
# + colab={} colab_type="code" id="k30H4Q86f52x"
masked_lm_ids_data = np.random.randint(vocab_size, size=(batch_size, num_token_predictions))
masked_lm_weights_data = np.random.randint(2, size=(batch_size, num_token_predictions))
next_sentence_labels_data = np.random.randint(2, size=(batch_size))
mlm_loss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(
labels=masked_lm_ids_data,
predictions=lm_output,
weights=masked_lm_weights_data)
sentence_loss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(
labels=next_sentence_labels_data,
predictions=sentence_output)
loss = mlm_loss + sentence_loss
print(loss)
# + [markdown] colab_type="text" id="wrmSs8GjHxVw"
# With the loss, you can optimize the model.
# After training, we can save the weights of TransformerEncoder for the downstream fine-tuning tasks. Please see [run_pretraining.py](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_pretraining.py) for the full example.
#
#
# + [markdown] colab_type="text" id="k8cQVFvBCV4s"
# ## Span labeling model
#
# Span labeling is the task to assign labels to a span of the text, for example, label a span of text as the answer of a given question.
#
# In this section, we will learn how to build a span labeling model. Again, we use dummy data for simplicity.
# + [markdown] colab_type="text" id="xrLLEWpfknUW"
# ### Build a BertSpanLabeler wrapping TransformerEncoder
#
# [BertSpanLabeler](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_span_labeler.py) implements a simple single-span start-end predictor (that is, a model that predicts two values: a start token index and an end token index), suitable for SQuAD-style tasks.
#
# Note that `BertSpanLabeler` wraps a `TransformerEncoder`, the weights of which can be restored from the above pretraining model.
#
# + colab={} colab_type="code" id="B941M4iUCejO"
network = modeling.networks.TransformerEncoder(
vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)
# Create a BERT trainer with the created network.
bert_span_labeler = modeling.models.BertSpanLabeler(network)
# + [markdown] colab_type="text" id="QpB9pgj4PpMg"
# Inspecting the `bert_span_labeler`, we see it wraps the encoder with an additional `SpanLabeling` head that outputs `start_position` and `end_position`.
# + colab={} colab_type="code" id="RbqRNJCLJu4H"
tf.keras.utils.plot_model(bert_span_labeler, show_shapes=True, dpi=48)
# + colab={} colab_type="code" id="fUf1vRxZJwio"
# Create a set of 2-dimensional data tensors to feed into the model.
word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
# Feed the data to the model.
start_logits, end_logits = bert_span_labeler([word_id_data, mask_data, type_id_data])
print(start_logits)
print(end_logits)
# + [markdown] colab_type="text" id="WqhgQaN1lt-G"
# ### Compute loss
# With `start_logits` and `end_logits`, we can compute loss:
# + colab={} colab_type="code" id="waqs6azNl3Nn"
start_positions = np.random.randint(sequence_length, size=(batch_size))
end_positions = np.random.randint(sequence_length, size=(batch_size))
start_loss = tf.keras.losses.sparse_categorical_crossentropy(
start_positions, start_logits, from_logits=True)
end_loss = tf.keras.losses.sparse_categorical_crossentropy(
end_positions, end_logits, from_logits=True)
total_loss = (tf.reduce_mean(start_loss) + tf.reduce_mean(end_loss)) / 2
print(total_loss)
# + [markdown] colab_type="text" id="Zdf03YtZmd_d"
# With the `loss`, you can optimize the model. Please see [run_squad.py](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_squad.py) for the full example.
# + [markdown] colab_type="text" id="0A1XnGSTChg9"
# ## Classification model
#
# In the last section, we show how to build a text classification model.
#
# + [markdown] colab_type="text" id="MSK8OpZgnQa9"
# ### Build a BertClassifier model wrapping TransformerEncoder
#
# [BertClassifier](https://github.com/tensorflow/models/blob/master/official/nlp/modeling/models/bert_classifier.py) implements a [CLS] token classification model containing a single classification head.
# + colab={} colab_type="code" id="cXXCsffkCphk"
network = modeling.networks.TransformerEncoder(
vocab_size=vocab_size, num_layers=2, sequence_length=sequence_length)
# Create a BERT trainer with the created network.
num_classes = 2
bert_classifier = modeling.models.BertClassifier(
network, num_classes=num_classes)
# + [markdown] colab_type="text" id="8tZKueKYP4bB"
# Inspecting the `bert_classifier`, we see it wraps the `encoder` with additional `Classification` head.
# + colab={} colab_type="code" id="snlutm9ZJgEZ"
tf.keras.utils.plot_model(bert_classifier, show_shapes=True, dpi=48)
# + colab={} colab_type="code" id="yyHPHsqBJkCz"
# Create a set of 2-dimensional data tensors to feed into the model.
word_id_data = np.random.randint(vocab_size, size=(batch_size, sequence_length))
mask_data = np.random.randint(2, size=(batch_size, sequence_length))
type_id_data = np.random.randint(2, size=(batch_size, sequence_length))
# Feed the data to the model.
logits = bert_classifier([word_id_data, mask_data, type_id_data])
print(logits)
# + [markdown] colab_type="text" id="w--a2mg4nzKm"
# ### Compute loss
#
# With `logits`, we can compute `loss`:
# + colab={} colab_type="code" id="9X0S1DoFn_5Q"
labels = np.random.randint(num_classes, size=(batch_size))
loss = modeling.losses.weighted_sparse_categorical_crossentropy_loss(
labels=labels, predictions=tf.nn.log_softmax(logits, axis=-1))
print(loss)
# + [markdown] colab_type="text" id="mzBqOylZo3og"
# With the `loss`, you can optimize the model. Please see [run_classifier.py](https://github.com/tensorflow/models/blob/master/official/nlp/bert/run_classifier.py) or the colab [fine_tuning_bert.ipynb](https://github.com/tensorflow/models/blob/master/official/colab/fine_tuning_bert.ipynb) for the full example.
| research/object_detection/nlp_modeling_library_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (reco_gpu)
# language: python
# name: reco_gpu
# ---
# <i>Copyright (c) Microsoft Corporation. All rights reserved.</i>
#
# <i>Licensed under the MIT License.</i>
# ## FastAI Recommender
#
# This notebook shows how to use the [FastAI](https://fast.ai) recommender which is using [Pytorch](https://pytorch.org/) under the hood.
# +
# set the environment path to find Recommenders
import sys
sys.path.append("../../")
import time
import os
import itertools
import pandas as pd
import papermill as pm
import torch, fastai
from fastai.collab import *
from fastai.tabular import *
from reco_utils.dataset import movielens
from reco_utils.dataset.python_splitters import python_random_split
from reco_utils.evaluation.python_evaluation import map_at_k, ndcg_at_k, precision_at_k, recall_at_k
from reco_utils.evaluation.python_evaluation import rmse, mae, rsquared, exp_var
print("System version: {}".format(sys.version))
print("Pandas version: {}".format(pd.__version__))
print("Fast AI version: {}".format(fastai.__version__))
print("Torch version: {}".format(torch.__version__))
print("Cuda Available: {}".format(torch.cuda.is_available()))
print("CuDNN Enabled: {}".format(torch.backends.cudnn.enabled))
# -
# Defining some constants to refer to the different columns of our dataset.
USER, ITEM, RATING, TIMESTAMP, PREDICTION, TITLE = 'UserId', 'MovieId', 'Rating', 'Timestamp', 'Prediction', 'Title'
# + tags=["parameters"]
# top k items to recommend
TOP_K = 10
# Select Movielens data size: 100k, 1m, 10m, or 20m
MOVIELENS_DATA_SIZE = '100k'
# Model parameters
N_FACTORS = 40
EPOCHS = 5
# +
ratings_df = movielens.load_pandas_df(
size=MOVIELENS_DATA_SIZE,
header=[USER,ITEM,RATING,TIMESTAMP]
)
ratings_df.head()
# make sure the IDs are loaded as strings to better prevent confusion with embedding ids
ratings_df[USER] = ratings_df[USER].astype('str')
ratings_df[ITEM] = ratings_df[ITEM].astype('str')
# -
train_valid_df, test_df = python_random_split(ratings_df, ratio=[0.75, 0.25])
# ## Training
# fix random seeds to make sure our runs are reproducible
np.random.seed(101)
torch.manual_seed(101)
torch.cuda.manual_seed_all(101)
# +
start_time = time.time()
data = CollabDataBunch.from_df(train_valid_df, user_name=USER, item_name=ITEM, rating_name=RATING)
preprocess_time = time.time() - start_time
# -
data.show_batch()
# Now we will create a `collab_learner` for the data, which by default uses the [EmbeddingDotBias](https://docs.fast.ai/collab.html#EmbeddingDotBias) model. We will be using 40 latent factors. This will create an embedding for the users and the items that will map each of these to 40 floats as can be seen below. Note that the embedding parameters are not predefined, but are learned by the model.
#
# Although ratings can only range from 1-5, we are setting the range of possible ratings to a range from 0 to 5.5 -- that will allow the model to predict values around 1 and 5, which improves accuracy. Lastly, we set a value for weight-decay for regularization.
learn = collab_learner(data, n_factors=N_FACTORS, y_range=[0,5.5], wd=1e-1)
learn.model
# Now train the model for 5 epochs setting the maximal learning rate. The learner will reduce the learning rate with each epoch using cosine annealing.
# +
start_time = time.time()
learn.fit_one_cycle(EPOCHS, max_lr=5e-3)
train_time = time.time() - start_time + preprocess_time
print("Took {} seconds for training.".format(train_time))
# -
# Save the learner so it can be loaded back later for inferencing / generating recommendations
learn.export('movielens_model.pkl')
# ## Generating Recommendations
#
# Define two helper functions
# +
def cartesian_product(*arrays):
    """Return every combination of the input 1-D arrays as rows of a 2-D array.

    The output has one column per input array and len(a1)*len(a2)*... rows,
    ordered so the last input varies fastest (row-major order).
    """
    n_cols = len(arrays)
    out_dtype = np.result_type(*arrays)
    grid = np.empty([len(a) for a in arrays] + [n_cols], dtype=out_dtype)
    # np.ix_ builds broadcastable index grids; write one per output column.
    for col, axis_values in enumerate(np.ix_(*arrays)):
        grid[..., col] = axis_values
    return grid.reshape(-1, n_cols)
def score(learner, test_df, user_col, item_col, prediction_col, top_k=0):
    """score all users+movies provided and reduce to top_k items per user if top_k>0"""
    # replace values not known to the model with #na#
    # NOTE(review): the .loc assignments below mutate the caller's test_df in
    # place -- pass a copy if the original frame must stay intact.
    total_users, total_items = learner.data.classes.values()
    test_df.loc[~test_df[user_col].isin(total_users),user_col] = total_users[0]
    test_df.loc[~test_df[item_col].isin(total_items),item_col] = total_items[0]
    # map ids to embedding ids
    u = learner.get_idx(test_df[user_col], is_item=False)
    m = learner.get_idx(test_df[item_col], is_item=True)
    # score the pytorch model
    pred = learner.model.forward(u, m)
    scores = pd.DataFrame({user_col: test_df[user_col], item_col:test_df[item_col], prediction_col:pred})
    # highest predicted rating first within each user
    scores = scores.sort_values([user_col,prediction_col],ascending=[True,False])
    if top_k > 0:
        # keep only the top_k highest-scored items per user
        top_scores = scores.groupby(user_col).head(top_k).reset_index(drop=True)
    else:
        top_scores = scores
    return top_scores
# -
# Load the learner from disk.
learner = load_learner(path=Path('.'),
fname='movielens_model.pkl')
# Get all users and items that the model knows
total_users, total_items = learner.data.classes.values()
total_items = np.array(total_items[1:])
total_users = np.array(total_users[1:])
# Get all users from the test set and keep only those that were known in the training set
test_users = test_df[USER].unique()
test_users = np.intersect1d(test_users, total_users)
# Build the cartesian product of test set users and all items known to the model
users_items = cartesian_product(np.array(test_users),np.array(total_items))
users_items = pd.DataFrame(users_items, columns=[USER,ITEM])
#
# Lastly, remove the user/items combinations that are in the training set -- we don't want to propose a movie that the user has already watched.
training_removed = pd.concat([users_items, train_valid_df[[USER,ITEM]], train_valid_df[[USER,ITEM]]]).drop_duplicates(keep=False)
# ### Score the model to find the top K recommendation
# +
start_time = time.time()
top_k_scores = score(learner, test_df=training_removed,
user_col=USER, item_col=ITEM, prediction_col=PREDICTION, top_k=TOP_K)
test_time = time.time() - start_time
print("Took {} seconds for {} predictions.".format(test_time, len(training_removed)))
# -
# Calculate some metrics for our model
eval_map = map_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM,
col_rating=RATING, col_prediction=PREDICTION,
relevancy_method="top_k", k=TOP_K)
eval_ndcg = ndcg_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM,
col_rating=RATING, col_prediction=PREDICTION,
relevancy_method="top_k", k=TOP_K)
eval_precision = precision_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM,
col_rating=RATING, col_prediction=PREDICTION,
relevancy_method="top_k", k=TOP_K)
eval_recall = recall_at_k(test_df, top_k_scores, col_user=USER, col_item=ITEM,
col_rating=RATING, col_prediction=PREDICTION,
relevancy_method="top_k", k=TOP_K)
print("Model:\t" + learn.__class__.__name__,
"Top K:\t%d" % TOP_K,
"MAP:\t%f" % eval_map,
"NDCG:\t%f" % eval_ndcg,
"Precision@K:\t%f" % eval_precision,
"Recall@K:\t%f" % eval_recall, sep='\n')
# The above numbers are lower than [SAR](../sar_single_node_movielens.ipynb), but expected, since the model is explicitly trying to generalize the users and items to the latent factors. Next look at how well the model predicts how the user would rate the movie. Need to score `test_df`, but this time don't ask for top_k.
scores = score(learner, test_df=test_df,
user_col=USER, item_col=ITEM, prediction_col=PREDICTION)
# Now calculate some regression metrics
# +
eval_r2 = rsquared(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)
eval_rmse = rmse(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)
eval_mae = mae(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)
eval_exp_var = exp_var(test_df, scores, col_user=USER, col_item=ITEM, col_rating=RATING, col_prediction=PREDICTION)
print("Model:\t" + learn.__class__.__name__,
"RMSE:\t%f" % eval_rmse,
"MAE:\t%f" % eval_mae,
"Explained variance:\t%f" % eval_exp_var,
"R squared:\t%f" % eval_r2, sep='\n')
# -
# That RMSE is actually quite good when compared to these benchmarks: https://www.librec.net/release/v1.3/example.html
# Record results with papermill for tests
# NOTE(review): `pm.record` was deprecated and later removed from papermill
# (its successor is `scrapbook.glue`); confirm the pinned papermill version
# still provides it before relying on this cell.
pm.record("map", eval_map)
pm.record("ndcg", eval_ndcg)
pm.record("precision", eval_precision)
pm.record("recall", eval_recall)
pm.record("rmse", eval_rmse)
pm.record("mae", eval_mae)
pm.record("exp_var", eval_exp_var)
pm.record("rsquared", eval_r2)
pm.record("train_time", train_time)
pm.record("test_time", test_time)
| notebooks/00_quick_start/fastai_movielens.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import numba as nb
import math
import matplotlib.pyplot as plt
import pandas as pd
import time
from numba import njit, prange
from os import listdir
from os.path import isfile, join
# %matplotlib inline
plt.style.use(['science','ieee'])
# -
# # IMPORT THE TRAINING DATA
# Load the 24 training logs and expose them as module-level arrays
# t<i>, u<i>, v<i> (the njit cost functions below read these globals by name).
# globals() assignment replaces the original exec()-built statements: same
# effect, but without building and executing code from strings.
for i in range(0, 24):
    # First 12 logs are named "<i>.csv", the remaining ones "s<i-12>.csv".
    if i < 12:
        df = pd.read_csv('../data/training_data/' + str(i) + '.csv')
    else:
        df = pd.read_csv('../data/training_data/s' + str(i - 12) + '.csv')
    throttle = df.throttle
    brake = df.brake
    globals()['t{}'.format(i)] = np.array(df.t).astype('float64')
    globals()['u{}'.format(i)] = np.array([throttle, brake]).T.astype('float64')
    globals()['v{}'.format(i)] = np.array(df.v).astype('float64')
# # MAKE FUNCTION
# + [markdown] heading_collapsed=true
# ## Generate population
# + hidden=true
def generate_population(num, dim, rng):
    """
    Create the initial flower population.

    Input:
        num: number of flowers (integer)
        dim: number of parameters (integer)
        rng: per-parameter [low, high] initialization bounds (list or numpy array)
    Output:
        flws: initial positions of the flowers, shape (num, dim) (numpy array)
    """
    population = np.zeros((num, dim))
    # Draw each parameter column independently within its own bounds.
    for d in range(dim):
        low, high = rng[d][0], rng[d][1]
        population[:, d] = np.random.uniform(low, high, size=num)
    return population
# + [markdown] heading_collapsed=true
# ## Constraint
# + hidden=true
ndim = 17
@njit
def admissible(param):
    """Return True when the candidate parameter vector satisfies the sign and
    delay constraints (a*, c* non-positive; b1, b2 non-negative; all rounded
    delays non-negative)."""
    a1, a2, a3, b1, b2, b3, b4, c1, c2, c3, c4, td11, td12, td13, td21, td22, td23 = param
    violated = (a1 > 0. or a2 > 0. or a3 > 0. or b1 < 0. or b2 < 0.
                or c1 > 0. or c2 > 0.
                or np.around(td11) < 0 or np.around(td12) < 0 or np.around(td13) < 0
                or np.around(td21) < 0 or np.around(td22) < 0 or np.around(td23) < 0)
    return not violated
_ = admissible(np.random.randn(ndim))
# + [markdown] heading_collapsed=true
# ## Forward propagation
# -
@njit
def delayed_control_signal(i, u, u_list, td):
    """Return the control signal delayed by td samples.

    u is the current input, u_list the history of past inputs; before enough
    history exists (i < td) the delayed signal is taken to be 0.
    """
    if td == 0:
        return u
    if i < td:
        return 0.0
    return u_list[i - td]
_ = delayed_control_signal(1, 0.1, np.array([0.1, 0.2]), 0)
# + hidden=true
@njit
def forward_propagation(t, u, param):
    """Simulate the longitudinal speed model by explicit Euler integration.

    t: time stamps, shape (N,)
    u: control inputs, shape (N, 2) -- columns are (throttle, brake)
    param: 17 model parameters; the last six are rounded to whole-sample
        input delays for the throttle (td1x) and brake (td2x) channels.
    Returns the simulated speed trace, shape (N,), clipped at zero.
    """
    a1, a2, a3, b1, b2, b3, b4, c1, c2, c3, c4, td11, td12, td13, td21, td22, td23 = param
    # Delays are optimized as floats; round them to integer sample counts.
    td11 = int(np.around(td11))
    td12 = int(np.around(td12))
    td13 = int(np.around(td13))
    td21 = int(np.around(td21))
    td22 = int(np.around(td22))
    td23 = int(np.around(td23))
    # Input histories needed by the delayed-signal lookups.
    u1_list = np.empty(t.shape)
    u2_list = np.empty(t.shape)
    out = np.empty(t.shape)
    y = 0.0
    for i in range(t.shape[0]):
        # Variable time step from the time stamps (first step reuses t[1]-t[0]).
        if i == 0:
            dt = t[1] - t[0]
        else:
            dt = t[i] - t[i-1]
        u1, u2 = u[i]
        # Each model term sees the input through its own delay.
        u11t = delayed_control_signal(i, u1, u1_list, td11)
        u12t = delayed_control_signal(i, u1, u1_list, td12)
        u13t = delayed_control_signal(i, u1, u1_list, td13)
        u21t = delayed_control_signal(i, u2, u2_list, td21)
        u22t = delayed_control_signal(i, u2, u2_list, td22)
        u23t = delayed_control_signal(i, u2, u2_list, td23)
        # The constant drag/offset term a1 only acts while the vehicle moves.
        temp = 0.
        if y != 0.:
            temp = a1
        y_dot = temp + a2 * y + a3 * y**2 \
                + b1 * u11t + b2 * np.exp(b3 * y + b4 * u12t) * u13t \
                + c1 * u21t + c2 * np.exp(c3 * y + c4 * u22t) * u23t
        y += y_dot * dt
        # Speed cannot go negative.
        if y < 0.0:
            y = 0.0
        u1_list[i] = u1
        u2_list[i] = u2
        out[i] = y
    return out
_ = forward_propagation(np.arange(10, dtype=float), np.random.randn(10,2), np.ones(ndim))
# + [markdown] heading_collapsed=true
# ## Cost and metric
# + hidden=true
@njit
def cost(t, u, v, param):
    """Sum of squared errors between the measured speed v and the simulated response."""
    residual = v - forward_propagation(t, u, param)
    return np.dot(residual.T, residual)
_ = cost(np.arange(10, dtype=float), np.random.randn(10,2), np.random.randn(10), np.ones(ndim))
@njit
def accuracy(t, u, v, param):
    """Fit score in the style of R^2: 1 - ||residual|| / ||v - mean(v)||."""
    residual = v - forward_propagation(t, u, param)
    return 1.0 - np.linalg.norm(residual) / np.linalg.norm(v - np.mean(v))
_ = accuracy(np.arange(10, dtype=float), np.random.randn(10,2), np.random.randn(10), np.ones(ndim))
@njit
def mean_absolute_error(t, u, v, param):
    """Mean absolute simulation error over all samples."""
    residual = v - forward_propagation(t, u, param)
    return np.mean(np.abs(residual))
_ = mean_absolute_error(np.arange(10, dtype=float), np.random.randn(10,2), np.random.randn(10), np.ones(ndim))
@njit
def mean_squared_error(t, u, v, param):
    """Mean squared simulation error over all samples."""
    residual = v - forward_propagation(t, u, param)
    return np.mean(residual * residual)
_ = mean_squared_error(np.arange(10, dtype=float), np.random.randn(10,2), np.random.randn(10), np.ones(ndim))
@njit
def maximum_absolute_error(t, u, v, param):
    """Largest absolute simulation error (worst-case deviation)."""
    residual = v - forward_propagation(t, u, param)
    return np.max(np.abs(residual))
_ = maximum_absolute_error(np.arange(10, dtype=float), np.random.randn(10,2), np.random.randn(10), np.ones(ndim))
# -
# NOTE(review): this cell re-loads exactly the same CSVs as the loading cell
# earlier in the file -- presumably a leftover from interactive re-runs;
# redundant but harmless. It (re)creates the module-level t<i>, u<i>, v<i>
# arrays via exec.
for i in range(0,24):
    if i < 12:
        df = pd.read_csv('../data/training_data/' + str(i) + '.csv')
    else:
        df = pd.read_csv('../data/training_data/s' + str(i-12) + '.csv')
    throttle = df.throttle
    brake = df.brake
    exec("t{} = np.array(df.t).astype('float64')".format(i))
    exec("u{} = np.array([throttle, brake]).T.astype('float64')".format(i))
    exec("v{} = np.array(df.v).astype('float64')".format(i))
# + hidden=true
gain = np.array([1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0,
1.0, 1.0, 1.0])
@njit
def calculate_total_cost(param):
    """Gain-weighted average squared error of `param` over all 24 datasets.

    Returns np.Inf for parameter vectors that violate the constraints in
    `admissible`. The datasets t0..t23 / u0..u23 / v0..v23 and the `gain`
    weights are read as module-level globals; the sum of squared errors is
    normalized by the gain-weighted total sample count m.
    NOTE(review): the per-dataset lines are unrolled by hand because numba's
    nopython mode cannot iterate over dynamically named globals.
    """
    loss = np.Inf
    if admissible(param):
        loss = 0.
        m = 0.
        loss += cost(t0, u0, v0, param) * gain[0]
        loss += cost(t1, u1, v1, param) * gain[1]
        loss += cost(t2, u2, v2, param) * gain[2]
        loss += cost(t3, u3, v3, param) * gain[3]
        loss += cost(t4, u4, v4, param) * gain[4]
        loss += cost(t5, u5, v5, param) * gain[5]
        loss += cost(t6, u6, v6, param) * gain[6]
        loss += cost(t7, u7, v7, param) * gain[7]
        loss += cost(t8, u8, v8, param) * gain[8]
        loss += cost(t9, u9, v9, param) * gain[9]
        loss += cost(t10, u10, v10, param) * gain[10]
        loss += cost(t11, u11, v11, param) * gain[11]
        loss += cost(t12, u12, v12, param) * gain[12]
        loss += cost(t13, u13, v13, param) * gain[13]
        loss += cost(t14, u14, v14, param) * gain[14]
        loss += cost(t15, u15, v15, param) * gain[15]
        loss += cost(t16, u16, v16, param) * gain[16]
        loss += cost(t17, u17, v17, param) * gain[17]
        loss += cost(t18, u18, v18, param) * gain[18]
        loss += cost(t19, u19, v19, param) * gain[19]
        loss += cost(t20, u20, v20, param) * gain[20]
        loss += cost(t21, u21, v21, param) * gain[21]
        loss += cost(t22, u22, v22, param) * gain[22]
        loss += cost(t23, u23, v23, param) * gain[23]
        # Weighted sample counts for normalization.
        m += t0.shape[0] * gain[0]
        m += t1.shape[0] * gain[1]
        m += t2.shape[0] * gain[2]
        m += t3.shape[0] * gain[3]
        m += t4.shape[0] * gain[4]
        m += t5.shape[0] * gain[5]
        m += t6.shape[0] * gain[6]
        m += t7.shape[0] * gain[7]
        m += t8.shape[0] * gain[8]
        m += t9.shape[0] * gain[9]
        m += t10.shape[0] * gain[10]
        m += t11.shape[0] * gain[11]
        m += t12.shape[0] * gain[12]
        m += t13.shape[0] * gain[13]
        m += t14.shape[0] * gain[14]
        m += t15.shape[0] * gain[15]
        m += t16.shape[0] * gain[16]
        m += t17.shape[0] * gain[17]
        m += t18.shape[0] * gain[18]
        m += t19.shape[0] * gain[19]
        m += t20.shape[0] * gain[20]
        m += t21.shape[0] * gain[21]
        m += t22.shape[0] * gain[22]
        m += t23.shape[0] * gain[23]
        loss = loss / m
    return loss
_ = calculate_total_cost(np.ones(ndim))
# + hidden=true
@njit(parallel=True)
def flowers_cost(flowers):
    """Evaluate the total cost of every flower (row of `flowers`) in parallel."""
    n = flowers.shape[0]
    out = np.empty(n)
    for k in prange(n):
        out[k] = calculate_total_cost(flowers[k])
    return out
_ = flowers_cost(np.ones((2,ndim)))
# + [markdown] heading_collapsed=true
# ## Pollination
# + hidden=true
@njit(parallel = True)
def fpa(flowers, loss_flowers, global_, global_loss_, p, alpha, gamma, var, s0):
    """One iteration of the Flower Pollination Algorithm over the population.

    flowers/loss_flowers: current positions and their costs.
    global_/global_loss_: best-so-far position and cost.
    p: probability of global (Levy-flight) pollination; alpha/gamma/var:
        Levy-flight shape, step scale, and variance.
    Returns the updated (flowers, losses, best position, best cost).
    NOTE(review): parameter `s0` appears unused here -- confirm whether it
    was meant to scale the local step before removing it.
    """
    num = flowers.shape[0]
    dim = flowers.shape[1]
    out = np.empty(flowers.shape)
    temp = np.empty(dim)
    loss = np.empty(loss_flowers.shape)
    temp_loss = 0.
    # One uniform draw per flower decides global vs. local pollination.
    random_number = np.random.uniform(0., 1., num)
    for i in prange(num):
        # GLOBAL POLLINATION
        if random_number[i] < p:
            # Generate Levy Flight
            upper = np.random.normal(0., np.sqrt(var), size=dim)
            lower = np.abs(np.random.normal(0., 1., size=dim))**(1./alpha)
            L = np.divide(upper, lower)
            temp = flowers[i] + gamma * L * (global_ - flowers[i])
        # LOCAL POLLINATION
        else:
            # Draw two partner flowers; retry until i, i1, i2 are all distinct,
            # except for tiny populations (num <= 5) where duplicates are
            # tolerated to avoid a potentially endless retry loop.
            while(True):
                i1, i2 = np.random.randint(0, num, size=2)
                if i1==i2 or i1==i or i2==i:
                    if num <= 5: # For breaking the loop
                        None
                    else:
                        continue
                break
            epsilon = np.random.uniform(0.,1.)
            temp = flowers[i] + epsilon * (flowers[i1] - flowers[i2])
        # CALCULATE COST
        temp_loss = calculate_total_cost(temp)
        if np.isnan(temp_loss):
            temp_loss = np.Inf
        # UPDATE: greedy acceptance -- keep the move only if it improves.
        if temp_loss < loss_flowers[i]:
            out[i] = temp
            loss[i] = temp_loss
        else:
            out[i] = flowers[i]
            loss[i] = loss_flowers[i]
    # Refresh the global best from this iteration's population.
    min_idx = np.argmin(loss)
    min_loss = loss[min_idx]
    if global_loss_ > min_loss:
        global_loss_new = min_loss
        global_new = out[min_idx, :]
    else:
        global_new = global_
        global_loss_new = global_loss_
    return out, loss, global_new, global_loss_new
xx1 = np.ones((2, ndim))
xx2 = np.ones(2)
xx3 = np.random.randn(ndim)
_ = fpa(xx1, xx2, xx3, 100.0, 0.8, 1.5, 0.1, 0.69, 0.1)
# -
# # SIMULATION (OPTIMIZATION)
# +
num = 50
n_sim = 20
n_itr = 10000
r_a1 = [0., -2.]
r_a2 = [0., -2.]
r_a3 = [0., -2.]
r_b1 = [0., 2.]
r_b2 = [0., 2.]
r_b3 = [-2., 2.]
r_b4 = [-2., 2.]
r_c1 = [0., -2.]
r_c2 = [0., -2.]
r_c3 = [-2., 2.]
r_c4 = [-2., 2.]
r_td11 = [0, 15]
r_td12 = [0, 15]
r_td13 = [0, 15]
r_td21 = [0, 6]
r_td22 = [0, 6]
r_td23 = [0, 6]
rng = [r_a1, r_a2, r_a3,
r_b1, r_b2, r_b3, r_b4,
r_c1, r_c2, r_c3, r_c4,
r_td11, r_td12, r_td13,
r_td21, r_td22, r_td23
]
dim = len(rng)
s0 = 0.1
p_threshold = 0.8
alpha = 1.5
gamma = 0.1
var = (math.gamma(1+alpha)/alpha/math.gamma((1+alpha)/2) * np.sin(np.pi * alpha/2)/2**((alpha-1)/2))**(1/alpha)
# +
# Best parameter vector and loss found by each restart; the_best_* keep the
# full history of the single best restart.
# BUG FIX: param_history was commented out but is assigned below (NameError).
param_history = np.zeros((n_sim, dim))
loss_history = np.ones(n_sim) * np.Inf
the_best_param_history = np.zeros((n_itr, dim))
the_best_loss_history = np.zeros(n_itr)
for j in range(n_sim):
    # Fresh random restart of the population.
    flowers = generate_population(num, dim, rng)
    global_ = None
    global_loss_ = np.Inf
    loss_flowers = flowers_cost(flowers)
    loss_flowers[np.isnan(loss_flowers)] = np.Inf
    min_idx = np.argmin(loss_flowers)
    min_loss = loss_flowers[min_idx]
    if global_loss_ > min_loss:
        global_loss_ = min_loss
        global_ = flowers[min_idx, :]
    global_history = np.empty((n_itr, dim))
    global_history[0] = global_
    global_loss_history = np.empty(n_itr)
    global_loss_history[0] = global_loss_
    for i in range(1, n_itr):
        # Flower Pollination Algorithm
        flowers, loss_flowers, global_, global_loss_ = fpa(flowers, loss_flowers, global_, global_loss_, p_threshold, alpha, gamma, var, s0)
        if (i-1) % 500 == 0:
            print('simulation: {} || iteration: {} || global_loss: {:.5f}'.format(j+1, i, global_loss_))
        global_history[i] = global_
        global_loss_history[i] = global_loss_
    # Keep the history of the best restart seen so far.
    if np.min(loss_history) > global_loss_history[-1]:
        the_best_loss_history = np.copy(global_loss_history)
        the_best_param_history = np.copy(global_history)
    param_history[j] = np.copy(global_history[-1])
    loss_history[j] = np.copy(global_loss_history[-1])
    print('simulation: {} || the best loss: {:.10f}'.format(j+1, the_best_loss_history[-1]))
# -
# Save the simulation
np.save('result/param_history.npy', param_history)
np.save('result/loss_history.npy', loss_history)
np.save('result/the_best_loss_history.npy', the_best_loss_history)
np.save('result/the_best_param_history.npy', the_best_param_history)
f = open("result/sim.cfg", "w+")
f.writelines('num: {} # The number of flowers\n'.format(num))
f.writelines('n_sim: {} # The number of simulation loop\n'.format(n_sim))
f.writelines('n_itr: {} # The number of iteration for each simulation\n'.format(n_itr))
f.writelines('\n# The boundary of the initialization value\n')
f.writelines('\tr_a1: {}\n'.format(r_a1))
f.writelines('\tr_a2: {}\n'.format(r_a2))
f.writelines('\tr_a3: {}\n'.format(r_a3))
f.writelines('\tr_b1: {}\n'.format(r_b1))
f.writelines('\tr_b2: {}\n'.format(r_b2))
f.writelines('\tr_b3: {}\n'.format(r_b3))
f.writelines('\tr_b4: {}\n'.format(r_b4))
f.writelines('\tr_c1: {}\n'.format(r_c1))
f.writelines('\tr_c2: {}\n'.format(r_c2))
f.writelines('\tr_c3: {}\n'.format(r_c3))
f.writelines('\tr_c4: {}\n'.format(r_c4))
f.writelines('\tr_td11: {}\n'.format(r_td11))
f.writelines('\tr_td12: {}\n'.format(r_td12))
f.writelines('\tr_td13: {}\n'.format(r_td13))
f.writelines('\tr_td21: {}\n'.format(r_td21))
f.writelines('\tr_td22: {}\n'.format(r_td22))
f.writelines('\tr_td23: {}\n'.format(r_td23))
f.writelines('\n# The gain of the dataset\n')
for i in range(gain.shape[0]):
f.writelines('\tdata-{}: {}\n'.format(i, gain[i]))
f.writelines('\n# The FPA hyperparameters\n')
f.writelines('\ts0: {}\n'.format(s0))
f.writelines('\tp_threshold: {}\n'.format(p_threshold))
f.writelines('\talpha: {}\n'.format(alpha))
f.writelines('\tgamma: {}\n'.format(gamma))
f.writelines('\tvar: {}\n'.format(var))
f.close()
# # RESULT
# ## Load Model
# Load the model
the_best_param_history = np.load('result/the_best_param_history.npy')
the_best_loss_history = np.load('result/the_best_loss_history.npy')
global_ = the_best_param_history[-1]
global_loss_ = the_best_loss_history[-1]
param_name = ['a1', 'a2', 'a3', 'b1', 'b2', 'b3', 'b4', 'c1', 'c2', 'c3', 'c4', 'd11', 'd12', 'd13', 'd21', 'd22', 'd23']
print('The minimum loss: {}'.format(global_loss_))
print('Parameters:')
for i in range(len(param_name)):
print('{}: {}'.format(param_name[i], global_[i]))
# ## Visualization
# Visualization
idx = 12
exec("t = np.copy(t{})".format(idx))
exec("u = np.copy(u{})".format(idx))
exec("v = np.copy(v{})".format(idx))
test = forward_propagation(t, u, global_)
plt.plot(t, v, label='ground-truth')
plt.plot(t, test, label='prediction')
plt.legend()
plt.show()
# ## Cost and Metrics
acc = []
mse = []
maae = []
for i in range(24):
exec("acc.append(accuracy(t{}, u{}, v{}, global_))".format(i, i, i))
exec("mse.append(mean_squared_error(t{}, u{}, v{}, global_))".format(i, i, i))
exec("maae.append(maximum_absolute_error(t{}, u{}, v{}, global_))".format(i, i, i))
print('Accuracy')
for i in range(24):
print('Data ke-{}: {:.2f}%'.format(i+1, acc[i]*100))
print('Mean Squared Error')
for i in range(24):
print('Data ke-{}: {:.5f}'.format(i+1, mse[i]))
print('Maximum Absolute Squared Error')
for i in range(24):
print('Data ke-{}: {:.3f}'.format(i+1, maae[i]))
num = 0
for i in range(24):
exec("num += t{}.shape[0]".format(i))
vv = np.empty(num)
vv_gt = np.empty(num)
nn = 0
for i in range(24):
exec("n = t{}.shape[0]".format(i))
exec("vv[nn: nn+n] = forward_propagation(t{}, u{}, global_)".format(i, i))
exec("vv_gt[nn: nn+n] = v{}".format(i))
nn += n
total_accuracy = (1 - np.linalg.norm(vv_gt - vv)/np.linalg.norm(vv_gt - np.mean(vv_gt)))
print('Total Accuracy: {:.2f}%'.format(total_accuracy*100))
| S2_System_Identification/A3_FPA/System Identification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import pandas as pd

# Load the train/test sets; the PassengerId column becomes the index.
df = pd.read_csv('train.csv', index_col=0)
X_test = pd.read_csv('test.csv', index_col=0)

# Discard columns the model does not use (keep the label in the train frame).
df = df[['Survived', 'Pclass', 'Sex', 'Age', 'SibSp', 'Parch']]
X_test = X_test[['Pclass', 'Sex', 'Age', 'SibSp', 'Parch']]

# +
# Divide age into two categories, child (0-8] and adult.
df['Age'] = pd.cut(df['Age'], [0, 8, df['Age'].max()], labels=['child', 'adult'])
X_test['Age'] = pd.cut(X_test['Age'], [0, 8, X_test['Age'].max()], labels=['child', 'adult'])

# Combine siblings/spouses and parents/children into a single 'Family' count.
# FIX: `drop([...], 1)` relied on the positional `axis` argument, which was
# removed in pandas 2.0 -- use the explicit `columns=` form instead.
X_test['Family'] = X_test['SibSp'] + X_test['Parch']
X_test = X_test.drop(columns=['SibSp', 'Parch'])
# -

# Create the same 'Family' feature on the training data.
df['Family'] = df['SibSp'] + df['Parch']
df = df.drop(columns=['SibSp', 'Parch'])

# +
# Change categories to numbers (and cap Family at 4).
df['Sex'] = df['Sex'].apply(lambda x: 1 if x == "male" else 0)
df['Family'] = df['Family'].apply(lambda x: 4 if x > 4 else x)
df['Age'] = df['Age'].apply(lambda x: 1 if x == "adult" else 0)

X_test['Sex'] = X_test['Sex'].apply(lambda x: 1 if x == "male" else 0)
X_test['Family'] = X_test['Family'].apply(lambda x: 4 if x > 4 else x)
X_test['Age'] = X_test['Age'].apply(lambda x: 1 if x == "adult" else 0)
# -

# Obtain X_train and y_train from the dataframe.
X_train = df.drop('Survived', axis=1)
y_train = df['Survived']

# +
# Fill missing values (Age contains NaN) with the *column* median.
# FIX: the original used `sklearn.preprocessing.Imputer(strategy='median',
# axis=1)`. Imputer was removed in scikit-learn 0.22, and `axis=1` imputed
# with the row median -- the median of Pclass/Sex/Age/Family for a single
# passenger, which is meaningless. SimpleImputer imputes per column.
from sklearn.impute import SimpleImputer

imputer = SimpleImputer(strategy='median')
X_train = imputer.fit_transform(X_train)

imputer = SimpleImputer(strategy='median')
X_test = imputer.fit_transform(X_test)
# -

# Expand the features with all degree-2 polynomial combinations.
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(2)
X_train = poly.fit_transform(X_train)
X_test = poly.transform(X_test)  # reuse the fitted expansion; do not refit on test data

# Fit a logistic-regression classifier and report training accuracy.
from sklearn.linear_model import LogisticRegression
clf = LogisticRegression()
clf = clf.fit(X_train, y_train)
print("Training Accuracy:", clf.score(X_train, y_train))

# Predict on the test set; Kaggle's Titanic test passengers have ids 892..1309.
import numpy as np
result = pd.DataFrame(data=clf.predict(X_test), index=np.arange(892, 1310), columns=['Survived'])

# Save the result to csv file in the submission format.
result.index.name = 'PassengerId'
result.to_csv('result.csv')
| Titanic Dataset (kaggle.com)/TitanicKaggle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import covid_daily

# Helper listing the functions exposed by the package.
covid_daily.__functions__

# Worldometers overview table (one row per country).
data = covid_daily.overview()
data.head()
print(data.head())

# Time series for a single country/chart combination.
data = covid_daily.data(country='spain', chart='total-currently-infected-linear')
print(data.tail())

import matplotlib.pyplot as plt
data.plot()

# ---
# Chart identifiers accepted by covid_daily.data().
AVAILABLE_CHARTS = [
    'total-currently-infected-linear',
    'deaths-cured-outcome-small',
    'coronavirus-cases-linear',
    'graph-cases-daily',
    'graph-active-cases-total',
    'coronavirus-deaths-linear',
    'graph-deaths-daily',
    'cases-cured-daily',
    'deaths-cured-outcome'
]

# (row, col) positions for each of the 9 subplots in the 3x3 grid.
from itertools import product
pairs = list(product(range(3), range(3)))

import warnings
warnings.filterwarnings('ignore')

# +
# Fetch and plot every chart, one subplot each. Charts are skipped (not
# fatal) when the scrape fails, but the failure is now reported instead of
# being silently swallowed by a bare `except: pass`.
fig, axs = plt.subplots(3, 3, figsize=(20, 15))
for idx, chart in enumerate(AVAILABLE_CHARTS):
    try:
        data = covid_daily.data(country='spain', chart=chart)
        data.plot(ax=axs[pairs[idx]], title=chart)
    except Exception as exc:
        print('skipping chart {!r}: {}'.format(chart, exc))
fig.tight_layout()
fig.show()
# plt.savefig('covid-daily-plot.png', dpi=300)
| scraper/package-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# Make the repository root importable so the local `oqc` package resolves.
import sys
sys.path.append('../../')
# Libraries
import numpy as np
from oqc.optimal_quantum_control import OptimalQuantumControl, Direct_Fidelity_Estimation, Expected_Value_Pulse, circuit2schedule
from qiskit import IBMQ, transpile
from qiskit.utils import QuantumInstance
from scipy.linalg import expm
# Load the backend (requires previously saved IBM Quantum credentials).
IBMQ.load_account()
provider = IBMQ.get_provider(group='open')
backend = provider.get_backend('ibmq_armonk')  # single-qubit, pulse-enabled device
# +
# Define initial parameters
initial_control_params = np.random.random(10)  # random starting point for the optimiser
time_derivative = 64  # NOTE(review): presumably the pulse time step in backend dt units -- confirm against OptimalQuantumControl
target_gate = np.array([[0,1], [1, 0]])  # Pauli-X (NOT) gate as the target unitary
# Create the OptimalQuantumControl
control = OptimalQuantumControl(initial_control_params, backend, time_derivative, target_gate, False)
# Calculate the optimal parameters (runs the optimisation; may take a while)
optimal = control.control()
# -
optimal
# Build the pulse schedule realised by the optimal parameters and draw it.
schedule = control.grape_pulse(optimal)
schedule.draw()
# Estimate the fidelity of the schedule against the target gate (20 settings).
Direct_Fidelity_Estimation( schedule, target_gate, 20, backend )
| src/tests/Optimizing NOT Gate experimentally.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JUNTILLA-QUEENETHERESEJULIA/CPEN-21A-ECE-2-3/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="8Pk7U79HvHv8"
# #Midterm Exam
# + [markdown] id="4DjZk58CwaTh"
# ###Problem Statement 1
# + colab={"base_uri": "https://localhost:8080/"} id="7MM_985kweYx" outputId="7908d152-83e8-4eb4-fb82-60f91300655b"
# Problem 1: print the student's personal information.
a = "<NAME> <NAME>"
b = "202015865"
c = "19 Years Old"
d = "August 12,2002"
e = "BLK 2 LT 125 PH3 Mabuhay Homes 2000, Baranggay Paliparan II City of Dasmariñas, Cavite"
f = "Bachelor of Science in Electronics and Communications Engineering"
g = "1.65"
# Pair each label with its value and print them in the required order.
for label, value in zip(
    ("Full Name", "Student Number", "Age", "Birthday", "Address", "Course", "Last Sem GWA"),
    (a, b, c, d, e, f, g),
):
    print(f"{label}: {value}")
# + [markdown] id="dZjR5LcjzbXF"
# ###Problem Statement 2
# + colab={"base_uri": "https://localhost:8080/"} id="dpWIvppEzfd5" outputId="2149fab3-404e-4a95-f72a-093522c83cca"
# Problem 2: evaluate and print logical expressions for n = 4, answ = "Y".
n = 4
answ = "Y"
print(2 < n and n < 6)              # True and True  -> True
print(2 < n or n == 6)              # True or False  -> True
print(not 2 < n or n == 6)          # False or False -> False
print(not n < 6)                    # not True       -> False
print(answ == "Y" or answ == "y")   # True
print(answ == "Y" and answ == "y")  # False
print(not answ == "y")              # True
# FIX: in the next three statements the closing parenthesis of print() was
# misplaced, so part of each boolean expression was evaluated *outside* the
# call and silently discarded (e.g. `print(n==2 and n==7) or (answ=="Y")`
# printed only the left half). The full expressions are now printed.
print(((2 < n) and n == 5 + 1) or (answ == "no"))  # False or False -> False
print((n == 2 and n == 7) or (answ == "Y"))        # False or True  -> True
print((n == 2) and (n == 7 or answ == "Y"))        # False and ...  -> False
# + [markdown] id="Vqao8IGg4jku"
# ###Problem Statement 3
# + colab={"base_uri": "https://localhost:8080/"} id="yDVioJpB4nXq" outputId="78d754b5-3275-4c7f-bead-ef179aa56587"
# Problem 3: arithmetic/modulo/floor-division expressions on fixed operands.
x = 2
y = -3
w = 7
z = -10
# Evaluate every expression up front, then print the values in order.
results = (
    x / y,
    w / y / x,
    z / y % x,
    x % -y * w,
    x % y,
    z % w - y / x * 5 + 5,
    9 - x % (2 + y),
    z // w,
    (2 + y) ** 2,
    (w / x) * 2,
)
for value in results:
    print(value)
| Midterm_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="IeWWbNXWjTNl"
# #Just for text
# + id="5UwbVHQ1_0Op"
# Extract the document's raw text with docx2txt, then wrap it in BeautifulSoup.
import docx2txt
from bs4 import BeautifulSoup
text = docx2txt.process("Chartered Accountant.docx")
soup = BeautifulSoup(text)
# + id="wMbkUlTBLEiR" outputId="1769249c-c145-4345-9ef1-171a5e6c1c35" colab={"base_uri": "https://localhost:8080/", "height": 142}
# Text of the first <p>-like element found by the parser.
soup.p.text
# + id="sdFcqrEtAZ9O"
# Split into lines and strip surrounding whitespace from each line.
text = text.split("\n")
text = [i.strip() for i in text]
# + id="VZ1w0KA-Aakc" outputId="1154577d-8b9b-4e09-8bac-d81a85690fcd" colab={"base_uri": "https://localhost:8080/", "height": 142}
text
# + id="TtyAxJvaIVbi"
# NOTE(review): list.remove drops only the *first* empty string; later blank
# lines remain in the list -- confirm this is intended.
text.remove("")
# + id="LpIpykD9IsRd" outputId="c4d50129-4ce0-4149-b74c-8e92ba0c96ce" colab={"base_uri": "https://localhost:8080/"}
text
# + id="MhwDOPIuAqnS" outputId="f8b91b54-7bd5-46a0-ad2f-4e2d85ff9de3" colab={"base_uri": "https://localhost:8080/"}
# !pip install docx
# !pip install exceptions
# + id="wTO4hMQwCoM7"
# NOTE(review): `doc` is never defined in this notebook, so this cell raises
# NameError as written. It presumably needs `doc = Document(...)` from
# python-docx (used further below as `document`) -- confirm and fix.
# print the list of the runs
# in a specified paragraph
print('\nList of runs objects in 1st paragraph:->>>')
print(doc.paragraphs[0].runs)
# print the text in a paragraph
print('\nText in the 1st paragraph:->>>')
print(doc.paragraphs[0].text)
# for printing the complete document
print('\nThe whole content of the document:->>>\n')
for para in doc.paragraphs:
    print(para.text)
# + [markdown] id="rGrpag1jjYoF"
# #Best library ever for docx file
# + [markdown] id="JseBCKRfjdR1"
# ### It can extract text with heading style
# + id="DBHFFq7-ZtWW"
# !pip install docx
# !pip install python-docx
# + id="q9lQF2DpaTtz"
from docx import Document
# + id="4xye8RmcZtL7"
# Open the document with python-docx, which preserves style information.
document = Document("test.docx")
# + id="0NPXFjHkZ01f" outputId="0fa83290-27f3-433d-f572-e82b38dcb22f" colab={"base_uri": "https://localhost:8080/"}
styles = document.styles
styles
# + id="t134LWRpaido" outputId="ac2beb21-303f-48a2-e8e1-f7d4e8b771d7" colab={"base_uri": "https://localhost:8080/"}
styles["Normal"]
# + id="fxmB8gKpbA6W" outputId="205040ce-00b7-476e-a1ba-d550300d960e" colab={"base_uri": "https://localhost:8080/"}
# List the names of all paragraph-level styles defined in the document.
from docx.enum.style import WD_STYLE_TYPE
styles = document.styles
paragraph_styles = [s for s in styles if s.type == WD_STYLE_TYPE.PARAGRAPH]
for style in paragraph_styles:
    print(style.name)
# + id="cH9GhcWsgzMM" outputId="9534e505-6655-40d2-b200-48d06d6cb38d" colab={"base_uri": "https://localhost:8080/", "height": 35}
# Style name of the first paragraph.
document.paragraphs[0].style.name
# + id="Cc0IbtSXbJYT" outputId="c87de19b-f68a-4d86-bdd1-bc14647308fa" colab={"base_uri": "https://localhost:8080/"}
# Dump every paragraph prefixed with its style name (e.g. "Heading 1 : ...").
for i in document.paragraphs:
    print(i.style.name + " : "+ i.text)
# + id="hD7QdM2vdKxV"
| docx_file_handeling_with_style.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/virajvaidya/ValueAtRiskModel/blob/main/ASX5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="S5Rxc7GotW2S"
# !pip install yfinance
from scipy.stats import norm
import yfinance as yf
import pandas as pd
import numpy as np
import datetime as dt
import matplotlib.pyplot as plt
import requests
# + id="OigCcxkHtkE-"
# Ticker handles for the five ASX-listed companies in the portfolio.
apt = yf.Ticker("APT.AX") #AFTERPAY LTD
col = yf.Ticker("COL.AX") #COLES GROUP LTD
wow = yf.Ticker("WOW.AX") #WOOLWORTHS GROUP LTD
tls = yf.Ticker("TLS.AX") #TELSTRA CORPORATION LTD
art = yf.Ticker("ART.AX") #AIRTASKER LTD
# + [markdown] id="hBBfWZ8juSnq"
# #Stock Information
# + colab={"base_uri": "https://localhost:8080/"} id="bjZmZ1wVuUyE" outputId="da57a1b0-1ae3-41d5-8192-8d944babc9e7"
# Company metadata dictionaries fetched from Yahoo Finance (network calls).
apt.info
# + id="Gi3IyqQQuW1G"
col.info
# + id="UxuOydyBuYAh"
wow.info
# + id="j9gMzO1CuZmw"
tls.info
# + id="4Bx-BM9yuauy"
art.info
# + [markdown] id="ZxHEGVzqud-O"
# #Cashflow
# + colab={"base_uri": "https://localhost:8080/", "height": 635} id="H5eTFecDuggi" outputId="5d06eddb-4aa7-470b-db83-f054659d075c"
# Cash-flow statements for each ticker (fetched from Yahoo Finance).
apt.cashflow
# + id="DAh98oFJuiRu"
col.cashflow
# + id="pdJc63sVujvv"
wow.cashflow
# + id="y-upYt4hulFZ"
tls.cashflow
# + id="x12YKr46umlp"
art.cashflow
# + [markdown] id="NhwmvMqguwJx"
# #Balance Sheet
# + colab={"base_uri": "https://localhost:8080/", "height": 865} id="ZCMD17HTuxvy" outputId="104671c3-1aca-4857-a428-7b86569859a1"
# Balance sheets for each ticker.
apt.balance_sheet
# + id="uXtTez5Uu0rM"
col.balance_sheet
# + id="d2eXAGYuu17A"
wow.balance_sheet
# + id="QLAxWyRNu3QQ"
tls.balance_sheet
# + id="xiv46CZBu4ah"
art.balance_sheet
# + [markdown] id="bYicPzagvAll"
# #Data
# + id="HaTZQvjkvB9X"
# Full daily price history for each ticker (open/high/low/close/volume).
aptdata=yf.download("APT.AX", period='max')
coldata=yf.download("COL.AX", period='max')
wowdata=yf.download("WOW.AX", period='max')
tlsdata=yf.download("TLS.AX", period='max')
artdata=yf.download("ART.AX", period='max')
# + [markdown] id="5XSDLrc7vW_H"
# #5 Year Stock Price Graphs
# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="zYQK_tGSvWbO" outputId="b738de0a-32e4-4216-fad4-4451e759a909"
# For each stock: pull five years of history, rename the Close column so the
# five series can later be concatenated side by side, and plot it.
aptticker = yf.Ticker('APT.AX')
apt_df = aptticker.history(period="5y")
apt_df.rename(columns = {'Close':'APT Close'}, inplace = True)
apt_df['APT Close'].plot(title="Afterpay Stock Price")
# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="LPfxZ-Rgva1R" outputId="ecb54548-cd80-4bbd-d093-c628a3c60ad6"
colticker = yf.Ticker('COL.AX')
col_df = colticker.history(period="5y")
col_df.rename(columns = {'Close':'COL Close'}, inplace = True)
col_df['COL Close'].plot(title="Coles Group Stock Price")
# + colab={"base_uri": "https://localhost:8080/", "height": 302} id="eD4y0bfhvbcc" outputId="64279581-c2c5-4af1-a234-4a9164d24c6e"
wowticker = yf.Ticker('WOW.AX')
wow_df = wowticker.history(period="5y")
wow_df.rename(columns = {'Close':'WOW Close'}, inplace = True)
wow_df['WOW Close'].plot(title="Woolworths Group Stock Price")
# + colab={"base_uri": "https://localhost:8080/", "height": 302} id="wO7ILbW2vcEd" outputId="5bd0e22a-6be6-405c-a947-cb7ec7116f93"
tlsticker = yf.Ticker('TLS.AX')
tls_df = tlsticker.history(period="5y")
tls_df.rename(columns = {'Close':'TLS Close'}, inplace = True)
tls_df['TLS Close'].plot(title="Telstra Corporation Stock Price")
# + colab={"base_uri": "https://localhost:8080/", "height": 310} id="7YsZhnatvcf9" outputId="18d5bd48-9d1b-4425-e0ee-c6d7ebe5a6cd"
artticker = yf.Ticker('ART.AX')
art_df = artticker.history(period="5y")
art_df.rename(columns = {'Close':'ART Close'}, inplace = True)
art_df['ART Close'].plot(title="Airtasker Stock Price")
# + id="dt2FP3HGwjzO"
# Combine the five renamed Close series into one comparison DataFrame.
import pandas as pd
df = pd.concat([apt_df['APT Close'], col_df['COL Close'], wow_df['WOW Close'],tls_df['TLS Close'], art_df['ART Close']], axis=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="OARUr2qyw6tr" outputId="4bb1efad-37fa-4096-f0b9-eee35663e606"
df.head()
# + [markdown] id="pwy3pnGIxA8e"
# #5 year plot for comparison
# + colab={"base_uri": "https://localhost:8080/", "height": 373} id="tBqKXFBPxDvv" outputId="ebd1a58e-290a-440c-a3e6-138a95bdad8e"
# Plot all five close-price series on one enlarged figure for comparison.
# FIX: this cell previously ran `import matplotlib as plt`, rebinding the
# `plt` name (bound to matplotlib.pyplot earlier in the notebook) to the bare
# package and forcing the awkward `plt.pyplot.gcf()` call. Import pyplot
# consistently instead.
import matplotlib.pyplot as plt
df.plot()
fig = plt.gcf()
fig.set_size_inches(18.5, 10.5)
# + id="V3oLtTUqxJ63"
# Daily percentage returns for each stock.
returns=df.pct_change()
# + colab={"base_uri": "https://localhost:8080/", "height": 238} id="o1M-iy0kxMKc" outputId="837ca91f-32f8-40e1-b9e0-7adfb7f9659a"
returns.tail()
# + [markdown] id="wAgZlOw3xR0e"
# #Setting up a hypothetical portfolio
# + id="D21aT1vcxRIr"
import numpy as np
weights = np.array([.20, .20, .20, .20, .20]) #Allocating equal weights to the stocks in our portfolio
initinv = 1000000 #Initial investment in dollars
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="8jvGDdW5xcCE" outputId="cd9b9cb3-8e55-4487-c996-60163a7785c9"
# Covariance matrix of the daily returns (drives portfolio variance below).
cov_matrix = returns.cov()
cov_matrix
# + colab={"base_uri": "https://localhost:8080/"} id="4q-0IpVjxfVp" outputId="6e243dde-1513-4270-902a-b33e50248f01"
# Mean daily return of each stock.
avgreturns = returns.mean()
avgreturns
# + id="gFUb_mimxifN"
# Portfolio daily mean and standard deviation (w' * mu, sqrt(w' * Sigma * w)),
# then scaled to dollar terms for the initial investment.
portfoliomean = avgreturns.dot(weights)
portfoliostd = np.sqrt(weights.T.dot(cov_matrix).dot(weights))
invmean = (1+portfoliomean) * initinv   # expected portfolio value after one day
invstd = initinv * portfoliostd         # one-day dollar volatility
# + id="YiBw9b31xnTE"
# Parametric (variance-covariance) one-day VaR at 95% confidence:
# the dollar loss relative to the initial investment at the 5th percentile
# of the assumed normal distribution of portfolio value.
from scipy.stats import norm
conf = 0.05
confcutoff = norm.ppf(conf, invmean, invstd)
var = initinv - confcutoff
# + colab={"base_uri": "https://localhost:8080/"} id="yZutyX4rx3-v" outputId="314edc2c-9087-45e8-bece-2d1a04096cfb"
var
# + [markdown] id="fGTmvrgpx97k"
# #Interpretation
#
# The VaR value of AUD 29,198 computed above means that:
#
# We can say with 95% confidence that on an initial investment of AUD 1 million, our losses will NOT exceed AUD 29,198.
| ASX5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <font color = 'black' size = 5>Booleans and Operators</font>
#
# What are operators? Operators are special symbols in Python that carry out arithmetic or logical computation. The value that the operator operates on is called the operand. There are several types of operators in Python:
# <ol>
# <li><strong>Arithmetic operators</strong>: Arithmetic operators are used to perform mathematical operations like addition, subtraction, multiplication etc.</li>
# <li><strong>Relational operators</strong>: Relational operators are used to compare values. It either returns True or False according to the condition.</li>
# <li><strong>Logical operators</strong>: Logical operators are the ‘and’, ‘or’ and ‘not’ operators.</li>
# <li><strong>Bitwise operators</strong>: Bitwise operators act on operands as if they were string of binary digits. It operates bit by bit, hence the name.</li>
# <li><strong>Assignment operators</strong>: Assignment operators are used in Python to assign values to variables.</li>
# <li><strong>Special operators</strong>: Python language offers some special type of operators like the identity operator or the membership operator. They are described below with examples.
# <ol>
# <li>Identity operators: ‘is’ and ‘is not’ are the identity operators in Python. They are used to check if two values (or variables) are located on the same part of the memory. Two variables that are equal does not imply that they are identical</li>
# <li>Membership operators: in and not in are the membership operators in Python. They are used to test whether a value or variable is found in a sequence (string, list, tuple, set and dictionary).</li>
# </ol>
# </li>
# </ol>
# Relational operators between two variables (say ‘a’ and ‘b’) will return True if the relation between ‘a’ and ‘b’ evaluates to true, else False will be returned. The values True and False are called Boolean variables i.e. Booleans are variables which contain truth values.
# There are many relational operators used in Python, for instance greater than (>), equality (==) and so on. Suppose ‘a’ were 10 and ‘b’ were 20; then a>b would return False because ‘a’ is obviously less than ‘b’. </font>
#
# +
# --- Demonstrations of the main operator families ---

# Arithmetic operators; note that * binds more tightly than -.
result = 10 + 20
diff = 10 - 20 * 47
print(result)
print(diff)

# Boolean variable and a relational operator producing one.
myBool = True
var1 = 2   # assignment operator
var2 = 56
myBool = var1 <= var2
print(myBool)

# Bitwise left shift: shifting by 2 multiplies by 4.
bit = 40
bitop = bit << 2
print(bitop)

# Logical operator.
a = True
b = False
c = a and b
print(c)
# -
# <font color = 'black' size = 5>Conditionals</font>
# <p font-size=10>Decision making is anticipation of conditions occurring while execution of the program and specifying actions taken according to the conditions.
# Decision structures evaluate multiple expressions which produce TRUE or FALSE as outcome. You need to determine which action to take and which statements to execute if outcome is TRUE or FALSE otherwise.
# This is where conditional statements are used. Conditional statements deal with conditions – what to do if something is this way? What to do if it’s not?
# The conditionals offered in Python are ‘if’, ‘else if’ (written as elif) and ‘else’. We start with writing an if statement and then writing the commands (what to execute if the if statement holds true), then we write elif (only if and when required) and write the commands under it, and finally the else statement. Note that indentation is very, very important in Python, because unlike in other languages you don’t have brackets here to contain multiple commands under a single if statement.
# </p>
# +
# Compare a and b: print the larger one, or their sum when they are equal.
a = 100
b = 200
if a < b:
    print(b)
elif b < a:
    print(a)
else:
    print(a + b)
# -
# <font color = 'black' size =5>Loops</font>
# <p>In general, statements are executed sequentially: The first statement in a function is executed first, followed by the second, and so on. There may be a situation when you need to execute a block of code several number of times.
# Programming languages provide various control structures that allow for more complicated execution paths. A loop statement allows us to execute a statement or group of statements multiple times. We can control the number of times it is being done, and what all tasks are being done.
# There are multiple loops offered in Python, and the most basic one of them is the ‘while’ loop. Also, we don’t specify our iteration statement in a bracket (as we do in other languages), but instead write ‘while’ and then our statement. We then indent the loop statements below, and once we’re done with the loop statements we continue writing our program un-indented.
# +
n = 9
i = 0
# While loop: prints every other number from 1 up to n (1, 3, 5, 7, 9).
while i < n:
    print(i + 1)
    i += 2
# -
# <font color = 'black' size = 5>For Loop</font><br><br>
# Next is the ‘for’ loop. In a for loop we can specify the number of iterations by using the range() function. Range(a,b) will return all numbers between a and b (including a, excluding b), with an increment of 1. We can give it the step size (the amount by which the iterator is incremented) in input as well; for instance range(1,10,2) will print the alternate elements between 1 and 10 because step size is 2.
# There is one important type of statement to know, and that is the loop control statements. Loop control statements change execution from its normal sequence. When execution leaves a scope, all automatic objects that were created in that scope are destroyed. There are three types of loop control statements:
# <ol>
# <li><strong>break</strong> statement: It is used to break out of the loop the moment some condition occurs, and is useful for optimization (the loop doesn’t have to run unnecessarily once some required condition is fulfilled). </li>
# <li><strong>continue</strong> statement: It causes the loop to skip the remainder of its body and immediately retest its condition prior to reiterating.</li>
# <li><strong>pass</strong> statement: The pass statement in Python is used when a statement is required syntactically but you do not want any command or code to execute.</li>
#
# </ol>
#
# +
# For loop with a step of 5: range(1, 15, 5) yields 1, 6, 11.
for i in range(1, 15, 5):
    print(i)

# Loop control statements: 2 is skipped and the loop stops when it reaches 7,
# so the numbers printed are 1, 3, 4, 5, 6.
for i in range(1, 20):
    if i == 7:
        break
    if i != 2:
        print(i)
# -
# <p>There is a way to iterate fast and more conveniently over strings, arrays etc. using a ‘for’ loop. Suppose we have a string s, and we wish to iterate over it. We simply write ‘for x in s:’ and continue with our loop statements. Note that we did not have to write the range function or specify the string size; we simply wrote the string name and that is enough for iterating over every element. </p>
#
# </p>
# Fast iteration: a string is itself an iterable of characters,
# so no range()/len() indexing is needed to visit each one.
myStr = 'Coding Ninjas'
for i in list(myStr):
    print(i)
# <font size=5>Functions</font>
# <p>You must have come across functions on several occasions. Functions are used for writing blocks of code that can be used over and over again as per the demands of the program. A function is a block of organized, reusable code that is used to perform a single, related action. Functions provide better modularity for your application and a high degree of code reusing.</p><p> Functions can be written in various languages and for various purposes. Python gives you many built-in functions like print(), etc. but you can also create your own functions. These functions are called user-defined functions. Let's see how they're done in Python.</p>
# You can define functions to provide the required functionality. Here are simple rules to define a function in Python.
# <ol>
# <li>Function blocks begin with the keyword def followed by the function name and parentheses ( ( ) ).</li><li>
#
# Any input parameters or arguments should be placed within these parentheses. You can also define parameters inside these parentheses.</li><li>
#
# The first statement of a function can be an optional statement - the documentation string of the function or docstring.</li><li>
#
# The code block within every function starts with a colon (:) and is indented.</li><li>
#
# The statement return [expression] exits a function, optionally passing back an expression to the caller. A return statement with no arguments is the same as return None. </li>
# +
# Functions in Python
def product(x, y):
    """Return the product of the two arguments."""
    answer = x * y
    return answer

X = 20
Y = 4
print(product(X, Y))
# -
def demo(a):
    """Rebind the *local* name `a`; the caller's variable is unaffected."""
    a = a + 1

a = 20
demo(a)
print(a)  # still 20: the increment inside demo() never escapes the function
# <p>We can provide a function with default arguments so we need not bother giving it all arguments during calls. Let us see an example.</p>
# +
def result(x=10, y=20):
    """Return x + y; both parameters fall back to default values."""
    return x + y

print(result())        # 10 + 20 -> 30 (all defaults)
print(result(13))      # 13 + 20 -> 33 (y keeps its default)
print(result(11, 23))  # 11 + 23 -> 34 (no defaults used)
# -
# We can also pass any number of arguments we want; unlike in many other languages, we can have variable number of arguments in Python by using the *args argument.
# +
def show(*args):
    """Accept any number of positional arguments and print them as a tuple."""
    print(args)

show(10, 20)
show(10, 100, 2000)
| notes/2 loops and functions/Conditionals and Loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Rollout Starting Demo
#
# This notebook demonstrates our code's basic features by computing the rollout of expected improvement (EI) for horizon two. Let $y^*$ represent the current observed minimum and $(\cdot)^+ = \text{max}(\cdot, 0)$. Recall that EI is defined as $\text{EI}(\mathbf{x}) = \mathbb{E}[\big(y^* - y(\mathbf{x})\big)^+]$; that is, the expected reduction in the objective over the next step of BO. We define rollout for horizon two as:
#
# $$ \text{EI}_2(\mathbf{x}) = \mathbb{E}[\big(y^* - y_1(\mathbf{x}_1)\big)^+]. $$
#
# Our expectation is taken recursively with respect to $y_1$, which depends on $\mathbf{x}_1$, itself defined as:
#
# $$\mathbf{x}_1 = \text{argmax } \text{EI} (\mathbf{x} \;|\; y(\mathbf{x})).$$
#
# Thus, $\text{EI}_2(\mathbf{x})$ computes the expected reduction in the objective over the next two steps of BO, assuming EI is maximized at $\mathbf{x}$. $\text{EI}_2(\mathbf{x})$ has no analytic form, and so we estimate it via Monte Carlo integration (MC) as the average of $N$ sample paths. Each sample path has the following form: $y^{(i)} \rightarrow \mathbf{x}_1^{(i)} \rightarrow y_1^{(i)}$. $y^{(i)}$ is sampled from the GP posterior, $\mathbf{x}_1^{(i)}$ is chosen deterministically by maximizing EI, and then $y_1^{(i)}$ is sampled from the updated GP posterior at $\mathbf{x}_1^{(i)}$:
#
# $$ \text{EI}_2(\mathbf{x}) \approx \frac{1}{N}\sum_{i=1}^N\big(y^* - y_1^{(i)}(\mathbf{x}^{(i)}_1)\big)^+. $$
#
# Our demo follows.
# Basic import statements
import numpy as np
# %matplotlib inline
import matplotlib.pyplot as plt
# # Code overview
#
# The name of our package is `lookahead`, and is roughly based off of [Cornell MOE](https://github.com/wujian16/Cornell-MOE). `lookahead.acquisitions` contains the expected improvement, upper confidence bound, knowledge gradient, probability of improvement, and rollout acquisition functions. `lookahead.models` contains the necessary Gaussian process (GP) code.
#
# Below, we import two acquisition functions: EI and rollout of EI. We then import our GP wrapper, and defined our optimization domain as the unit hypercube in 1D. *Note*: our code will always assume that the data is normalized to the unit hypercube, and will not work otherwise.
from lookahead.acquisitions.expected_improvement import ExpectedImprovement
from lookahead.acquisitions.rollout_ei_vr import RolloutEI_VR as RolloutEI
from lookahead.model.gaussian_process import GaussianProcessSimple as GP
from lookahead.model.domain import ClosedInterval, TensorProductDomain
# Optimization domain: the 1D unit interval (the code assumes normalized data).
opt_domain = TensorProductDomain([ClosedInterval(0, 1)])
# # A toy problem
#
# Below, we build a GP assuming we have two observations at 0.15 and 0.85, both of which have value zero. Note that we fix the GP's hypers instead of learning them, for illustrative purposes. We plot the GP's mean in red, shade its uncertainty in magenta, and mark the two observations in black.
fig, ax = plt.subplots(1, 1)
# Two observations at x = 0.15 and x = 0.85, both with value 0.
xtrain = np.linspace(0.15, 0.85, 2)[:, None]
ytrain = np.array([0, 0])
gp = GP(xtrain, ytrain)
gp.set_hypers([5, 0.25])  # fixed hyperparameters for illustration (not learned)
# Dense grid for plotting the posterior mean +/- one standard deviation.
x = np.linspace(0, 1, 40)[:, None]
y_gp = gp.mean(x)
y_var = np.sqrt(gp.variance(x))  # posterior standard deviation
_ = ax.plot(x, y_gp, color='r')
_ = ax.plot(xtrain, ytrain, 'k.', markersize=15)
_ = ax.fill_between(x[:, 0], y_gp - y_var, y_gp + y_var, color='m', alpha=0.25)
_ = ax.legend(['GP mean','Observations','GP uncertainty'])
# # Comparing EI and Rollout EI
#
# Next, we plot the contours of both EI and Rollout EI for horizon 2 (EI2). EI2 is computed with Monte Carlo integration, combined with the variance reduction techniques we describe in our paper to make its computation quick. Notice that whereas EI favors sampling in the middle of the domain, EI2 favors sampling away from the middle of the domain! Note that EI2 will look a little noisy, because we are estimating it very quickly.
# +
(fig, ax) = plt.subplots(1, 2, figsize=(8, 3))
# Analytic EI evaluated on the plotting grid.
ei = ExpectedImprovement(gp, opt_domain)
ei_vals = ei.evaluate_at_point_list(x)
# EI2 estimated with Monte Carlo (mc_iters=20), maximizing the inner EI by
# grid search over 100 points. (The previous comment claimed 100 MC
# iterations and a grid of 50, contradicting the arguments below.)
ei2 = RolloutEI(gp, opt_domain, horizon=2, opt_mode='grid', mc_iters=20, grid_size=100)
ei2_vals = ei2.evaluate_at_point_list(x)
_ = ax[0].plot(x, ei_vals, '--g')
_ = ax[1].plot(x, ei2_vals, '--g')
_ = ax[0].set_title('EI Acquisition')
_ = ax[1].set_title('EI2 Acquisition')
# -
| demos/Rollout Starting Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/TwistedAlex/TensorFlow2Practice/blob/main/TF_TUTORIAL.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="EBgHaCxIeKX-"
# **Eager execution**
# Evaluate operations immediately, without building graphs: operations *return concrete values* instead of constructing a compuational graph to run later.
# *An intuitive interface*: Structure your code naturally and use Python data structures. Quickly iterate on small models and small data.
# *Easier debugging*: Call ops directly to inspect running models and test changes. Use standard Python debugging tools for immediate error reporting.
# *Natural control flow*: Use Python control flow instead of graph control flow, simplifying the specification of dynamic models.
# + [markdown] id="-jadZi_NfET-"
# **Setup and basic usage**
# + id="iYSr91h9fFHE"
import os
import tensorflow as tf
import cProfile  # NOTE(review): imported but not used in this chunk -- possibly used later
# Force eager mode (a no-op on TF2, where eager execution is the default).
tf.compat.v1.enable_eager_execution()
# + colab={"base_uri": "https://localhost:8080/"} id="3XAQxdWLjWDL" outputId="18a330e6-e731-4475-cad0-f8af79f29741"
# Eager execution is enabled by default in TF2.0
tf.executing_eagerly()
# Run TF ops and the results will return immediately:
x = [[2.]]
m = tf.matmul(x, x)  # 1x1 matrix product
print("hello, {}".format(m))
a = tf.constant([[1, 2], [3, 4]])
print(a)
# + [markdown] id="f1RIAi5p95Pp"
# Broadcasting
# + colab={"base_uri": "https://localhost:8080/"} id="noyh6Fax96ei" outputId="43d504dc-a3a3-43f3-ff69-e54c78977f3b"
# The scalar 1 is broadcast across every element of `a`.
b = tf.add(a, 1)
print(b)
# + [markdown] id="SVmLsK7_963i"
# Operator overloading
# + colab={"base_uri": "https://localhost:8080/"} id="CqnMdIA6-izf" outputId="b704a6e8-4a90-49d3-e681-2853c80837f3"
# Element-wise multiplication via the overloaded * operator.
print(a * b)
# + [markdown] id="q27eIQ5k-thE"
# Use NumPy values
# + colab={"base_uri": "https://localhost:8080/"} id="7dw1DZfE-uVY" outputId="33e2732c-b4c7-4ea5-9dbb-d08ab95de365"
# NumPy ops accept tensors directly; the result is a plain ndarray.
import numpy as np
c = np.multiply(a, b)
print(c)
# + [markdown] id="7SyjI92w_Boh"
# Obtain numpy value from a tensor:
# + colab={"base_uri": "https://localhost:8080/"} id="sDGadXb3_ErT" outputId="c92b43f9-db41-4b32-ec92-df373c551222"
print(a.numpy())
# + [markdown] id="5Je2GJFr_V0t"
# **Dynamic control flow**
#
# A major benefit of eager execution is that all the functionality of the host language is available while your model is executing. So, for example, it is easy to write fizzbuzz:
# + id="-T2gO-_8_53C"
def fizzbuzz(max_num):
    """Print the FizzBuzz sequence from 1 to max_num (inclusive).

    Demonstrates dynamic control flow under eager execution: plain Python
    conditionals branch on concrete tensor values at runtime.

    Args:
        max_num: Upper bound (int or scalar tensor); converted to a tensor.

    Returns:
        A scalar tf.Tensor counting the numbers processed. (Previously the
        counter was incremented but never used; returning it makes the
        tensor accumulation observable.)
    """
    counter = tf.constant(0)
    max_num = tf.convert_to_tensor(max_num)
    for num in range(1, max_num.numpy() + 1):
        num = tf.constant(num)
        # int(...) extracts the concrete value from the tensor so ordinary
        # Python control flow can branch on it.
        if int(num % 15) == 0:  # divisible by both 3 and 5
            print('FizzBuzz')
        elif int(num % 3) == 0:
            print('Fizz')
        elif int(num % 5) == 0:
            print('Buzz')
        else:
            print(num.numpy())
        counter += 1
    return counter
# + [markdown] id="Rr8OK7BAABRQ"
# This has conditionals that depend on tensor values and it prints these values at runtime.
# + colab={"base_uri": "https://localhost:8080/"} id="zGxXjvcx_88m" outputId="718cc0e8-81b7-431a-cf7e-5c8dab652aea"
fizzbuzz(15)
# + [markdown] id="MBKkprX7FiQI"
# **Eager training**
#
# Computing gradients
#
# **Automatic differentiation** is useful for implementing machine learning algorithms such as backpropagation for training neural networks. During eager execution, use tf.GradientTape to trace operations for computing gradients later.
#
# You can use [tf.GradientTape](https://www.tensorflow.org/api_docs/python/tf/GradientTape) to train and/or compute gradients in eager. It is especially useful for complicated training loops.
#
#
# Args
#
# persistent: Boolean controlling whether a persistent gradient tape is created. False by default, which means at most one call can be made to the gradient() method on this object.
#
# watch_accessed_variables: Boolean controlling whether the tape will automatically watch any (trainable) variables accessed while the tape is active. Defaults to True meaning gradients can be requested from any result computed in the tape derived from reading a trainable Variable. If False users must explicitly watch any Variables they want to request gradients from.
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="H_KPSMOkanMt" outputId="61689924-2eb5-4d64-cf9b-0c3d3b5b68df"
x = tf.constant(3.0)
with tf.GradientTape() as g:
g.watch(x)
y = x * x
dy_dx = g.gradient(y, x)
print(dy_dx)
# + [markdown] id="gOUkEO3V0KUq"
# **Train a model**
#
# The following example creates a multi-layer model that classifies the standard MNIST handwritten digits. It demonstrates the optimizer and layer APIs to build trainable graphs in an eager execution environment.
#
# Trainable variables (created by tf.Variable or tf.compat.v1.get_variable, where trainable=True is default in both cases) are automatically watched. Tensors can be manually watched by invoking the watch method on this context manager.
# + colab={"base_uri": "https://localhost:8080/"} id="5yUA8EbJFkAr" outputId="91fbb7aa-1260-4692-aa72-4ed9648ea42f"
# Fetch and format the mnist data
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[...,tf.newaxis]/255, tf.float32),
tf.cast(mnist_labels,tf.int64)))
dataset = dataset.shuffle(1000).batch(32)
# + id="VwMsq1bn0h5V"
# Build the model
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16,[3,3], activation='relu',
input_shape=(None, None, 1)),
tf.keras.layers.Conv2D(16,[3,3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
# + [markdown] id="zSF2Ir540tIK"
# Even without training, call the model and inspect the output in eager execution:
# + colab={"base_uri": "https://localhost:8080/"} id="pLNNRTx10t24" outputId="d49c3d78-3a69-47b7-d815-c8bba1d57602"
for images,labels in dataset.take(1):
print("Logits: ", mnist_model(images[0:1]).numpy())
# + [markdown] id="nhSaQt2bz6yg"
# While keras models have a builtin training loop (using the fit method), sometimes you need more customization. Here's an example of a training loop implemented with eager execution:
# + id="io1QBgSI1NN2"
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
loss_history = []
# + id="ot3iUhnU1RRF"
def train_step(images, labels):
    """Run one optimization step on a single batch.

    Forward pass under a GradientTape, record the mean batch loss in the
    module-level ``loss_history``, then apply gradients with the
    module-level ``optimizer``.

    Args:
        images: Batch of input images (assumed shape (batch, H, W, 1) —
            matches the MNIST pipeline above; confirm if reused elsewhere).
        labels: Integer class labels for the batch.
    """
    with tf.GradientTape() as tape:
        logits = mnist_model(images, training=True)
        # Check only the class dimension: asserting the full (32, 10)
        # shape would spuriously fail on a final partial batch.
        tf.debugging.assert_equal(logits.shape[-1], 10)
        loss_value = loss_object(labels, logits)
    loss_history.append(loss_value.numpy().mean())
    # grads = d(loss)/d(param)
    grads = tape.gradient(loss_value, mnist_model.trainable_variables)
    optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
# + id="5Ots2NuJ1XUD"
def train(epochs):
    """Train ``mnist_model`` for the given number of epochs over ``dataset``.

    Args:
        epochs: Number of full passes over the dataset.
    """
    for epoch in range(epochs):
        # Iterate batches directly; the enumerate() index was never used.
        for images, labels in dataset:
            train_step(images, labels)
        print('Epoch {} finished'.format(epoch))
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="Lk0-Uys11ZOK" outputId="8b40837a-807a-463a-ddd0-c6316c5b5eec"
train(epochs = 5)
# + colab={"background_save": true} id="iXLvGY5P1eFH" outputId="a0038635-d34a-4ae9-9518-35d5dc3be3db"
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
# + [markdown] id="U31POQY4JuGH"
#
# + [markdown] id="hI2adrV0mn7Y"
# Disable eager execution
# + colab={"base_uri": "https://localhost:8080/"} id="vFZvyYaolywK" outputId="a27e995f-0a21-4e9a-b3a4-8ffca0d2e4f2"
tf.compat.v1.disable_eager_execution()
a = tf.constant([[1, 2], [3, 4]])
print(a)
# + colab={"base_uri": "https://localhost:8080/"} id="ghyt0LWxl1bY" outputId="c223e361-78ab-4875-b50a-aaae388b1dd7"
b = tf.add(a, 1)
print(b)
# + id="i_OVp02V-xi5"
import numpy as np
c = np.multiply(a, b)
print(c)
# + [markdown] id="tXqf6bDilqtX"
#
| TF_TUTORIAL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="LzK7JiSZ_MtX"
# **This is an example Notebook for running training on Higgs vs background signal classification. **
# + [markdown] id="T4kV4R6SDR8X"
# **Background:** High-energy collisions at the Large Hadron Collider (LHC) produce particles that interact with particle detectors. One important task is to classify different types of collisions based on their physics content, allowing physicists to find patterns in the data and to potentially unravel new discoveries.
# + [markdown] id="fHw_uCCcHJqn"
# **Problem statement:** The discovery of the Higgs boson by CMS and ATLAS Collaborations was announced at CERN in 2012. In this work, we focus on the potential of Machine Learning and Deep Learning in detecting potential Higgs signal from one of the background processes that mimics it.
# + [markdown] id="qAJnE-4uANmP"
# **Dataset:** The dataset is made available by the Center for Machine Learning and Intelligent Systems at University of California, Irvine.
# The dataset can be found on the [UCI Machine learning Repository](https://archive.ics.uci.edu/ml/datasets/HIGGS)
# + [markdown] id="5u1kJGUdAZ9m"
# **Description:** The dataset consists of a total of 11 million labeled samples of Higgs vs background events produced by Monte Carlo simulations. Each sample consists of 28 features. The first 21 features are kinematic properties measured at the level of the detectors. The last seven are functions of the first 21.
#
#
#
# + [markdown] id="myu16dmzSBmo"
# **Steps to load the training dataset**
# 1. Download the dataset from the UCI website.
# + id="mrM-WOaxRWsP" colab={"base_uri": "https://localhost:8080/"} outputId="3b056576-ab9c-4ee1-e02c-f50b3132a9af"
# !wget https://archive.ics.uci.edu/ml/machine-learning-databases/00280/HIGGS.csv.gz
# + [markdown] id="0xHJtIbPVT-n"
# 2. Unzip the dataset folder
# + id="2J3v5kgbSrjx"
# !gzip -d HIGGS.csv.gz
# + id="MLREBn86VD5H"
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import confusion_matrix
# + id="MUE2QepFVwEq"
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
# + id="8H-i4o9RWNED"
import numpy as np
np.random.seed(1337) # for reproducibility
import h5py
from keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from keras.initializers import TruncatedNormal
from keras.layers import Input, Dense, Dropout, Flatten, Conv2D, MaxPooling2D
from keras.callbacks import ReduceLROnPlateau
from sklearn.metrics import roc_curve, auc
import matplotlib.pyplot as plt
# + [markdown] id="4GDeVfB04Qe4"
# **Load the file using pandas library**
# + id="Jxfnd8shK0vq"
data=pd.read_csv('./HIGGS.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 505} id="1P4trCjzL1MB" outputId="2a1356ea-314d-418d-b9e7-eb96a0d56de5"
data
# + [markdown] id="ZTnbxdjkUp73"
# Assign first column 0 to class labels (labeled 1 for signal, 0 for background) and all others to feature matrix X.
#
# In this example, for the sake of fast checking, we use 1000 samples. To train on the entire dataset, proceed with uncommenting the lines below.
# + id="hoRPhH9dNmCy"
X=data.iloc[:1000,1:]#data.iloc[:,1:]
y=data.iloc[:1000,0]#data.iloc[:,0]
# + [markdown] id="hQX3GUrVUe7N"
# Split your data: 20% is first held out for validation, then 20% of the remainder is held out for testing (the rest is used for training).
# + id="50-8I-fj8jC7"
X_train1, X_val, y_train1, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X_train1, y_train1, test_size=0.2, random_state=42)
# + [markdown] id="PexwSVQ34M_l"
# **Visualize your data - One histogram per feature column**
# + [markdown] id="d8UNRLc-5VPM"
# Detailed information on what each feature column is can be found in *Attribute Information* section on the [UCI Machine Learning Repository](https://archive.ics.uci.edu/ml/datasets/HIGGS). For further information, refer to the [paper](https://www.nature.com/articles/ncomms5308) by Baldi et al.
# + id="5BA9_Q-_B-8A" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="fd6631a5-0aec-48ce-ea74-a5a5b4bc4cf7"
from itertools import combinations
import matplotlib.pyplot as plt
fig, axes = plt.subplots(len(X_train.columns)//3, 3, figsize=(12, 48))
i = 0
for triaxis in axes:
for axis in triaxis:
X_train.hist(column = X_train.columns[i], bins = 100, ax=axis)
i = i+1
# + [markdown] id="pDoiz5yl3aUf"
# **Setup the Boosted Decision Tree model** (BDT explanation [here](https://docs.microsoft.com/en-us/azure/machine-learning/algorithm-module-reference/boosted-decision-tree-regression#:~:text=Boosting%20means%20that%20each%20tree,small%20risk%20of%20less%20coverage.))
# + id="tWBkXGr-aBuT"
classifier = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=1),
n_estimators=200
)
# + [markdown] id="szVdBJ2S34bW"
# **Train the Boosted Decision Tree model**
# + id="9B6CsEgpQru4"
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import load_breast_cancer
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import LabelEncoder
# + id="hlpgeOOR32zL" colab={"base_uri": "https://localhost:8080/"} outputId="9758f0b9-ea42-4946-ad9a-36c48aef9184"
classifier.fit(X_train, y_train)
# + [markdown] id="DBJaFEoGApL5"
# **Predict on new testing data**
# + id="UsRbhfAPQgEl"
predictions = classifier.predict(X_test)
# + [markdown] id="pYMnGKVGAeM2"
# **Print confusion matrix which describes the performance of the model classification by displaying the number of True Positives, True Negatives, False Positives and False Negatives. More info on [Wikipedia](https://en.wikipedia.org/wiki/Confusion_matrix)**
# + id="NREs3cPOQo4V" colab={"base_uri": "https://localhost:8080/"} outputId="4d719fc8-52d3-41e7-be6d-9681160662a0"
confusion_matrix(y_test, predictions)
# + [markdown] id="ULHd0Oto3om9"
# **Setup the Neural Network** (some useful info [here](https://towardsdatascience.com/a-gentle-introduction-to-neural-networks-series-part-1-2b90b87795bc))
# + id="k_I4kazT3_3c"
from numpy import loadtxt
from keras.models import Sequential
from keras.layers import Dense
# + id="2rcPG-UX3kkq"
model_nn = Sequential()
model_nn.add(Dense(28, input_dim=28, activation='relu'))
model_nn.add(Dense(8, activation='relu'))
model_nn.add(Dense(1, activation='sigmoid'))
# + [markdown] id="udhrPh3c4JoT"
# **Train the Neural Network and save your model weights in a h5 file**
# + id="gDEm75CK3-TT" colab={"base_uri": "https://localhost:8080/"} outputId="5bb35725-b04f-448b-eeb2-2371a978df50"
# Compile the keras model for binary classification.
model_nn.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# Fit on the *training* split only. The original call trained on the full
# X/y, which leaks the validation and test rows into training.
history = model_nn.fit(X_train, y_train, validation_data=(X_val, y_val),
                       epochs=20, batch_size=10)
# Evaluate on the held-out test split, not on data seen during training.
_, accuracy = model_nn.evaluate(X_test, y_test)
model_nn.save('my_model.h5')  # Saving model weights
print('Accuracy: %.2f' % (accuracy * 100))
# + id="2rVEZ6oj_Km9" colab={"base_uri": "https://localhost:8080/"} outputId="da1fbef3-630f-42d5-b275-0239f9fdeab5"
# list all data in history
print(history.history.keys())
# + [markdown] id="8KAg7JEiU3Na"
# **Plot accuracy wrt number of epochs**
# + id="8wQsZEa9PYJw" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="19ce9982-10e8-46e1-fc82-071fb1f3f669"
# summarize history for accuracy
plt.plot(history.history['accuracy'])
plt.plot(history.history['val_accuracy'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + [markdown] id="mb_eTlfgUzsR"
# **Plot training loss wrt number of epochs**
# + id="-jyiKksT_JjN" colab={"base_uri": "https://localhost:8080/", "height": 295} outputId="011cfb70-cce6-44fd-c194-570b6e491a51"
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
# + id="iYlPzQroWxiC"
y_pred=model_nn.predict(X_test)
# + id="KPaO498pWI_o" colab={"base_uri": "https://localhost:8080/"} outputId="b253ec9c-59cc-4eae-d901-3b92ef449e27"
confusion_matrix(y_test, y_pred.round())
# + [markdown] id="Pal8tfejcsno"
# **Plot the ROC (Receiver Operating Characteristic) Curve** (more info on ROC could be found [here](https://en.wikipedia.org/wiki/Receiver_operating_characteristic))
# + id="qQlwEMrocVme" colab={"base_uri": "https://localhost:8080/"} outputId="f545ebcd-cc8b-4ffe-c5b9-82ac336f07e9"
# !pip install plot-metric
# + id="Twqf5QaocaAQ" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="f1e5fc0f-dd7a-4959-bd9e-f437191cd4e8"
from plot_metric.functions import BinaryClassification
# Visualisation with plot_metric. BinaryClassification expects
# (y_true, y_pred, ...): the original call passed them swapped. Also use
# the raw predicted probabilities, not .round() — a ROC curve needs scores;
# rounding collapses it to a single operating point.
bc = BinaryClassification(y_test, y_pred, labels=["Class 1", "Class 2"])
# Figures
plt.figure(figsize=(5, 5))
bc.plot_roc_curve()
plt.show()
# + [markdown] id="Kla2ToH7-XkU"
# **Deliverables**
#
# Please submit the following:
#
# * Your full notebook used for training including the ROC Curves, model weights and loss and accuracy plots wrt number of epochs.
#
#
#
# + [markdown] id="T3MNJbXN30sy"
# **References**
#
# Baldi, P., Sadowski, P. and Whiteson, D. "Searching for Exotic Particles in High-energy Physics with Deep Learning." Nature Communications 5 (July 2, 2014).
#
#
| Higgs_Classification/higgs_classification (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Perform spatial queries on points retrieved from TileDB
#
# Note if querying for large areas then you should consider using a larger notebook or breaking the area into smaller queries and using [user defined functions](https://docs.tiledb.com/cloud/client-api/serverless-array-udfs)
# +
import time
import numpy as np
import pandas as pd
import geopandas as gpd
import shapely
import tiledb
tiledb_uri = 'tiledb://exactEarth/ee_ais'
config = tiledb.Config()
# Set value
config["sm.memory_budget"] = 50_000_000
config["sm.memory_budget_var"] = 50_000_000
ctx = tiledb.Ctx(config)
# +
# %%time
t1 = np.datetime64('2019-07-01T00:00:00')
t2 = np.datetime64('2019-07-02T00:00:00')
x1, x2, y1, y2 = [-1.7, -1.2, 50, 51.0]
st = time.time()
with tiledb.open(tiledb_uri, ctx=ctx) as arr:
pts = arr.query(attrs=["mmsi"], dims=["longitude", "latitude"]).multi_index[t1:t2, x1:x2, y1:y2]
print(f"Retrieved {len(pts['longitude'])} vessels")
# -
# We will make a GeoPandas dataframe to perform spatial queries
df = pd.DataFrame(pts)
gdf = gpd.GeoDataFrame(df, geometry=gpd.points_from_xy(df.longitude, df.latitude), crs='epsg:4326')
# We will quickly inspect the array
gdf.head()
# We will pick one vessel and buffer the location to retrieve all of the vessels nearby
# +
# %%time
pt = gdf.geometry[1000]
thresh = 0.125
bowtie = shapely.geometry.Polygon([
(pt.x - thresh, pt.y + thresh),
(pt.x, pt.y),
(pt.x - thresh, pt.y - thresh),
(pt.x + thresh, pt.y - thresh),
(pt.x, pt.y),
(pt.x + thresh, pt.y + thresh),
(pt.x - thresh, pt.y + thresh)]).buffer(0) # buffer by zero cleans the intersecting point
r = gpd.clip(gdf, bowtie)
# -
# note edge of original query in the result dataset
r.plot()
# Alternatively use the spatial index directly
# +
# %%time
spatial_index = gdf.sindex
possible_matches_index = list(spatial_index.intersection(bowtie.bounds))
possible_matches = gdf.iloc[possible_matches_index]
precise_matches = possible_matches[possible_matches.intersects(bowtie)]
# -
precise_matches.plot()
# Plot with datashader
# +
import datashader as ds
from datashader.utils import lnglat_to_meters
import holoviews as hv
import holoviews.operation.datashader as hd
from holoviews.element import tiles
from matplotlib import cm
hv.extension("bokeh", "matplotlib")
# -
hv.output(backend="bokeh")
r.loc[:, 'x'], r.loc[:, 'y'] = lnglat_to_meters(r.longitude, r.latitude)
bkgrd = tiles.EsriImagery().opts(xaxis=None, yaxis=None, width=700, height=500)
points = hv.Points(r, ['x', 'y'])
bkgrd * hd.datashade(points, cmap=cm.inferno)
| geo/exactEarth/Ships_SpatialQueries.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Beginner's Python Cheat Sheets
# ## 1. Variables and Strings
# #### Hello World!
print("Hello World!")
# #### Hello World with a variable
msg = "Hello World!"
print(msg)
# Concatenation (Combining Strings)
first_name = 'Albert'
last_name = 'Einstein'
full_name = first_name + ' ' + last_name
print(full_name)
# ## 2. Lists
# #### Make a list
bikes = ['trek','redline', 'giant']
# #### Get the first item in a list
first_bike = bikes[0]
print(first_bike)
# #### Get the last item in a list
last_bike = bikes[-1]
print(last_bike)
# #### Looping through a list
for bike in bikes:
print(bike)
# #### Adding items to a list
bikes = []
bikes.append('trek')
bikes.append('redline')
bikes.append('giant')
print(bikes)
# #### Making numerical lists
# +
squares = []
for num in range(1,11):
squares.append(num**2)
print(squares)
# -
# #### List Comprehensions
squares = [num**2 for num in range(1,11)]
print(squares)
# #### Slicing a list
finishers = ['sam','bob','ada','bea']
first_two = finishers[:2]
print(first_two)
# #### Copying a list
copy_of_bikes = bikes[:]
print(copy_of_bikes)
# ## 3. Tuples
# #### Making a tuple
dimensions = (1920, 1080)
print(dimensions)
# ## 4. If statements
# #### Conditional tests
# Equals
x = 42
if x== 42:
print('equals')
# Not equals
x = 40
if x!= 42:
print('not equals')
# Greater than
x = 45
if x > 42:
print('greater than 42')
# Greater than or equal to
x = 42
if x >= 42:
print('greater than or equal to 42')
# Less than
x = 41
if x < 42:
print('less than 42')
# Less than or equal to
x = 41
if x <= 42:
print('less than or equal to 42')
# #### Conditional test with lists
if 'trek' in bikes:
print('yes')
if 'surly' not in bikes:
print('yes')
# #### Assigning boolean values
game_active = True
print(game_active)
can_edit = False
print(can_edit)
# #### A simple If test
age = 20
if age >= 18:
print('You can vote!')
# #### If-elif-else statements
if age < 4:
ticket_price = 0
elif age < 18:
ticket_price = 10
else:
ticket_price = 15
print(ticket_price)
# ## 5. Dictionaries
# #### A simple dictionary
alien = {'color':'green', 'point':5}
print(alien)
# #### Accessing a value
print("The alien's color is " + alien['color'])
# #### Adding a new key-value pair
alien['x_position'] = 0
print(alien)
# #### Looping through all key-value pairs
fav_numbers = {'eric': 17, 'ever': 4}
for name, number in fav_numbers.items():
print(name + ' loves ' + str(number))
# #### Looping through all keys
fav_numbers = {'eric': 17, 'ever': 4}
for name in fav_numbers.keys():
print(name + ' loves a number')
# #### Looping through all the values
fav_numbers = {'eric': 17, 'ever': 4}
for number in fav_numbers.values():
print(str(number) + ' is a favorite')
# ## 6.User input
# #### Prompting for a value
name = input("What's your name? ")
print("Hello, " + name + "!")
# #### Prompting for numerical input
age = input("How old are you? ")
age = int(age)
print(age)
pi = input("What's the value of pi? ")
pi = float(pi)
print(pi)
# ## 7. While loops
# #### A simple while loop
current_value = 1
while current_value <= 5:
print(current_value)
current_value += 1
# #### Letting the user choose when to quit
msg = ''
while msg != 'quit':
msg = input("What's your message? ")
print(msg)
# ## 8.Functions
# #### A simple function
def greet_user():
    """Print a short, fixed greeting to the console."""
    message = "Hello!"
    print(message)
greet_user()
# #### Passing an argument
# +
def greet_user(username):
    """Print a greeting personalized with *username*."""
    greeting = "Hello, " + username + "!"
    print(greeting)
greet_user('Jesse')
# -
# #### Default values for parameters
# +
def make_pizza(topping='bacon'):
    """Announce a single-topping pizza; the topping defaults to bacon."""
    announcement = "Have a " + topping + " pizza!"
    print(announcement)
make_pizza()
make_pizza('pepperoni')
# -
# #### Returning a value
# +
def add_numbers(x, y):
    """Return the sum of *x* and *y*."""
    total = x + y
    return total
sum = add_numbers(3, 5)
print(sum)
# -
# ## 8. Classes
# #### Creating a dog class
# +
class Dog():
    """Model a simple dog identified by its name."""

    def __init__(self, name):
        """Remember the dog's name."""
        self.name = name

    def sit(self):
        """Announce that the dog is sitting."""
        status = self.name + " is sitting."
        print(status)
my_dog = Dog('Peso')
print(my_dog.name + " is a great dog!")
my_dog.sit()
# -
# #### Inheritance
class SARDog(Dog):
    """A search-and-rescue dog; extends Dog with a search behavior."""

    def __init__(self, name):
        """Delegate initialization to the parent Dog class."""
        super().__init__(name)

    def search(self):
        """Announce that the dog is searching."""
        status = self.name + " is searching."
        print(status)
my_dog = SARDog('Willie')
print(my_dog.name + " is a search dog.")
my_dog.sit()
my_dog.search()
| Python-Data Science.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.8 64-bit (''base'': conda)'
# name: python3
# ---
type(10)
help(int)
int(10.5)
int((10.00000))
int(True)
int(False)
import fractions
a = fractions.Fraction(22, 7)
a
print(a)
float(a)
int(a)
int("12345")
int("101", 2)
int("FF", 16)
int("ff", 16)
int("A", 11)
int("B", 11)
bin(10)
oct(10)
hex(255)
a = int('101', 2)
b = 0b101
a
b
def from_base10(n, b):
    """Convert a non-negative base-10 integer to its digits in base b.

    Args:
        n: Non-negative integer to convert.
        b: Target base; must be >= 2.

    Returns:
        List of digit values, most significant first
        (e.g. from_base10(10, 2) -> [1, 0, 1, 0]).

    Raises:
        ValueError: If b < 2 or n < 0.
    """
    if b < 2:
        raise ValueError('Base b must be >= 2')
    if n < 0:
        raise ValueError("Number n must be >= 0")
    if n == 0:
        return [0]
    digits = []
    # Collect digits least-significant first and reverse once at the end:
    # list.insert(0, ...) inside the loop would make this O(n^2) overall.
    while n > 0:
        n, m = divmod(n, b)
        digits.append(m)
    digits.reverse()
    return digits
from_base10(10, 2)
from_base10(255, 16)
def encode(digits, digit_map):
    """Translate each digit to its character via digit_map and concatenate.

    Raises ValueError if some digit has no character in digit_map.
    """
    if max(digits) >= len(digit_map):
        raise ValueError("digit_map is not long enough to encode the digits")
    return ''.join(digit_map[d] for d in digits)
encode([15, 15], '0123456789ABCDEF')
def encode(digits, digit_map):
    """Encode a digit list as a string using digit_map.

    Args:
        digits: List of non-negative digit values.
        digit_map: String whose index d gives the character for digit d.

    Returns:
        The encoded string.

    Raises:
        ValueError: If some digit has no character in digit_map.
    """
    if max(digits) >= len(digit_map):
        raise ValueError("digit_map is not long enough to encode the digits")
    # Removed the dead commented-out += loop; the join does the same thing.
    return ''.join([digit_map[d] for d in digits])
encode([15, 15], '0123456789ABCDEF')
def rebase_from10(number, base):
    """Re-encode a base-10 integer as a string in the given base.

    Args:
        number: Integer to convert; may be negative.
        base: Target base, 2 <= base <= 36.

    Returns:
        String representation using digits 0-9 then A-Z, with a leading
        '-' for negative numbers.

    Raises:
        ValueError: If base is outside [2, 36].
    """
    # Standard base-36 alphabet. Bug fix: the original string contained a
    # second 'B' where 'G' belongs, so digit value 16 encoded incorrectly
    # (and round-tripping via int(s, base) broke for bases >= 18).
    digit_map = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if base < 2 or base > 36:
        raise ValueError('Invalid base: 2 <= base <= 36')
    sign = -1 if number < 0 else 1
    number *= sign
    digits = from_base10(number, base)
    encoding = encode(digits, digit_map)
    if sign == -1:
        encoding = '-' + encoding
    return encoding
e = rebase_from10(10, 2)
print(e)
print(int(e, base=2))
e = rebase_from10(314, 2)
print(e)
print(int(e, base=2))
e = rebase_from10(-314, 2)
print(e)
print(int(e, base=2))
e = rebase_from10(3451, 16)
print(e)
print(int(e, base=16))
e = rebase_from10(-3451, 16)
print(e)
print(int(e, base=16))
| my_classes/NumericTypes/integers_constructors_bases.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="3-7qi9TkyHut"
#importing libraries
import pandas as pd
import numpy as np
# # !pip install seaborn
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder
# + colab={} colab_type="code" id="pbNTxRwSy5I9"
test_file=pd.read_csv('datasets/test_file.csv')
train_file=pd.read_csv('datasets/train_file.csv')
meal_file=pd.read_csv('datasets/meal_info.csv')
fullfilment_file=pd.read_csv('datasets/fulfilment_center_info.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1861, "status": "ok", "timestamp": 1592037395513, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="-VrpKruCy5Sm" outputId="4e904fb7-e086-4300-e958-5fcdcddc278a"
df=test_file.copy()
print(df.head())
# + colab={} colab_type="code" id="SejBO10Sy5VR"
df.rename(columns={"id,week,center_id,meal_id,checkout_price,base_price,emailer_for_promotion,homepage_featured":"new"},inplace=True)
# + colab={} colab_type="code" id="cuI3g_Ady5X9"
# df['id']=df.new.str.split(',').str[0]
# df['week']=df.new.str.split(',').str[1]
# df['center_id']=df.new.str.split(',').str[2]
# df['meal_id']=df.new.str.split(',').str[3]
# df['checkout_price']=df.new.str.split(',').str[4]
# df['base_price']=df.new.str.split(',').str[5]
# df['emailer_for_promotion']=df.new.str.split(',').str[6]
# df['homepage_featured']=df.new.str.split(',').str[7]
# df=df.iloc[:,1:]
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1445, "status": "ok", "timestamp": 1592037430960, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="GszGGnBtzmfh" outputId="63ef1f8b-4380-43f8-a33f-55859778ea6a"
df['id']=df['id'].astype('int')
df['week']=df['week'].astype('int')
df['center_id']=df['center_id'].astype('int')
df['meal_id']=df['meal_id'].astype('int')
df['checkout_price']=df['checkout_price'].astype('float')
df['base_price']=df['base_price'].astype('float')
df['emailer_for_promotion']=df['emailer_for_promotion'].astype('int')
df['homepage_featured']=df['homepage_featured'].astype('int')
df.head()
# + colab={} colab_type="code" id="CMbujIgszqiw"
train_df = pd.merge(train_file,fullfilment_file, on='center_id')
test_df= pd.merge(df,fullfilment_file, on='center_id')
# + colab={} colab_type="code" id="2jqM_WELzqlh"
train_df = pd.merge(train_df,meal_file, on='meal_id')
test_df = pd.merge(test_df,meal_file, on='meal_id')
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1341, "status": "ok", "timestamp": 1592037498784, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="5GtcsSEizqoH" outputId="1a9b9850-912a-453f-f811-2bcf6a1d7d8f"
train_df.tail()
test_df.head()
# + colab={} colab_type="code" id="KWx2iaeO81ok"
encoder=LabelEncoder()
encoder2=LabelEncoder()
encoder3=LabelEncoder()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1391, "status": "ok", "timestamp": 1592039657005, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="ucjkjwrZ87X5" outputId="3be77553-4e9e-4ec0-fea1-8c9e3d3d1b46"
main_data=train_df.copy()
main_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1565, "status": "ok", "timestamp": 1592040018504, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="slgBbrbP87hR" outputId="b1bb8302-98ee-4ac3-86f4-b49dab695b52"
main_data['category']=encoder.fit_transform(main_data['category'])
main_data['center_type']=encoder2.fit_transform(main_data['center_type'])
main_data['cuisine']=encoder3.fit_transform(main_data['cuisine'])
main_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 2564, "status": "ok", "timestamp": 1592040138127, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="IG923i2t87k0" outputId="bbe944a0-f0ff-401e-c4e6-54fec993007c"
main_data1= main_data.drop(['id'], axis=1)
correlation = main_data1.corr(method='pearson')
columns = correlation.nlargest(8, 'num_orders').index
columns
# + colab={"base_uri": "https://localhost:8080/", "height": 389} colab_type="code" executionInfo={"elapsed": 2427, "status": "ok", "timestamp": 1592043360246, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="LCJP9W6JKJfW" outputId="b65e25c8-d9be-49f1-aff5-5ad9086c9866"
sns.heatmap(correlation,annot=True)
plt.show()
# + colab={} colab_type="code" id="jxNvYFhz87sM"
features = columns.drop(['num_orders'])
main_data2 = main_data[features]
X = main_data2.values
y = main_data['num_orders'].values
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.25,random_state=0)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 2163, "status": "ok", "timestamp": 1592042345780, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="A4HR4GNZ_2ab" outputId="ac7667d4-482b-454c-ddba-d026c4969c4c"
x_train.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1372, "status": "ok", "timestamp": 1592043168986, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="jTyYCfxwKl6f" outputId="af30f940-9e13-487d-eb8c-09ed543d1408"
test_data=test_df.copy()
test_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 3623, "status": "ok", "timestamp": 1592043249854, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="0s-TrXT3KmKn" outputId="ff7026af-562a-43ef-c9e9-29de49ff4e79"
test_data['category']=encoder.fit_transform(test_data['category'])
test_data['center_type']=encoder2.fit_transform(test_data['center_type'])
test_data['cuisine']=encoder3.fit_transform(test_data['cuisine'])
test_data.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 3156, "status": "ok", "timestamp": 1592044319442, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="pfSxhfalLjLa" outputId="ced5fef3-d9ce-4a59-fc15-6cce5f9065c1"
test_data1=test_data.copy()
test_data1.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 3243, "status": "ok", "timestamp": 1592045187779, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="3XFyb0GMLjOA" outputId="ae0f7254-ccbe-4f32-9c46-6ad302f6d251"
test_data_final=test_data1.drop(columns=['id','center_id','meal_id','week','checkout_price','base_price','center_type'],axis=1)
test_data_final.head()
# + colab={} colab_type="code" id="tRJykGaIBZ8L"
#Building Models
# + colab={} colab_type="code" id="ZDZh0VJ-_2eY"
from sklearn.tree import DecisionTreeRegressor
# Baseline model: a fully grown (unregularized) decision-tree regressor.
DTR = DecisionTreeRegressor()
DTR.fit(x_train, y_train)
# Hold-out predictions consumed by the metric cell below.
y_pred = DTR.predict(x_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1393, "status": "ok", "timestamp": 1592042454940, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="S9HBSRS8BbbV" outputId="dd09d660-f6cd-4b38-88ed-c29835017d99"
from sklearn.metrics import mean_squared_log_error
from sklearn import metrics
# Floor negative predictions at zero before the log-based metric, matching the
# treatment applied to every other model in this notebook:
# mean_squared_log_error raises on negative inputs because log1p is undefined
# there. (A tree regressor on non-negative targets rarely predicts negatives,
# so this is a defensive consistency fix.)
y_pred[y_pred < 0] = 0
# RMSLE: root of the mean squared log error on the hold-out split.
RMSEL = mean_squared_log_error(y_test, y_pred)
RMSEL = np.sqrt(RMSEL)
RMSEL
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 13891, "status": "ok", "timestamp": 1592042566136, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="g4WoLk1PBYge" outputId="86387659-1250-4069-c400-524a7e646dd4"
# # !pip install xgboost
from xgboost import XGBRegressor

# Gradient-boosted trees (XGBoost) on the same train/test split.
XG = XGBRegressor()
XG.fit(x_train, y_train)

# Score the hold-out set; floor negative predictions at zero because the
# log-error metric is undefined for negative values.
y_pred = XG.predict(x_test)
y_pred[y_pred < 0] = 0
RMSEL = np.sqrt(mean_squared_log_error(y_test, y_pred))
print(RMSEL)
# + colab={} colab_type="code" id="JAuUDA6YCFYo"
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 20292, "status": "ok", "timestamp": 1592042650745, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="g7_5zONYB66T" outputId="16fab81a-e026-4200-8afc-7c9f769ad40a"
from sklearn.ensemble import GradientBoostingRegressor

# Gradient boosting (scikit-learn) trained on the same split for comparison.
GB = GradientBoostingRegressor()
GB.fit(x_train, y_train)

y_pred = GB.predict(x_test)
y_pred[y_pred < 0] = 0  # the log-error metric rejects negative predictions
RMSEL = np.sqrt(mean_squared_log_error(y_test, y_pred))
RMSEL
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 34391, "status": "ok", "timestamp": 1592042720678, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="18zOGLoXDUma" outputId="277d50c9-1bb9-4db5-f5e7-1db8c8d9ef94"
from sklearn.ensemble import RandomForestRegressor

# Bagged ensemble of decision trees on the same split.
RF = RandomForestRegressor()
RF.fit(x_train, y_train)

y_pred = RF.predict(x_test)
y_pred[y_pred < 0] = 0  # the log-error metric rejects negative predictions
RMSEL = np.sqrt(mean_squared_log_error(y_test, y_pred))
RMSEL
# + colab={} colab_type="code" id="DBgJVLq10Um_"
# Target variable: number of orders from the raw training file.
y=train_file['num_orders']
# + colab={} colab_type="code" id="W3KG8gcq0UyG"
from lightgbm import LGBMRegressor,plot_importance
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_log_error,mean_squared_error
from sklearn.model_selection import GridSearchCV
# + colab={"base_uri": "https://localhost:8080/", "height": 153} colab_type="code" executionInfo={"elapsed": 1544, "status": "ok", "timestamp": 1592041674979, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="ruM10oES0U0r" outputId="eecc6c58-9369-4888-fe61-50d2ca278488"
# # !pip install lightgbm
from lightgbm import LGBMRegressor

# Base LightGBM regressor; GridSearchCV (next cell) clones this estimator and
# applies each candidate combination itself.
lgb_model = LGBMRegressor(importance_type='gain')

# Candidate hyperparameter grid for the search.
lgbm_params = {
    "n_estimators": [230, 260],
    "num_leaves": [41, 51],
    'min_child_samples': [40, 45, 50],
    'random_state': [2019],
}

# NOTE: the original `lgb_model.set_params(**lgbm_params)` call was removed —
# it assigned the candidate *lists* (e.g. n_estimators=[230, 260]) directly to
# the estimator, leaving the base model unfittable on its own. GridSearchCV
# expands the grid itself, so the call was never needed.
lgb_model  # base model
# + colab={} colab_type="code" id="Fhg5NK1h0U3F"
# 5-fold exhaustive search over lgbm_params, scored by negative MSE, 8 workers.
lgb_grid=GridSearchCV(lgb_model,lgbm_params,cv=5,scoring='neg_mean_squared_error',n_jobs=8)
# + colab={} colab_type="code" id="hbS_5mZqIjrv"
# Run the grid search; `fit` returns the fitted GridSearchCV object itself.
model=lgb_grid.fit(x_train,y_train)
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 1770, "status": "ok", "timestamp": 1592037967223, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="M5-O0v-jIj0W" outputId="8f0db54f-eb51-4cbf-a0aa-0d728b5b54b9"
# Best estimator, refit on the full training data with the winning params.
lgb_estimate=model.best_estimator_
lgb_estimate
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 2330, "status": "ok", "timestamp": 1592041982587, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="TgTk8t51Ij5O" outputId="b0631577-39b7-44db-b2be-00fa3879eca1"
# Predict via the fitted GridSearchCV (delegates to best_estimator_).
y_pred=model.predict(x_test)
y_pred
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1863, "status": "ok", "timestamp": 1592037978685, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="vahEq_BcIj_O" outputId="cdfaa0c3-ab24-4f64-fa27-63ce705856d5"
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 1520, "status": "ok", "timestamp": 1592042152247, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="MDvELDAOIkC-" outputId="8c706306-832d-4562-d90e-394cd181236a"
# Floor negative predictions: mean_squared_log_error rejects negative values.
y_pred[y_pred<0] = 0
RMSEL=mean_squared_log_error(y_test, y_pred)
# Root of MSLE — the RMSLE score for the tuned LightGBM model.
RMSEL=np.sqrt(RMSEL)
RMSEL
# + colab={} colab_type="code" id="p1olFlK0QxNX"
# By comparing the error values of the different models on the training data, the lowest error came from the Random Forest Regression model.
# Prediction on test data by the Random Forest Regressor.
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 1634, "status": "ok", "timestamp": 1592045275153, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="sKb9qvUhSsRI" outputId="037170ff-1a1e-4f42-9580-4c4a52fd4dbe"
# Re-inspect the final test feature matrix before predicting.
test_data_final.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 2060, "status": "ok", "timestamp": 1592045292147, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="KfggcfX1SxKP" outputId="8c2e611a-2058-4fc1-b94b-2a8040e64aa6"
# NOTE(review): `main_data2` is defined earlier in the notebook (outside this
# chunk) — presumably shown for a visual comparison with the test features.
main_data2.head()
# + colab={} colab_type="code" id="-8EBGUxsQxQ7"
# Predict demand on the prepared test features with the Random Forest model,
# floor negative predictions at zero, and assemble the submission frame.
pred_test_data = RF.predict(test_data_final)
pred_test_data[pred_test_data < 0] = 0
submit = pd.DataFrame({'id': test_data['id'], 'num_orders': pred_test_data})
# + colab={"base_uri": "https://localhost:8080/", "height": 221} colab_type="code" executionInfo={"elapsed": 2142, "status": "ok", "timestamp": 1592045460900, "user": {"displayName": "amit", "photoUrl": "", "userId": "12797285159599571225"}, "user_tz": -345} id="VkF0P3sNQxU9" outputId="acd0dc31-94e9-40e8-8f09-319324d015d3"
# Preview the submission frame before writing it out.
submit.head()
# + colab={} colab_type="code" id="9p_Lz1ExQxYH"
# Write the Random Forest submission (assumes the "output/" directory exists).
submit.to_csv("output/submission_RFR.csv", index=False)
# + colab={} colab_type="code" id="P9KLryVRQxbj"
# Decision-tree submission: predict, clamp negatives, save to CSV.
pred_test_data = DTR.predict(test_data_final)
pred_test_data[pred_test_data < 0] = 0
submitDTR = pd.DataFrame({'id': test_data['id'], 'num_orders': pred_test_data})
submitDTR.head()
submitDTR.to_csv("output/submission_DTR.csv", index=False)
# + colab={} colab_type="code" id="zLM6Bf8rQxfX"
# XGBoost submission: predict, clamp negatives, save to CSV.
pred_test_data = XG.predict(test_data_final)
pred_test_data[pred_test_data < 0] = 0
submitXG = pd.DataFrame({'id': test_data['id'], 'num_orders': pred_test_data})
submitXG.head()
submitXG.to_csv("output/submission_XG.csv", index=False)
# + colab={} colab_type="code" id="fclhinAEQxjv"
# Gradient-boosting submission: predict, clamp negatives, save to CSV.
pred_test_data = GB.predict(test_data_final)
pred_test_data[pred_test_data < 0] = 0
submitGB = pd.DataFrame({'id': test_data['id'], 'num_orders': pred_test_data})
submitGB.head()
submitGB.to_csv("output/submission_GB.csv", index=False)
# + colab={} colab_type="code" id="DkXYGcKTQxmi"
# + colab={} colab_type="code" id="tOzl1YGDQxqO"
# + colab={} colab_type="code" id="kmGz_jg0Jee1"
| food_demand_forecast.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (dev)
# language: python
# name: dev
# ---
# + [markdown] id="5MVLh0nv_lS7"
# # Forecasting Net Prophet
#
# You’re a growth analyst at [MercadoLibre](http://investor.mercadolibre.com/investor-relations). With over 200 million users, MercadoLibre is the most popular e-commerce site in Latin America. You've been tasked with analyzing the company's financial and user data in clever ways to make the company grow. So, you want to find out if the ability to predict search traffic can translate into the ability to successfully trade the stock.
#
# Instructions
#
# This section divides the instructions for this Challenge into four steps and an optional fifth step, as follows:
#
# * Step 1: Find unusual patterns in hourly Google search traffic
#
# * Step 2: Mine the search traffic data for seasonality
#
# * Step 3: Relate the search traffic to stock price patterns
#
# * Step 4: Create a time series model with Prophet
#
# * Step 5 (optional): Forecast revenue by using time series models
#
# The following subsections detail these steps.
#
# ## Step 1: Find Unusual Patterns in Hourly Google Search Traffic
#
# The data science manager asks if the Google search traffic for the company links to any financial events at the company. Or, does the search traffic data just present random noise? To answer this question, pick out any unusual patterns in the Google search data for the company, and connect them to the corporate financial events.
#
# To do so, complete the following steps:
#
# 1. Read the search data into a DataFrame, and then slice the data to just the month of May 2020. (During this month, MercadoLibre released its quarterly financial results.) Use hvPlot to visualize the results. Do any unusual patterns exist?
#
# 2. Calculate the total search traffic for the month, and then compare the value to the monthly median across all months. Did the Google search traffic increase during the month that MercadoLibre released its financial results?
#
# ## Step 2: Mine the Search Traffic Data for Seasonality
#
# Marketing realizes that they can use the hourly search data, too. If they can track and predict interest in the company and its platform for any time of day, they can focus their marketing efforts around the times that have the most traffic. This will get a greater return on investment (ROI) from their marketing budget.
#
# To that end, you want to mine the search traffic data for predictable seasonal patterns of interest in the company. To do so, complete the following steps:
#
# 1. Group the hourly search data to plot the average traffic by the day of the week (for example, Monday vs. Friday).
#
# 2. Using hvPlot, visualize this traffic as a heatmap, referencing the `index.hour` as the x-axis and the `index.dayofweek` as the y-axis. Does any day-of-week effect that you observe concentrate in just a few hours of that day?
#
# 3. Group the search data by the week of the year. Does the search traffic tend to increase during the winter holiday period (weeks 40 through 52)?
#
# ## Step 3: Relate the Search Traffic to Stock Price Patterns
#
# You mention your work on the search traffic data during a meeting with people in the finance group at the company. They want to know if any relationship between the search data and the company stock price exists, and they ask if you can investigate.
#
# To do so, complete the following steps:
#
# 1. Read in and plot the stock price data. Concatenate the stock price data to the search data in a single DataFrame.
#
# 2. Market events emerged during the year of 2020 that many companies found difficult. But, after the initial shock to global financial markets, new customers and revenue increased for e-commerce platforms. Slice the data to just the first half of 2020 (`2020-01` to `2020-06` in the DataFrame), and then use hvPlot to plot the data. Do both time series indicate a common trend that’s consistent with this narrative?
#
# 3. Create a new column in the DataFrame named “Lagged Search Trends” that offsets, or shifts, the search traffic by one hour. Create two additional columns:
#
# * “Stock Volatility”, which holds an exponentially weighted four-hour rolling average of the company’s stock volatility
#
# * “Hourly Stock Return”, which holds the percent change of the company's stock price on an hourly basis
#
# 4. Review the time series correlation, and then answer the following question: Does a predictable relationship exist between the lagged search traffic and the stock volatility or between the lagged search traffic and the stock price returns?
#
# ## Step 4: Create a Time Series Model with Prophet
#
# Now, you need to produce a time series model that analyzes and forecasts patterns in the hourly search data. To do so, complete the following steps:
#
# 1. Set up the Google search data for a Prophet forecasting model.
#
# 2. After estimating the model, plot the forecast. How's the near-term forecast for the popularity of MercadoLibre?
#
# 3. Plot the individual time series components of the model to answer the following questions:
#
# * What time of day exhibits the greatest popularity?
#
# * Which day of the week gets the most search traffic?
#
# * What's the lowest point for search traffic in the calendar year?
#
# ## Step 5 (Optional): Forecast Revenue by Using Time Series Models
#
# A few weeks after your initial analysis, the finance group follows up to find out if you can help them solve a different problem. Your fame as a growth analyst in the company continues to grow!
#
# Specifically, the finance group wants a forecast of the total sales for the next quarter. This will dramatically increase their ability to plan budgets and to help guide expectations for the company investors.
#
# To do so, complete the following steps:
#
# 1. Read in the daily historical sales (that is, revenue) figures, and then apply a Prophet model to the data.
#
# 2. Interpret the model output to identify any seasonal patterns in the company's revenue. For example, what are the peak revenue days? (Mondays? Fridays? Something else?)
#
# 3. Produce a sales forecast for the finance group. Give them a number for the expected total sales in the next quarter. Include the best- and worst-case scenarios to help them make better plans.
#
# + [markdown] id="zuGqNYcz_lTG"
# ## Install and import the required libraries and dependencies
# + colab={"base_uri": "https://localhost:8080/"} id="3FqL7IT5_lTI" executionInfo={"status": "ok", "timestamp": 1632556455605, "user_tz": -180, "elapsed": 14388, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="2fb275f4-b6ad-4ee7-bb4f-36a57d0d5a2f"
# Install the required libraries
from IPython.display import clear_output
try:
    # `pass` keeps the try body non-empty: the "!pip" shell magics below are
    # plain comments in this jupytext .py form, and a `try:` block containing
    # only comments is a SyntaxError when the file is run as Python.
    pass
    # !pip install pystan
    # !pip install fbprophet
    # !pip install hvplot
    # !pip install holoviews
except Exception:  # narrowed from a bare `except:` so Ctrl-C still interrupts
    print("Error installing libraries")
finally:
    clear_output()
    print('Libraries successfully installed')
# + id="-Py-pd6Q_lTK" colab={"base_uri": "https://localhost:8080/", "height": 16, "output_embedded_package_id": "1bD6XA54DDQs7amsqSlZ_bb8bIhwBL0dq"} executionInfo={"status": "ok", "timestamp": 1632556461068, "user_tz": -180, "elapsed": 5473, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="7ae1aaf9-f429-4564-eaf1-e06414e518e9"
# Import the required libraries and dependencies
import hvplot.pandas
import pandas as pd
import holoviews as hv
import datetime as dt
from datetime import datetime
from fbprophet import Prophet
from bokeh.models import Title
# %matplotlib inline
# + [markdown] id="vzv8g2R-_lTK"
# ## Step 1: Find Unusual Patterns in Hourly Google Search Traffic
#
# The data science manager asks if the Google search traffic for the company links to any financial events at the company. Or, does the search traffic data just present random noise? To answer this question, pick out any unusual patterns in the Google search data for the company, and connect them to the corporate financial events.
#
# To do so, complete the following steps:
#
# 1. Read the search data into a DataFrame, and then slice the data to just the month of May 2020. (During this month, MercadoLibre released its quarterly financial results.) Use hvPlot to visualize the results. Do any unusual patterns exist?
#
# 2. Calculate the total search traffic for the month, and then compare the value to the monthly median across all months. Did the Google search traffic increase during the month that MercadoLibre released its financial results?
#
# + [markdown] id="XkxoZlpj_lTL"
# #### Step 1: Read the search data into a DataFrame, and then slice the data to just the month of May 2020. (During this month, MercadoLibre released its quarterly financial results.) Use hvPlot to visualize the results. Do any unusual patterns exist?
# + colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 288} id="58Nq8xXZ_lTL" executionInfo={"status": "ok", "timestamp": 1632556481384, "user_tz": -180, "elapsed": 20329, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "13953265443078842179"}} outputId="d771f42e-d374-4552-8963-f7fbeb8aed7e"
# Upload the "google_hourly_search_trends.csv" file into Colab, then store in a Pandas DataFrame
# Set the "Date" column as the Datetime Index.
from google.colab import files
uploaded = files.upload()
# Parse the "Date" column into a DatetimeIndex at read time so the
# time-based slicing and groupby operations below work directly.
df_mercado_trends = pd.read_csv(
    "google_hourly_search_trends.csv",
    index_col="Date",
    parse_dates=True,
    infer_datetime_format=True,
)
# Review the first five rows of the DataFrame
df_mercado_trends.head()
# + colab={"base_uri": "https://localhost:8080/"} id="pzrUg9ph_lTL" executionInfo={"status": "ok", "timestamp": 1632556481384, "user_tz": -180, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="05d2fdb4-5742-4385-d3e2-5e01b58e89ba"
# Review the data types of the DataFrame using the info function
# (expect a DatetimeIndex after the parse above).
df_mercado_trends.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "1POmdxgXJWkvmhFSN_OcuQszYkzk06yIu"} id="FwGQ0EAo_lTM" executionInfo={"status": "ok", "timestamp": 1632556486274, "user_tz": -180, "elapsed": 4901, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="e8c3f1c5-487b-4a35-9dcf-2764e5d3bfed"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Slice the DataFrame to just the month of May 2020
df_may_2020 = df_mercado_trends.loc["2020-May"]
# Use hvPlot to visualize the data for May 2020
# NOTE(review): `figsize` is a matplotlib-style option; with the bokeh backend
# hvPlot sizes plots via `width`/`height` — confirm this kwarg has any effect.
df_may_2020.hvplot(
    title="MercadoLibre Hourly Google Trends, May 2020",
    figsize=[500, 750],
    legend=False,
)
# + [markdown] id="05a8OnMQ_lTM"
# #### Step 2: Calculate the total search traffic for the month, and then compare the value to the monthly median across all months. Did the Google search traffic increase during the month that MercadoLibre released its financial results?
# + colab={"base_uri": "https://localhost:8080/"} id="4hENgvGb_lTN" executionInfo={"status": "ok", "timestamp": 1632556486276, "user_tz": -180, "elapsed": 110, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="c8f43a94-757e-4139-a593-67c7a5e1e30b"
# Calculate the sum of the total search traffic for May 2020
# (result is a one-element Series: total interest units for the month).
traffic_may_2020 = df_may_2020.sum()
# View the traffic_may_2020 value
traffic_may_2020
# + colab={"base_uri": "https://localhost:8080/"} id="BqZ__KD7_lTN" executionInfo={"status": "ok", "timestamp": 1632556486281, "user_tz": -180, "elapsed": 94, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="7a994b76-7919-4d7b-c909-21b20c49500b"
# Calculate the monthly median search traffic across all months
# Group the DataFrame by index year and then index month, chain the sum and then the median functions
median_monthly_traffic = df_mercado_trends.groupby([df_mercado_trends.index.year,
                                                    df_mercado_trends.index.month]
                                                   ).sum().median()
# View the median_monthly_traffic value
median_monthly_traffic
# + colab={"base_uri": "https://localhost:8080/"} id="8zxsQRy0_lTN" executionInfo={"status": "ok", "timestamp": 1632556486284, "user_tz": -180, "elapsed": 93, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="6e6bf21e-ada7-48e7-821b-67b24e4bf89c"
# Compare the search traffic for the month of May 2020 to the overall monthly
# median value (positive difference = above-median traffic in May 2020).
traffic_may_2020 - median_monthly_traffic
# + [markdown] id="2eHtzFzh_lTO"
# ##### Answer the following question:
# + [markdown] id="crsVZasF_lTO"
# **Question:** Did the Google search traffic increase during the month that MercadoLibre released its financial results?
#
# **Answer:** The Google search traffic increased during the month of May, 2020.
# + [markdown] id="XQLMHK6y_lTO"
# ## Step 2: Mine the Search Traffic Data for Seasonality
#
# Marketing realizes that they can use the hourly search data, too. If they can track and predict interest in the company and its platform for any time of day, they can focus their marketing efforts around the times that have the most traffic. This will get a greater return on investment (ROI) from their marketing budget.
#
# To that end, you want to mine the search traffic data for predictable seasonal patterns of interest in the company. To do so, complete the following steps:
#
# 1. Group the hourly search data to plot the average traffic by the day of the week (for example, Monday vs. Friday).
#
# 2. Using hvPlot, visualize this traffic as a heatmap, referencing the `index.hour` as the x-axis and the `index.dayofweek` as the y-axis. Does any day-of-week effect that you observe concentrate in just a few hours of that day?
#
# 3. Group the search data by the week of the year. Does the search traffic tend to increase during the winter holiday period (weeks 40 through 52)?
#
# + [markdown] id="4rusRScW_lTP"
# #### Step 1: Group the hourly search data to plot the average traffic by the day of the week (for example, Monday vs. Friday).
# + id="t6373Tpk_lTP" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "1UcntRkDol_tV34auUwqMtjq1TmiPDf6d"} executionInfo={"status": "ok", "timestamp": 1632556486306, "user_tz": -180, "elapsed": 111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="cfddaeb9-e650-48e0-b13c-7019ae53823d"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')

# Average search traffic per weekday: group the hourly series on the integer
# day-of-week (0=Mon .. 6=Sun), then relabel the index with weekday names.
dayofweek_dict = dict(enumerate(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]))
df_mercado_trends_dayofweek = df_mercado_trends.groupby([df_mercado_trends.index.dayofweek]).mean()
df_mercado_trends_dayofweek.index = df_mercado_trends_dayofweek.index.map(dayofweek_dict)
df_mercado_trends_dayofweek.index.name = "Day of Week"

# Line plot of the weekday averages.
df_mercado_trends_dayofweek.hvplot(
    title="MercadoLibre Trends by Day of Week",
    figsize=[500, 750],
    legend=False,
)
# + [markdown] id="VyQoggZH_lTP"
# #### Step 2: Using hvPlot, visualize this traffic as a heatmap, referencing the `index.hour` as the x-axis and the `index.dayofweek` as the y-axis. Does any day-of-week effect that you observe concentrate in just a few hours of that day?
# + id="vH3Hm0xh_lTP" colab={"base_uri": "https://localhost:8080/", "height": 647, "output_embedded_package_id": "1HvqsYD4IOmLCwPBjO5gEswSijsiJV6Yv"} executionInfo={"status": "ok", "timestamp": 1632556486308, "user_tz": -180, "elapsed": 85, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="70b31c6f-8c82-4e30-ad93-0603c22b6103"
# Holoviews extension to render hvPlots in Colab
# NOTE(review): `math` is imported here but unused in this cell.
import math
import numpy as np
hv.extension('bokeh')
# Use hvPlot to visualize the hour of the day and day of week search traffic as a heatmap.
# (index, label) pairs used as y-axis tick labels: 0=Mon .. 6=Sun.
dayofweek_tuple = [(k, v) for k, v in enumerate(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"])]
# Base heatmap: hour of day on x, day of week on y, colored by search interest.
heatmap = df_mercado_trends.hvplot.heatmap(
    x="index.hour",
    y="index.dayofweek",
    C="Search Trends",
    cmap="reds",
    title="MercadoLibre Hourly Trends by Day of Week",
    xlabel="Hour",
    ylabel="Day of Week",
    yticks=dayofweek_tuple,
    colorbar=True,
)
# Aggregate the (hour, weekday) cells two ways: average and total traffic.
heatmap1 = heatmap.aggregate(function=np.mean)
heatmap2 = heatmap.aggregate(function=np.sum)
# Stack both variants vertically in a single one-column layout.
layout = hv.Layout(
    heatmap1.options(axiswise=True, title="MercadoLibre Hourly Trends by Day of Week (Average)") + \
    heatmap2.options(axiswise=True, title="MercadoLibre Hourly Trends by Day of Week (Sum)")
).cols(1)
layout
# + [markdown] id="7YpQxNoL_lTQ"
# ##### Answer the following question:
# + [markdown] id="HKKAZxhD_lTQ"
# **Question:** Does any day-of-week effect that you observe concentrate in just a few hours of that day?
#
# **Answer:** For most weekdays, there appears to be more search activity late at night, continuing into the early hours of the morning. This trend is somewhat consistent on weekends, although it is less pronounced in the early morning hours.
# + [markdown] id="LtMqzu3a_lTQ"
# #### Step 3: Group the search data by the week of the year. Does the search traffic tend to increase during the winter holiday period (weeks 40 through 52)?
# + id="c4m0OsWG_lTQ" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "1HJ8cM8EUUJB8JikZ-6M_3lRW32S2urG4"} executionInfo={"status": "ok", "timestamp": 1632556487420, "user_tz": -180, "elapsed": 1166, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="48e9f241-be89-4872-84e6-ce8ec6764c8c"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Group the hourly search data to plot (use hvPlot) the average traffic by the week of the year
# isocalendar().week yields ISO week numbers 1..53 for the groupby key.
df_mercado_trends_weekofyear = df_mercado_trends.groupby([df_mercado_trends.index.isocalendar().week]).mean()
df_mercado_trends_weekofyear.hvplot(
    x="week",
    y="Search Trends",
    title="MercadoLibre Hourly Traffic by Week of Year",
    xlabel="Week of Year",
)
# + [markdown] id="NwvNyg_h_lTR"
# ##### Answer the following question:
# + [markdown] id="-4JjVt1I_lTR"
# **Question:** Does the search traffic tend to increase during the winter holiday period (weeks 40 through 52)?
#
# **Answer:** The search traffic does tend to increase from approximately week 40 to 51. However, there is a sharp drop in the last week.
# + [markdown] id="07bRHBy6_lTR"
# ## Step 3: Relate the Search Traffic to Stock Price Patterns
#
# You mention your work on the search traffic data during a meeting with people in the finance group at the company. They want to know if any relationship between the search data and the company stock price exists, and they ask if you can investigate.
#
# To do so, complete the following steps:
#
# 1. Read in and plot the stock price data. Concatenate the stock price data to the search data in a single DataFrame.
#
# 2. Market events emerged during the year of 2020 that many companies found difficult. But, after the initial shock to global financial markets, new customers and revenue increased for e-commerce platforms. Slice the data to just the first half of 2020 (`2020-01` to `2020-06` in the DataFrame), and then use hvPlot to plot the data. Do both time series indicate a common trend that’s consistent with this narrative?
#
# 3. Create a new column in the DataFrame named “Lagged Search Trends” that offsets, or shifts, the search traffic by one hour. Create two additional columns:
#
# * “Stock Volatility”, which holds an exponentially weighted four-hour rolling average of the company’s stock volatility
#
# * “Hourly Stock Return”, which holds the percent change of the company's stock price on an hourly basis
#
# 4. Review the time series correlation, and then answer the following question: Does a predictable relationship exist between the lagged search traffic and the stock volatility or between the lagged search traffic and the stock price returns?
#
# + [markdown] id="ujlCRy41_lTS"
# #### Step 1: Read in and plot the stock price data. Concatenate the stock price data to the search data in a single DataFrame.
# + id="AL4sIttQ_lTS" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 288} executionInfo={"status": "ok", "timestamp": 1632556527397, "user_tz": -180, "elapsed": 39993, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="f3590244-8f82-4b0a-f257-fed531f16f4b"
# Upload the "mercado_stock_price.csv" file into Colab, then store in a Pandas DataFrame
# Set the "date" column as the Datetime Index.
from google.colab import files
uploaded = files.upload()
# Parse the "date" column into a DatetimeIndex so it aligns with the
# hourly search-trends index for the concat below.
df_mercado_stock = pd.read_csv(
    "mercado_stock_price.csv",
    index_col="date",
    parse_dates=True,
    infer_datetime_format=True,
)
# View the last five rows of the DataFrame
df_mercado_stock.tail()
# + id="OHxhuG31_lTT" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "1_IAsK5YP6H9oHoKBmE1xlA2WR8euwTW8"} executionInfo={"status": "ok", "timestamp": 1632556531124, "user_tz": -180, "elapsed": 3746, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="9c49c41c-a3e0-49c2-db1c-7e46e612aa86"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Visualize the hourly closing price of the MercadoLibre stock data.
stock_close_plot = df_mercado_stock.hvplot(
    title="MercadoLibre Hourly Stock Prices",
    xlabel="Time (Hourly)",
    ylabel="Closing Price",
)
stock_close_plot
# + id="i0zKoq_n_lTT" colab={"base_uri": "https://localhost:8080/", "height": 386} executionInfo={"status": "ok", "timestamp": 1632556531126, "user_tz": -180, "elapsed": 182, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="c7793144-a46c-473a-9c62-b1f5b4b03b6c"
# Concatenate the df_mercado_stock DataFrame with the df_mercado_trends DataFrame
# Concatenate the DataFrames by columns (axis=1); dropna() then removes any row
# missing a value in either source, keeping only hours present in BOTH series.
mercado_stock_trends_df = pd.concat([df_mercado_trends, df_mercado_stock], axis=1).dropna()
# View the first and last five rows of the DataFrame
display(mercado_stock_trends_df.head())
display(mercado_stock_trends_df.tail())
# + [markdown] id="pehKLqvu_lTT"
# #### Step 2: Market events emerged during the year of 2020 that many companies found difficult. But, after the initial shock to global financial markets, new customers and revenue increased for e-commerce platforms. Slice the data to just the first half of 2020 (`2020-01` to `2020-06` in the DataFrame), and then use hvPlot to plot the data. Do both time series indicate a common trend that’s consistent with this narrative?
# + id="1eXMOoll_lTT" colab={"base_uri": "https://localhost:8080/", "height": 386} executionInfo={"status": "ok", "timestamp": 1632556531128, "user_tz": -180, "elapsed": 181, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="a327a8a0-6ab5-44d8-b56a-643f0eea78d9"
# For the combined dataframe, slice to just the first half of 2020 (2020-01 through 2020-06)
# FIX: use the numeric partial-string labels the instructions specify
# ("2020-01" to "2020-06"); this selects exactly the same rows as the
# previous "2020-Jan"/"2020-Jun" labels but is the canonical pandas form.
first_half_2020 = mercado_stock_trends_df.loc["2020-01":"2020-06"]
# View the first and last five rows of first_half_2020 DataFrame
display(first_half_2020.head())
display(first_half_2020.tail())
# + id="aQleoRZD_lTU" colab={"base_uri": "https://localhost:8080/", "height": 647, "output_embedded_package_id": "19jZUqUEhv3JFeDtLSn5X63Lure2UFBWl"} executionInfo={"status": "ok", "timestamp": 1632556531130, "user_tz": -180, "elapsed": 180, "user": {"displayName": "<NAME>", "photoUrl": "https://<KEY>", "userId": "13953265443078842179"}} outputId="ba448aee-1b6a-4a50-e146-674f7c86a82a"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Plot the close and Search Trends series for H1-2020, each column in its
# own panel with independent axes, stacked in a single column.
h1_panels = first_half_2020.hvplot(
    subplots=True,
    shared_axes=False,
    xlabel="Time (Hourly)",
    ylabel="Variable",
)
h1_panels.cols(1)
# + [markdown] id="80PKxsMy_lTU"
# ##### Answer the following question:
# + [markdown] id="eD3-GEo4_lTU"
# **Question:** Do both time series indicate a common trend that’s consistent with this narrative?
#
# **Answer:** From this initial raw plotting comparison, it's difficult to find correlating trends between the two charts.
# + [markdown] id="3rWmycER_lTU"
# #### Step 3: Create a new column in the DataFrame named “Lagged Search Trends” that offsets, or shifts, the search traffic by one hour. Create two additional columns:
#
# * “Stock Volatility”, which holds an exponentially weighted four-hour rolling average of the company’s stock volatility
#
# * “Hourly Stock Return”, which holds the percent change of the company's stock price on an hourly basis
#
# + id="XuQ5AH4j_lTV" executionInfo={"status": "ok", "timestamp": 1632556531138, "user_tz": -180, "elapsed": 77, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}}
# Create a new column in the mercado_stock_trends_df DataFrame called Lagged Search Trends
# This column should shift the Search Trends information by one hour
# NOTE(review): shift(..., freq="H") moves the *index* by one clock hour and the
# assignment then realigns on the index. On a gap-free hourly index this equals
# shift(1), but across gaps (e.g. non-trading hours) the two differ -- confirm
# the clock-hour lag (rather than a one-row lag) is intended.
mercado_stock_trends_df['Lagged Search Trends'] = mercado_stock_trends_df["Search Trends"].shift(periods=1, freq="H")
# + id="hsufyNed_lTV" executionInfo={"status": "ok", "timestamp": 1632556531145, "user_tz": -180, "elapsed": 83, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}}
# Create a new column in the mercado_stock_trends_df DataFrame called Stock Volatility
# This column should calculate the standard deviation of the closing stock price return data over a 4 period rolling window
# NOTE(review): the assignment text asks for an *exponentially weighted*
# four-hour measure (i.e. .ewm(...)), but this computes a plain 4-period
# rolling std of hourly returns -- confirm which definition is intended
# before reusing downstream; the correlation answers were computed with this one.
mercado_stock_trends_df['Stock Volatility'] = mercado_stock_trends_df["close"].pct_change().rolling(window=4).std()
# + id="FBmq9MBZ_lTV" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "1n0nzzJQnlzL3JjsjiDsb3jHWPEP46noj"} executionInfo={"status": "ok", "timestamp": 1632556532442, "user_tz": -180, "elapsed": 1379, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="32e81ed6-a3cc-4af3-f9de-c1cb31d17776"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Use hvPlot to visualize the stock volatility
# FIX: hvPlot/bokeh does not accept matplotlib's `figsize`; plot size is set
# with explicit pixel `width`/`height` options instead.
mercado_stock_trends_df["Stock Volatility"].hvplot(
    title="Stock Volatility over Time",
    width=750,
    height=500,
)
# + [markdown] id="l2GrqaJt_lTV"
# **Solution Note:** Note how volatility spiked, and tended to stay high, during the first half of 2020. This is a common characteristic of volatility in stock returns worldwide: high volatility days tend to be followed by yet more high volatility days. When it rains, it pours.
# + id="19M5hfrz_lTW" executionInfo={"status": "ok", "timestamp": 1632556532444, "user_tz": -180, "elapsed": 48, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}}
# Create a new column in the mercado_stock_trends_df DataFrame called Hourly Stock Return
# holding the hour-over-hour percent change of the closing price.
hourly_returns = mercado_stock_trends_df['close'].pct_change(periods=1)
mercado_stock_trends_df['Hourly Stock Return'] = hourly_returns
# + id="_9Aj18MD_lTW" colab={"base_uri": "https://localhost:8080/", "height": 386} executionInfo={"status": "ok", "timestamp": 1632556532445, "user_tz": -180, "elapsed": 42, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="0b0973d8-a781-4d86-a713-33c99cac110e"
# Inspect the first and last five rows of the combined DataFrame.
for preview in (mercado_stock_trends_df.head(), mercado_stock_trends_df.tail()):
    display(preview)
# + [markdown] id="goT1ZQx0_lTW"
# #### Step 4: Review the time series correlation, and then answer the following question: Does a predictable relationship exist between the lagged search traffic and the stock volatility or between the lagged search traffic and the stock price returns?
# + id="WxVvpd7S_lTW" colab={"base_uri": "https://localhost:8080/", "height": 141} executionInfo={"status": "ok", "timestamp": 1632556532448, "user_tz": -180, "elapsed": 41, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="1562ec31-9f01-4536-93c6-9fdd426e8ac2"
# Correlation matrix of Stock Volatility, Lagged Search Trends, and Hourly Stock Return.
corr_columns = ["Stock Volatility", "Lagged Search Trends", "Hourly Stock Return"]
mercado_stock_trends_df[corr_columns].corr()
# + [markdown] id="ip92XxvE_lTW"
# ##### Answer the following question:
#
# + [markdown] id="9gwZSD22_lTX"
# **Question:** Does a predictable relationship exist between the lagged search traffic and the stock volatility or between the lagged search traffic and the stock price returns?
#
# **Answer:** Lagged search trends and stock volatility appear to be inversely correlated.
# + [markdown] id="fBHq1d9h_lTX"
# ## Step 4: Create a Time Series Model with Prophet
#
# Now, you need to produce a time series model that analyzes and forecasts patterns in the hourly search data. To do so, complete the following steps:
#
# 1. Set up the Google search data for a Prophet forecasting model.
#
# 2. After estimating the model, plot the forecast. How's the near-term forecast for the popularity of MercadoLibre?
#
# 3. Plot the individual time series components of the model to answer the following questions:
#
# * What time of day exhibits the greatest popularity?
#
# * Which day of the week gets the most search traffic?
#
# * What's the lowest point for search traffic in the calendar year?
#
# + [markdown] id="uwyckfFm_lTX"
# #### Step 1: Set up the Google search data for a Prophet forecasting model.
# + id="EkDrQfhb_lTX" colab={"base_uri": "https://localhost:8080/", "height": 386} executionInfo={"status": "ok", "timestamp": 1632556532469, "user_tz": -180, "elapsed": 60, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="a26a7182-864a-4490-ee7a-b0349881af64"
# Using the df_mercado_trends DataFrame, reset the index so the date information is no longer the index
# NOTE(review): despite the comment, this models the *stock close* from
# mercado_stock_trends_df, not the Google "Search Trends" series from
# df_mercado_trends -- confirm which series Step 1 is meant to forecast.
mercado_prophet_df = mercado_stock_trends_df.reset_index().filter(["index", "close"])
# Label the columns ds and y so that the syntax is recognized by Prophet
# (this relies on the reset index column being named "index", i.e. on the
# DatetimeIndex carrying no name -- verify against the concat above).
mercado_prophet_df = mercado_prophet_df.rename(columns={"index":"ds", "close":"y"})
# Drop any NaN values from the prophet_df DataFrame
mercado_prophet_df = mercado_prophet_df.dropna()
# View the first and last five rows of the mercado_prophet_df DataFrame
display(mercado_prophet_df.head())
display(mercado_prophet_df.tail())
# + id="UDClNGxC_lTY" executionInfo={"status": "ok", "timestamp": 1632556532879, "user_tz": -180, "elapsed": 467, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}}
# Call the Prophet function, store as an object
# (default configuration; no seasonality or interval settings overridden)
model_mercado_trends = Prophet()
# + id="Tg7I7RvP_lTY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1632556567353, "user_tz": -180, "elapsed": 34489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="dc8982a2-86bb-4143-8260-46ef2c3a2fb8"
# Fit the time-series model to the ds/y DataFrame prepared above.
model_mercado_trends.fit(mercado_prophet_df)
# + id="wtCrNRU8_lTY" colab={"base_uri": "https://localhost:8080/", "height": 202} executionInfo={"status": "ok", "timestamp": 1632556567358, "user_tz": -180, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="d0c0c0d5-f8f8-4865-85a5-878531a5029a"
# Create a future dataframe to hold predictions
# Make the prediction go out as far as 2000 hours (approx 80 days)
# NOTE: pandas >= 2.2 prefers the lowercase "h" alias for hourly frequency.
future_mercado_trends = model_mercado_trends.make_future_dataframe(periods=2000, freq="H")
# View the last five rows of the future_mercado_trends DataFrame
future_mercado_trends.tail()
# + id="HTKN1CvG_lTY" colab={"base_uri": "https://localhost:8080/", "height": 388} executionInfo={"status": "ok", "timestamp": 1632557951756, "user_tz": -180, "elapsed": 7361, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="3c3853f0-7f41-4d6d-c17a-4b4c83b1c6f3"
# Make the predictions for the trend data using the future_mercado_trends DataFrame
# (predict returns yhat plus uncertainty bounds and component columns)
forecast_mercado_trends = model_mercado_trends.predict(future_mercado_trends)
# Display the first five rows of the forecast_mercado_trends DataFrame
forecast_mercado_trends.head()
# + [markdown] id="hxdCHIHZ_lTZ"
# #### Step 2: After estimating the model, plot the forecast. How's the near-term forecast for the popularity of MercadoLibre?
# + id="Wn7tJHLe_lTZ" colab={"base_uri": "https://localhost:8080/", "height": 441} executionInfo={"status": "ok", "timestamp": 1632557952955, "user_tz": -180, "elapsed": 1209, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="d3079293-2627-4fc2-c6e9-077bfed2636a"
# Plot the Prophet predictions for the Mercado trends data
# (observed points plus the fitted/forecast yhat line and its uncertainty band)
model_mercado_trends.plot(forecast_mercado_trends);
# + [markdown] id="kqCLZJ-Z_lTZ"
# ##### Answer the following question:
# + [markdown] id="wYf_udGd_lTZ"
# **Question:** How's the near-term forecast for the popularity of MercadoLibre?
#
# **Answer:** The near-term forecast for the popularity of MercadoLibre is mostly at or above the most recent level. However, there is large variability in the predictions.
#
# + [markdown] id="0h839Ojg_lTZ"
# #### Step 3: Plot the individual time series components of the model to answer the following questions:
#
# * What time of day exhibits the greatest popularity?
#
# * Which day of the week gets the most search traffic?
#
# * What's the lowest point for search traffic in the calendar year?
#
# + id="rUy3rI08_lTa" colab={"base_uri": "https://localhost:8080/", "height": 445} executionInfo={"status": "ok", "timestamp": 1632557983829, "user_tz": -180, "elapsed": 274, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="f20025ec-f6f3-4e16-e762-6b2453095c0f"
# Set the index in the forecast_mercado_trends DataFrame to the ds datetime column
forecast_mercado_trends = forecast_mercado_trends.set_index(["ds"])
# View only the yhat, yhat_lower and yhat_upper columns from the DataFrame
# (most-likely forecast plus the lower/upper bounds of the uncertainty interval)
forecast_mercado_trends[["yhat", "yhat_lower", "yhat_upper"]]
# + [markdown] id="_G94IPMH_lTa"
# Solutions Note: `yhat` represents the most likely (average) forecast, whereas `yhat_lower` and `yhat_upper` represent the worst- and best-case predictions (the bounds of Prophet's uncertainty interval, 80% by default).
# + colab={"base_uri": "https://localhost:8080/"} id="R1R6lK5xBvls" executionInfo={"status": "ok", "timestamp": 1632557992574, "user_tz": -180, "elapsed": 254, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="67520178-abaa-45d2-c89f-2d86f8c3f6c0"
# Confirm the DatetimeIndex produced by the set_index call above.
forecast_mercado_trends.index
# + id="QElvUGwG_lTa" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "17ZcFad6lPLndJHQis6vGATZHzgM21aBR"} executionInfo={"status": "ok", "timestamp": 1632558001289, "user_tz": -180, "elapsed": 3104, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="6e460819-14cf-47ae-ff68-337cff69ef6a"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# From the forecast_mercado_trends DataFrame, use hvPlot to visualize
# the yhat, yhat_lower, and yhat_upper columns over the last 2000 hours
# NOTE(review): the expression below plots the *entire* forecast index, not
# just the last 2000 hours; also the title says "Traffic" while the ylabel
# says "Stock Price" -- reconcile with whichever series the model was fit on.
forecast_mercado_trends[["yhat_lower", "yhat_upper", "yhat"]] \
.rename(columns={"yhat_lower": "Worst Case", "yhat_upper": "Best Case", "yhat": "Expected"}) \
.hvplot(
title="MercadoLibre Predicted Traffic",
xlabel="Date",
ylabel="Stock Price",
)
# + id="ugF4XRoq_lTa" colab={"base_uri": "https://localhost:8080/", "height": 873} executionInfo={"status": "ok", "timestamp": 1632556579105, "user_tz": -180, "elapsed": 1151, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="1466c8e5-95c7-4067-c3fc-5074765f6927"
# Reset the index in the forecast_mercado_trends DataFrame
# (plot_components expects ds as a regular column, not the index)
forecast_mercado_trends = forecast_mercado_trends.reset_index()
# Use the plot_components function to visualize the forecast results
# for the forecast_mercado_trends DataFrame (one panel per model component)
figures_mercado_trends = model_mercado_trends.plot_components(forecast_mercado_trends);
# + [markdown] id="xv8xm_Bv_lTb"
# ##### Answer the following questions:
# + [markdown] id="eTAR2pzV_lTb"
# **Question:** What time of day exhibits the greatest popularity?
#
# **Answer:** The greatest popularity, on a daily timeframe, is the late evening to early morning.
# + [markdown] id="oZ3zBc_B_lTb"
# **Question:** Which day of week gets the most search traffic?
#
# **Answer:** The weekdays appear to get the most search traffic.
# + [markdown] id="5-AQLJ7L_lTc"
# **Question:** What's the lowest point for search traffic in the calendar year?
#
# **Answer:** April appears to be the lowest point for search traffic in the year.
#
# + [markdown] id="c9iYmwPZ_lTc"
# ## Step 5 (Optional): Forecast Revenue by Using Time Series Models
#
# A few weeks after your initial analysis, the finance group follows up to find out if you can help them solve a different problem. Your fame as a growth analyst in the company continues to grow!
#
# Specifically, the finance group wants a forecast of the total sales for the next quarter. This will dramatically increase their ability to plan budgets and to help guide expectations for the company investors.
#
# To do so, complete the following steps:
#
# 1. Read in the daily historical sales (that is, revenue) figures, and then apply a Prophet model to the data. The daily sales figures are quoted in millions of USD dollars.
#
# 2. Interpret the model output to identify any seasonal patterns in the company's revenue. For example, what are the peak revenue days? (Mondays? Fridays? Something else?)
#
# 3. Produce a sales forecast for the finance group. Give them a number for the expected total sales in the next quarter. Include the best- and worst-case scenarios to help them make better plans.
#
#
#
# + [markdown] id="Ctgf-MZD_lTc"
# #### Step 1: Read in the daily historical sales (that is, revenue) figures, and then apply a Prophet model to the data.
# + id="xTkslU0T_lTd" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "<KEY>", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 288} executionInfo={"status": "ok", "timestamp": 1632556591394, "user_tz": -180, "elapsed": 12321, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="8dcefce3-6f5f-4e60-9ac8-83c464586ea0"
# Upload the "mercado_daily_revenue.csv" file into Colab, then store in a Pandas DataFrame
# Set the "date" column as the DatetimeIndex
# Sales are quoted in millions of US dollars
from google.colab import files
uploaded = files.upload()
# FIX: dropped `infer_datetime_format=True` -- it is deprecated (and a no-op)
# in pandas >= 2.0; `parse_dates=True` alone parses the index dates.
df_mercado_sales = pd.read_csv(
    "mercado_daily_revenue.csv",
    index_col="date",
    parse_dates=True,
)
# Review the DataFrame
df_mercado_sales.head()
# + id="NiIVQUlY_lTd" colab={"base_uri": "https://localhost:8080/", "height": 326, "output_embedded_package_id": "17aKkF9x-n5XSnNF4K123brciNehz3-d8"} executionInfo={"status": "ok", "timestamp": 1632556593461, "user_tz": -180, "elapsed": 2078, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="53bea987-8b14-41cf-e0e6-8a9e5e252732"
# Holoviews extension to render hvPlots in Colab
hv.extension('bokeh')
# Daily revenue over the full history (sales are in millions of USD).
daily_sales_plot = df_mercado_sales.hvplot(
    title="MercadoLibre Daily Revenue",
    xlabel="Date",
    ylabel="Daily Sales",
)
daily_sales_plot
# + id="Dy4D-kOM_lTd" colab={"base_uri": "https://localhost:8080/", "height": 202} executionInfo={"status": "ok", "timestamp": 1632556593463, "user_tz": -180, "elapsed": 60, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="17b318bf-7205-4920-f538-55f5b292b263"
# Apply a Facebook Prophet model to the data.
# Set up the dataframe in the necessary format:
# Reset the index so that date becomes a column in the DataFrame
mercado_sales_prophet_df = df_mercado_sales.reset_index()
# Adjust the column names to the Prophet syntax (ds = timestamp, y = value to forecast)
mercado_sales_prophet_df = mercado_sales_prophet_df.rename(columns={"date":"ds", "Daily Sales":"y"})
# Visualize the DataFrame
mercado_sales_prophet_df.head()
# + id="qd9znEqQ_lTd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1632556593465, "user_tz": -180, "elapsed": 58, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="cd875399-3bfa-45a3-b25b-e1b95e612d29"
# Create the model
# (daily_seasonality=True explicitly enables Prophet's daily seasonal component)
mercado_sales_prophet_model = Prophet(daily_seasonality=True)
# Fit the model to the ds/y sales DataFrame prepared above.
mercado_sales_prophet_model.fit(mercado_sales_prophet_df)
# + id="2h6qIH90_lTe" colab={"base_uri": "https://localhost:8080/", "height": 386} executionInfo={"status": "ok", "timestamp": 1632556593467, "user_tz": -180, "elapsed": 52, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="fdf6e565-108d-49ab-b401-ba4845a512ce"
# Predict sales for 90 days (1 quarter) out into the future.
# Start by making a future dataframe.
# FIX: the sales data is daily and the goal is 90 days out -- the original
# periods=3360, freq="H" produced ~140 days of *hourly* rows, which later
# inflates the summed quarterly revenue by roughly a factor of 24.
mercado_sales_prophet_future = mercado_sales_prophet_model.make_future_dataframe(periods=90, freq="D")
# Display the first and last five rows of the future DataFrame
display(mercado_sales_prophet_future.head())
display(mercado_sales_prophet_future.tail())
# + id="rG_BY-5F_lTe" colab={"base_uri": "https://localhost:8080/", "height": 305} executionInfo={"status": "ok", "timestamp": 1632556598187, "user_tz": -180, "elapsed": 4770, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="93df42ce-523d-4cb6-bd43-07b3c78381d1"
# Make predictions for the sales each day over the next quarter
# (returns yhat with its uncertainty bounds and the component columns)
mercado_sales_prophet_forecast = mercado_sales_prophet_model.predict(mercado_sales_prophet_future)
# Display the first 5 rows of the resulting DataFrame
mercado_sales_prophet_forecast.head()
# + [markdown] id="Gua7ww3R_lTe"
# #### Step 2: Interpret the model output to identify any seasonal patterns in the company's revenue. For example, what are the peak revenue days? (Mondays? Fridays? Something else?)
# + id="i7Dgq84L_lTe" colab={"base_uri": "https://localhost:8080/", "height": 656} executionInfo={"status": "ok", "timestamp": 1632556599369, "user_tz": -180, "elapsed": 1201, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="6a446a41-28a3-4aff-c6a9-f0f46b8879c0"
# Use the plot_components function to analyze seasonal patterns in the company's revenue
# (one panel per fitted component: trend plus each enabled seasonality)
mercado_sales_prophet_model.plot_components(mercado_sales_prophet_forecast);
# + [markdown] id="ct6ZBAw8_lTf"
# ##### Answer the following question:
# + [markdown] id="SW9V9pP2_lTf"
# **Question:** For example, what are the peak revenue days? (Mondays? Fridays? Something else?)
#
# **Answer:** The peak revenue days, in descending order, are Wednesday, Tuesday, and Monday.
# + [markdown] id="PLzRkueh_lTf"
# #### Step 3: Produce a sales forecast for the finance group. Give them a number for the expected total sales in the next quarter. Include the best- and worst-case scenarios to help them make better plans.
# + id="khtzqz55_lTf" colab={"base_uri": "https://localhost:8080/", "height": 441} executionInfo={"status": "ok", "timestamp": 1632556600133, "user_tz": -180, "elapsed": 778, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="6d286dac-f991-444d-ff92-082ee79a3056"
# Plot the predictions for the Mercado sales
# (observed revenue points plus the forecast line and its uncertainty band)
mercado_sales_prophet_model.plot(mercado_sales_prophet_forecast);
# + id="DBhuDYK__lTf" colab={"base_uri": "https://localhost:8080/", "height": 718} executionInfo={"status": "ok", "timestamp": 1632556600134, "user_tz": -180, "elapsed": 58, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="316c8fc3-6fd1-4499-fc9a-9fd7b7dc6c50"
# For the mercado_sales_prophet_forecast DataFrame, set the ds column as the DataFrame Index
# (a DatetimeIndex enables the date-based .loc slicing performed below)
mercado_sales_prophet_forecast = mercado_sales_prophet_forecast.set_index(["ds"])
# Display the first and last five rows of the DataFrame
display(mercado_sales_prophet_forecast.head())
display(mercado_sales_prophet_forecast.tail())
# + id="0N3K83Mf_lTg" colab={"base_uri": "https://localhost:8080/", "height": 232} executionInfo={"status": "ok", "timestamp": 1632556600134, "user_tz": -180, "elapsed": 54, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="a2040007-2e20-46ef-8b20-26f98b164068"
# Build the quarterly sales forecast (2020-07-01 through 2020-09-30) for the
# finance division: most likely (yhat), best case (yhat_upper), and worst
# case (yhat_lower) scenarios, relabelled with finance-friendly column names.
scenario_names = {
    "yhat": "Expected Sales",
    "yhat_upper": "Best Case Sales",
    "yhat_lower": "Worst Case Sales",
}
mercado_sales_forecast_quarter = (
    mercado_sales_prophet_forecast
    .loc["2020-07-01":"2020-09-30", list(scenario_names)]
    .rename(columns=scenario_names)
)
# Review the last five rows of the DataFrame
mercado_sales_forecast_quarter.tail()
# + id="k575tx0G_lTg" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1632556600135, "user_tz": -180, "elapsed": 53, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}} outputId="bde37f16-7975-433c-9364-b82b8ecd252c"
# Display the summed values for all the rows in the forecast_quarter DataFrame.
# FIX (PEP 8, E731): bind a def, not a lambda, to a name.
def print_summed_values(series, w):
    """Print a column's name and its total, with the name padded to width *w*."""
    print(f"{series.name + ':':{w}} ${series.sum():,.2f}")

# Pad labels to one past the longest column name so the dollar amounts align.
width = max(mercado_sales_forecast_quarter.columns.map(len)) + 1
mercado_sales_forecast_quarter.apply(print_summed_values, w=width);
# + [markdown] id="_kMchkNQ_lTg"
# ### Based on the forecast information generated above, produce a sales forecast for the finance division, giving them a number for expected total sales next quarter. Include best and worst case scenarios, to better help the finance team plan.
#
# **Answer:** Given the timeline of historical sales data and its trending behavior, the quarterly sales forecast is positive with a continued upward trend. However, regarding short-term trends, we do see a downward trend towards the end of the week (Thursday thru Saturday) and at late hours into the night.<br><br>
# By using Prophet to analyze the daily revenue of MercadoLibre, these trends have been projected to show positive results with the expected sales at 52,860.32 USD for Q3. Also still reflecting an upward trend in Q3, the worst case sales is predicted at 48,650.62 USD.
# + id="nqiwU13V_lTg" executionInfo={"status": "ok", "timestamp": 1632556600135, "user_tz": -180, "elapsed": 45, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gh3V43PoCS0W8Dfmqkeo6elcm9Buwdyhk7iCmAk1zY=s64", "userId": "13953265443078842179"}}
| forecasting_net_prophet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # <NAME>
# ## Does Gender and Race affect earnings?
#
# In today's society we face a lot of newfound social issues surrounding gender and race. I wanted to examine the questions surrounding these social issues and discover whether it is true that race and gender affect overall income. My initial hypothesis was that, since this survey was taken in the United States, white men would have a higher chance of earning more money than any other gender and race combination.
# Load and process the UCI Adult (census income) dataset via the project
# helper; per the pipeline notes below, this imports, cleans, and wrangles
# the raw data -- see project_functions2.load_and_process for details.
import project_functions2
df = project_functions2.load_and_process("https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data")
# Persist the processed copy locally for reuse without re-downloading.
df.to_csv("dataprocessed2.csv")
df
# # Analysis Pipeline
#
# How to create the same Dataset:
# 1. Importing
# * Import numpy, pandas, matplotlib and seaborn
# * Import the csv at https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data using pandas and matplotlib, make sure to include skipinitialspace as there is one in this dataset
# 1. Clean data
# * Fill empty values with NaN and drop those values
# * Drop any duplicates that may exist
# 1. Wrangle/Process Data
# * Drop unnecessary columns like Capital Gains Losses and family relationship
# * Rename the columns of the dataset since the first row does not include the categories for each column
# * Sort the age column in ascending order; this makes age-based graphs easier to read and lets us see the minimum and maximum age
# * Be sure to reset index and drop the newly created index column
# 1. Wrangle/Graphing
# * Create a countplot for Income
# * Create a countplot for Gender
# * Create a dist plot for Age
# * Create a value count graph for Race
#
# Note: Sometimes you may get an error like "Tuple does not support attribute head", to fix this simply restart Jupyter Lab.
# # See Aaron.ipynb for EDA and Graphs
| notebooks/analysis2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd

# Load the raw charging-session export.
df = pd.read_csv('data/hacc.csv')
df.head()

len(df)

print(df.columns)

# Replace the export's headers with short snake_case names (positional).
COLUMN_NAMES = [
    'name', 'session_type', 'start_time', 'end_time', 'duration',
    'energy', 'amount', 'id', 'port_type', 'payment_mode',
]
df.columns = COLUMN_NAMES
print(df.columns)

df.dtypes
# +
# Parse the session timestamps into proper datetimes.
df['start_time'] = pd.to_datetime(df['start_time'])
df['end_time'] = pd.to_datetime(df['end_time'])

# Changes the payment amount to a raw float value instead of a string of the
# form "$X.XX" (also strips any thousands separators).
# Fix: use a raw string for the regex -- '\$' in a plain string literal is an
# invalid escape sequence (SyntaxWarning on Python 3.12+).
df['amount'] = df['amount'].replace(r'[\$,]', '', regex=True).astype(float)
df.head()
# -

# Inspect sessions whose duration string contains a '-' (negative or
# malformed durations) -- presumably data-entry errors; verify upstream.
df[df['duration'].str.contains('-')]['duration']
# ## Data Errors
# Types of errors found:
# 1. Positive energy, Zero amount -- Resolution: ?
# 2. Positive amount, Zero energy -- Resolution: ?

# Sessions where either the energy delivered or the amount paid is zero --
# candidates for data-entry errors.
missing = df[(df['energy'] == 0) | (df['amount'] == 0)]
print(len(missing))
missing.head()

# Anyone who charges their car but doesn't pay is a "thief" -- correctable if we just use HECO formula?
# Question: Is this ALWAYS just a data issue or do some people actually get away without paying?
thieves = df[(df['energy'] > 0) & (df['amount'] == 0)]
print(len(thieves))
thieves.head()

# Anyone who used 0 energy but still paid got "jipped" (paid without charging).
jipped = df[(df['energy'] == 0) & (df['amount'] > 0)]
print(len(jipped))
jipped.head()
# Calendar date (no time component) each session started on.
dates = df['start_time'].dt.date
df['date'] = dates
df['date']
# Weekday name (e.g. "Monday") for day-of-week features later.
df['day_of_week'] = df['start_time'].dt.day_name()
df['day_of_week']
# ## Time of Day
# Noting from his slides:
#
# Times:
# - On Peak: 5pm - 10pm => 17:00 - 22:00, Daily
# - Mid Day: 9am - 5pm => 9:00 => 17:00, Daily
# - Off Peak: 10pm - 9am => 22:00 - 9:00, Daily
#
# Cost:
# - On Peak: \$0.57
# - Mid Day: \$0.49
# - Off Peak: \$0.54
#
import datetime as dt

start_times = df['start_time'].dt.time
# Boolean flags marking which HECO rate window each session STARTED in
# (windows per the slide notes above). NOTE(review): a session spanning a
# window boundary is tagged by its start time only -- confirm that's intended.
df['on_peak'] = (dt.time(17, 0, 0) <= start_times) & (start_times < dt.time(22, 0, 0))
df['mid_day'] = (dt.time(9, 0, 0) <= start_times) & (start_times < dt.time(17, 0, 0))
df['off_peak'] = (dt.time(22, 0, 0) <= start_times) | (start_times < dt.time(9, 0, 0))
df.head()
# ## Error Checking
# Check that the columns agree with each other:
# 1. Does cost match the amount of energy for the given time period?

# Expected cost = energy * rate of whichever single time-of-day flag is set.
# The three windows partition the day, so exactly one term is non-zero per row.
df['calculated_amount'] = df['energy'] * df['on_peak'] * 0.57 + df['energy'] * df['mid_day'] * 0.49 + df['energy'] * df['off_peak'] * 0.54
df['rounded_calculated_amount'] = np.round(df['calculated_amount'], 2)
correct = df[(df['amount'] == df['rounded_calculated_amount'])]
err = df[~(df['amount'] == df['rounded_calculated_amount'])]
correct.head()
# Exactly-one-cent mismatches -- presumably rounding differences, not real errors.
err[np.abs(err['amount'] - err['rounded_calculated_amount']) == 0.01]#[['amount', 'rounded_calculated_amount', 'calculated_amount']]
# Mismatches larger than $1 -- likely genuine billing/calculation errors.
err[np.abs(err['amount'] - err['rounded_calculated_amount']) > 1]
df = df.rename({'rounded_calculated_amount': 'correct_amount'}, axis=1)
# Tag each session: one-cent gap = rounding error, anything larger = calculation error.
df['error_rounding'] = np.abs(df['amount'] - df['correct_amount']) == 0.01
df['error_calculation'] = np.abs(df['amount'] - df['correct_amount']) > 0.01
df.head()
df[df['error_rounding']]
# Categorical dtype so pd.get_dummies(select_dtypes('category')) below
# one-hot encodes exactly these columns.
for col in ['session_type', 'port_type', 'payment_mode']:
    df[col] = df[col].astype('category')
df.dtypes
# +
# One-hot encode the categorical columns alongside the originals.
preproc_df = df.join(pd.get_dummies(df.select_dtypes('category')))
# preproc_df = preproc_df.join(pd.get_dummies(df['name']))

# Drop original categorical columns in favor of the "One Hot Encoding"
preproc_df = preproc_df.drop(df.select_dtypes('category'), axis=1)

# preproc_df['start_time'] = pd.to_timedelta(preproc_df['start_time'])
# preproc_df['end_time'] = pd.to_timedelta(preproc_df['end_time'])
# preproc_df['correct_duration'] = preproc_df['end_time'].dt.total_seconds() - preproc_df['start_time'].dt.total_seconds()
def get_sec(time_str):
    """Convert an 'H:MM:SS' duration string to its total number of seconds."""
    hours, minutes, seconds = (int(piece) for piece in time_str.split(':'))
    return 3600 * hours + 60 * minutes + seconds
# Duration as total seconds (the raw export stores it as an "H:MM:SS" string).
preproc_df['correct_duration'] = preproc_df['duration'].apply(lambda x: get_sec(x))
# Drop identifiers and raw/derived columns we don't want as model features.
preproc_df = preproc_df.drop(['id', 'start_time', 'end_time', 'duration', 'amount', 'calculated_amount', 'day_of_week'], axis=1)
preproc_df.head()
# -
# Daily totals per charging station.
df_agg = preproc_df.groupby(['name', 'date']).agg('sum').reset_index()
# df_agg.columns = df_agg.columns.to_flat_index()
# df_agg.columns
df_agg.head()
df_agg.columns
import matplotlib.pyplot as plt
# Quick autocorrelation check: daily energy vs energy 8 rows later.
# NOTE(review): df_agg mixes stations here, so this lag can cross a
# station boundary -- confirm that's acceptable for the eyeball check.
plt.scatter(df_agg['energy'].shift(-8), df_agg['energy'])
from pandas.plotting import scatter_matrix
df_temp = pd.DataFrame()
df_temp['energy'] = df_agg['energy']
# Lagged copies of daily energy to eyeball day-to-day correlation.
for x in range(1, 7):
    df_temp[f'energy_prev_{x}'] = df_temp['energy'].shift(x)
scatter_matrix(df_temp[['energy', 'energy_prev_1', 'energy_prev_2', 'energy_prev_3']])
df_temp.dropna().head(10)
# +
# Rebuild the per-station daily aggregate, this time adding day-of-week and
# month one-hot features for modelling.
df_agg = preproc_df.groupby(['name', 'date']).agg('sum').reset_index()
df_agg['day_of_week'] = pd.to_datetime(df_agg['date']).dt.day_name()
df_agg['month'] = pd.to_datetime(df_agg['date']).dt.month_name()
for col in ['day_of_week', 'month']:
    df_agg[col] = df_agg[col].astype('category')
df_agg = df_agg.join(pd.get_dummies(df_agg.select_dtypes('category')))
df_agg = df_agg.drop(df_agg.select_dtypes('category'), axis=1)
# df_agg.columns = df_agg.columns.to_flat_index()
# df_agg.columns
# for x in range(1, 7):
#     df_agg[f'energy_prev_{x}'] = df_agg['energy'].shift(x)
# df_agg = df_agg.dropna()
# df_agg.head()

# One DataFrame per station so the lag features built next never mix stations.
stations = [g for _, g in df_agg.groupby('name')]
def offset_col_x_days(df, col, days):
    """Append lagged copies of *col* (shifted by 8 up to 7+days-1 rows) to *df*,
    then drop the rows left incomplete by the shifting and re-index from zero."""
    for lag in range(8, 7 + days):
        df[f'{col}_prev_{lag}'] = df[col].shift(lag)
    return df.dropna().reset_index(drop=True)
# Add week-old lag features (lags 8..13 days, since days=7 here) for every
# modelling column, station by station.
for i in range(len(stations)):
    for col in ['energy', 'on_peak', 'mid_day', 'off_peak', 'error_rounding', 'error_calculation', 'session_type_DEVICE', 'session_type_MOBILE', 'session_type_WEB',
                'port_type_CHADEMO', 'port_type_DCCOMBOTYP1', 'payment_mode_CREDITCARD',
                'payment_mode_RFID']:
        stations[i] = offset_col_x_days(stations[i], col, 7)
stations[0]
# -
from datetime import timedelta

# Build a 7-day-ahead feature frame for each station: the "future" rows'
# lag features are taken from the station's most recent observed days.
test_stations = []
for station in stations:
    most_recent_session = station['date'].values[-1]
    test_station = pd.DataFrame()
    # The seven dates we want to forecast.
    test_station['date'] = pd.Series([most_recent_session + timedelta(days=x) for x in range(1, 8)])
    for col in ['energy', 'on_peak', 'mid_day', 'off_peak', 'error_rounding', 'error_calculation', 'session_type_DEVICE', 'session_type_MOBILE', 'session_type_WEB',
                'port_type_CHADEMO', 'port_type_DCCOMBOTYP1', 'payment_mode_CREDITCARD',
                'payment_mode_RFID']:
        # Last 7 values of each lagged series stand in for the future rows' lags.
        for x in range(1, 7):
            test_station[f'{col}_prev_{7+x}'] = station[col].shift(7+x).values[-7:]
    test_station = test_station.dropna().reset_index(drop=True)
    # Same calendar features as the training frame.
    test_station['day_of_week'] = pd.to_datetime(test_station['date']).dt.day_name()
    test_station['month'] = pd.to_datetime(test_station['date']).dt.month_name()
    for col in ['day_of_week', 'month']:
        test_station[col] = test_station[col].astype('category')
    # A 7-day window only contains one or two month values, so pad the missing
    # month_* dummy columns with zeros to match the training columns.
    # NOTE(review): 'month_September' is absent from this list -- confirm
    # whether that month can appear in training and should be padded too.
    months = set([
        'month_April',
        'month_August',
        'month_December',
        'month_February',
        'month_January',
        'month_July',
        'month_June',
        'month_March',
        'month_May',
        'month_November',
        'month_October'
    ])
    test_station = test_station.join(pd.get_dummies(test_station.select_dtypes('category')))
    test_station = test_station.drop(test_station.select_dtypes('category'), axis=1)
    months = months - set(test_station.columns)
    months_df = pd.DataFrame()
    for month in months:
        months_df[month] = [0] * 7
    test_station = test_station.join(months_df)
    test_stations.append(test_station)
test_stations[0]
# +
from sklearn.ensemble import RandomForestRegressor

# Fit one multi-output random forest per station: predict next-week daily
# energy plus the auxiliary per-day counts from the lag + calendar features.
y_preds = []
for X_train, X_test in zip(stations, test_stations):
    # Multi-output target: every "current day" column we engineered.
    y = X_train[['energy', 'error_rounding', 'error_calculation', 'on_peak', 'mid_day', 'off_peak', 'port_type_CHADEMO', 'port_type_DCCOMBOTYP1', 'payment_mode_CREDITCARD',
                 'payment_mode_RFID', 'session_type_DEVICE', 'session_type_MOBILE', 'session_type_WEB']]
    # Features = lag columns + calendar dummies only (drop targets/identifiers).
    X_train = X_train.drop(['name', 'date', 'correct_amount', 'correct_duration', 'energy', 'on_peak', 'mid_day', 'off_peak', 'error_rounding', 'error_calculation', 'session_type_DEVICE', 'session_type_MOBILE', 'session_type_WEB',
                            'port_type_CHADEMO', 'port_type_DCCOMBOTYP1', 'payment_mode_CREDITCARD',
                            'payment_mode_RFID'], axis=1)
    # Sort columns so train/test feature order lines up.
    X_train = X_train.reindex(sorted(X_train.columns), axis=1)
    # NOTE(review): date_test is rebound each iteration, so only the LAST
    # station's forecast dates survive the loop (it is reused later).
    date_test = X_test['date']
    X_test = X_test.drop(['date'], axis=1)
    X_test = X_test.reindex(sorted(X_test.columns), axis=1)
    clf = RandomForestRegressor()
    clf.fit(X_train, y)
    y_pred = clf.predict(X_test)
    y_preds.append(y_pred)
# +
# # from sklearn.utils import check_arrays
# def mape(y_true, y_pred):
# # y_true, y_pred = check_arrays(y_true, y_pred)
# ## Note: does not handle mix 1d representation
# #if _is_1d(y_true):
# # y_true, y_pred = _check_1d_array(y_true, y_pred)
# return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
# mape(y_test, y_pred)
# -
# Re-shape each station's predictions from one-row-per-date into one list per
# predicted field (i.e. transpose the prediction matrix).
all_predictions = []
for station_pred in y_preds:
    per_field = [list(field_values) for field_values in zip(*station_pred)]
    all_predictions.append(per_field)
all_predictions[0]
# Clean up the raw regression outputs per station.
for idx, preds in enumerate(all_predictions):
    # The two error-count fields can't be negative: clamp them at zero.
    preds[1] = [0 if value < 0 else value for value in preds[1]]
    preds[2] = [0 if value < 0 else value for value in preds[2]]
    # Energy keeps two decimals; every other field is a count, rounded to an integer.
    preds[0] = np.round(preds[0], 2)
    for field in range(1, len(preds)):
        preds[field] = np.round(preds[field])
    all_predictions[idx] = preds

# Sanity check: each field has one value per forecast date.
for p in all_predictions[0]:
    print(len(p))
len(all_predictions[0])
# Assemble the per-station forecasts into one tidy frame.
df_test = pd.DataFrame()
# NOTE(review): assumes exactly two stations, in groupby order -- verify.
station_names = ['A','B']
for predictions, station_name in zip(all_predictions, station_names):
    # NOTE(review): date_test is whatever the LAST training-loop iteration
    # left behind, so every station reuses that station's forecast dates --
    # confirm this is intended.
    df = pd.DataFrame({
        'name': [station_name] * len(predictions[0]),
        'date': date_test,
        'energy': predictions[0],
        'error_rounding': predictions[1],
        'error_calculation': predictions[2],
        'on_peak': predictions[3],
        'mid_day': predictions[4],
        'off_peak': predictions[5],
        'port_type_CHADEMO': predictions[6],
        'port_type_DCCOMBOTYP1': predictions[7],
        'payment_mode_CREDITCARD': predictions[8],
        'payment_mode_RFID': predictions[9],
        'session_type_DEVICE': predictions[10],
        'session_type_MOBILE': predictions[11],
        'session_type_WEB': predictions[12],
    })
    # NOTE(review): DataFrame.append is removed in pandas 2.x; pd.concat is
    # the replacement (left as-is here).
    df_test = df_test.append(df)
df_test
df_test.to_csv('test_run.csv',index=False)
# Network-wide totals per forecast date.
df_test_agg = df_test.groupby(['date']).agg('sum').reset_index()
df_test_agg
df_test_agg['energy'] = np.round(df_test_agg['energy'], 2)
df_test_agg['name'] = ['Agg'] * len(df_test_agg['energy'])
# Final output: per-station forecasts plus the "Agg" network totals.
df_final = df_test.append(df_test_agg).reset_index()
df_final
# +
# df_test_agg.to_csv('test_run.csv',index=False)
| web/hecoweb/storage/eda/full_forecast.ipynb |