code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Construyendo una red convolucional con PyTorch
# + [markdown] slideshow={"slide_type": "slide"}
# ## Librerías
# + slideshow={"slide_type": "-"}
import gzip
import mlflow
import numpy
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
import tempfile
from gensim import corpora
from gensim.parsing import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, average_precision_score
from tqdm.notebook import tqdm, trange
from torch.utils.data import Dataset, DataLoader
# + [markdown] slideshow={"slide_type": "slide"}
# ## Red convolucional para imágenes
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Datos del CIFAR-10
#
# Utilizamos los mismos datos que se usaron en el [notebook 1](./1_introduction_nn.ipynb).
# + slideshow={"slide_type": "subslide"}
# CIFAR-10 class names, in label-index order.
CIFAR_CLASSES = ('plane', 'car', 'bird', 'cat', 'deer',
                 'dog', 'frog', 'horse', 'ship', 'truck')
BATCH_SIZE = 128
EPOCHS = 2
# Convert PIL images to tensors, then scale each RGB channel from [0, 1]
# to [-1, 1] (mean 0.5, std 0.5 per channel).
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Train split: shuffled each epoch; downloads to ./data on first run.
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=BATCH_SIZE,
                                          shuffle=True, num_workers=2)
# Test split: fixed order so predictions line up with labels.
testset = torchvision.datasets.CIFAR10(root='./data', train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset, batch_size=BATCH_SIZE,
                                         shuffle=False, num_workers=2)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Red convolucional
#
# - La red convolucional se obtiene apilando capas [`torch.nn.Conv2d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html).
# - En particular, este tipo de capas acepta matrices (a diferencia de la lineal que sólo acepta vectores). En las capas se definen los canales de entrada y los de salida, además del tamaño del kernel (i.e. ventana).
# - También son comunes las capas [`torch.nn.MaxPool2d`](https://pytorch.org/docs/stable/generated/torch.nn.MaxPool2d.html) que realizan una operación de max pooling, en 2 dimensiones.
# - La red se completa con algunas capas lineales para poder llevarla a las 10 dimensiones de salida que vienen a representar las clases.
# + slideshow={"slide_type": "subslide"}
class CNN(nn.Module):
    """Small convolutional network for 10-class 32x32 RGB images (CIFAR-10).

    Two conv + max-pool stages reduce each 3x32x32 image to a 16x5x5
    feature map, which three fully connected layers map to 10 class logits.
    """

    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)   # 3x32x32 -> 6x28x28
        self.pool = nn.MaxPool2d(2, 2)    # halves spatial dims; reused twice
        self.conv2 = nn.Conv2d(6, 16, 5)  # 6x14x14 -> 16x10x10
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return unnormalized class logits of shape (batch, 10)."""
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # torch.flatten keeps the batch dimension intact; the previous
        # x.view(-1, 16 * 5 * 5) silently folds extra elements into the
        # batch axis if the input shape is ever wrong.
        x = torch.flatten(x, 1)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

model = CNN()
print(model)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Entrenamiento
#
# La red se entrena igual que el caso del perceptrón multicapa, solo que esta vez no requiere reacomodar la matriz de entrada.
# + slideshow={"slide_type": "subslide"}
# Standard PyTorch training loop: cross-entropy on the logits, Adam with a
# small weight decay as regularization.
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-5)
model.train()  # enable training-mode behavior (no-op here: CNN has no dropout/BN)
# NOTE(review): iters_per_epoch is computed but never used — confirm it can go.
iters_per_epoch = len(trainloader)
for epoch in trange(EPOCHS):  # loop over the dataset multiple times
    pbar = tqdm(trainloader, desc="Train loss: NaN")
    for data in pbar:
        inputs, labels = data
        optimizer.zero_grad()  # clear gradients accumulated by the last backward()
        outputs = model(inputs)
        loss = loss_function(outputs, labels)
        loss.backward()
        optimizer.step()
        # Show the current batch loss in the progress bar.
        pbar.set_description(f"Train loss: {loss.item():.3f}")
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Evaluación
#
# Una vez más, la evaluación es similar al caso del perceptrón multicapa.
# + slideshow={"slide_type": "subslide"}
# Evaluation: collect predictions over the test set and report per-class metrics.
model.eval()  # switch to inference mode (no-op for this CNN, but correct practice
              # and required if dropout/batch-norm layers are ever added)
y_true = []
y_pred = []
with torch.no_grad():  # no gradient bookkeeping needed during inference
    for data in tqdm(testloader):
        inputs, labels = data
        outputs = model(inputs)
        # argmax over the class dimension gives the predicted label
        _, predicted = torch.max(outputs.data, 1)
        y_true.extend(labels.numpy())
        y_pred.extend(predicted.numpy())
print(classification_report(y_true, y_pred, target_names=CIFAR_CLASSES))
# + [markdown] slideshow={"slide_type": "slide"}
# ## CNNs para Texto
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Datos IMDB
#
# Similar al caso de CNN para imágenes, vamos a volver sobre el conjunto de datos que ya utilizamos anteriormente: el de reviews de IMDB. Esta vez para compararlo contra el modelo de perceptrón multicapa utilizando la media de los embeddings.
# + slideshow={"slide_type": "subslide"}
class IMDBReviewsDataset(Dataset):
    """PyTorch ``Dataset`` over a pandas DataFrame of IMDB reviews.

    The DataFrame must have ``review`` (text) and ``sentiment`` (label)
    columns and a contiguous integer index (use ``reset_index(drop=True)``).

    Args:
        dataset: the reviews DataFrame.
        transform: optional callable applied to each ``{"data", "target"}``
            item (e.g. a ``RawDataProcessor`` instance).
    """

    def __init__(self, dataset, transform=None):
        self.dataset = dataset
        self.transform = transform

    def __len__(self):
        return self.dataset.shape[0]

    def __getitem__(self, item):
        if torch.is_tensor(item):
            # Bug fix: torch tensors expose ``tolist()``, not ``to_list()``;
            # the original raised AttributeError for tensor indices.
            item = item.tolist()
        item = {
            "data": self.dataset.loc[item, "review"],
            "target": self.dataset.loc[item, "sentiment"]
        }
        if self.transform:
            item = self.transform(item)
        return item
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Preprocesamiento
#
# Aplicamos el mismo tipo de preprocesamiento.
# + slideshow={"slide_type": "subslide"}
class RawDataProcessor:
    """Encodes raw IMDB review text and labels into integer sequences/indices.

    Builds a gensim dictionary over the preprocessed reviews, reserving
    index 0 for "[PAD]" and 1 for "[UNK]", and a label<->index mapping.
    """

    def __init__(self,
                 dataset,
                 ignore_header=True,
                 filters=None,
                 vocab_size=50000):
        # NOTE(review): ignore_header is accepted but never used here —
        # confirm whether it can be dropped at the call sites.
        if filters:
            self.filters = filters
        else:
            # Default gensim pipeline: lowercase, strip markup, punctuation,
            # extra whitespace, digits, stopwords and very short tokens.
            self.filters = [
                lambda s: s.lower(),
                preprocessing.strip_tags,
                preprocessing.strip_punctuation,
                preprocessing.strip_multiple_whitespaces,
                preprocessing.strip_numeric,
                preprocessing.remove_stopwords,
                preprocessing.strip_short,
            ]
        # Create dictionary based on all the reviews (with corresponding preprocessing)
        self.dictionary = corpora.Dictionary(
            dataset["review"].map(self._preprocess_string).tolist()
        )
        # Filter the dictionary and compactify it (make the indices continuous).
        # Order matters: filter, then compactify, then patch special tokens.
        self.dictionary.filter_extremes(no_below=2, no_above=1, keep_n=vocab_size)
        self.dictionary.compactify()
        # Reserve special token ids: 0 for padding, 1 for unknown words.
        self.dictionary.patch_with_special_tokens({
            "[PAD]": 0,
            "[UNK]": 1
        })
        # Label <-> index mappings, in sorted label order for determinism.
        self.idx_to_target = sorted(dataset["sentiment"].unique())
        self.target_to_idx = {t: i for i, t in enumerate(self.idx_to_target)}

    def _preprocess_string(self, string):
        # Apply the filter pipeline; returns a list of tokens.
        return preprocessing.preprocess_string(string, filters=self.filters)

    def _sentence_to_indices(self, sentence):
        # Out-of-vocabulary tokens map to the [UNK] index (1).
        return self.dictionary.doc2idx(sentence, unknown_word_index=1)

    def encode_data(self, data):
        """Preprocess a raw review string into a list of token indices."""
        return self._sentence_to_indices(self._preprocess_string(data))

    def encode_target(self, target):
        """Map a sentiment label to its integer index."""
        return self.target_to_idx[target]

    def __call__(self, item):
        """Encode one item or a batch shaped like {"data": ..., "target": ...}."""
        if isinstance(item["data"], str):
            data = self.encode_data(item["data"])
        else:
            data = [self.encode_data(d) for d in item["data"]]
        if isinstance(item["target"], str):
            target = self.encode_target(item["target"])
        else:
            target = [self.encode_target(t) for t in item["target"]]
        return {
            "data": data,
            "target": target
        }
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Carga de datos
# + slideshow={"slide_type": "subslide"}
# Load the full reviews dataset and fit the preprocessor on all of it.
# NOTE(review): the vocabulary is built before the train/test split, so it
# also sees test reviews — confirm this mild leakage is intended.
dataset = pd.read_csv("./data/imdb_reviews.csv.gz")
preprocess = RawDataProcessor(dataset)
# 80/20 split with a fixed seed for reproducibility.
train_indices, test_indices = train_test_split(dataset.index, test_size=0.2, random_state=42)
train_dataset = IMDBReviewsDataset(dataset.loc[train_indices].reset_index(drop=True), transform=preprocess)
test_dataset = IMDBReviewsDataset(dataset.loc[test_indices].reset_index(drop=True), transform=preprocess)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Padding de secuencias
#
# Dado que en este caso utilizaremos las secuencias completas sobre las que aplicaremos las convoluciones, necesitamos trabajar con dichas secuencias de manera que en un batch de datos tengan el tamaño correcto.
# + slideshow={"slide_type": "subslide"}
class PadSequences:
    """Collate callable that right-pads variable-length index sequences.

    Produces a LongTensor batch of equal-length sequences plus a
    FloatTensor of targets, suitable as a DataLoader ``collate_fn``.

    Args:
        pad_value: index used to fill short sequences (0 = "[PAD]").
        max_length: if set, every batch is exactly this long (longer
            sequences are truncated).
        min_length: lower bound on the batch length when max_length is
            unset, so downstream convolutions always have enough positions.
    """

    def __init__(self, pad_value=0, max_length=None, min_length=1):
        assert max_length is None or min_length <= max_length
        self.pad_value = pad_value
        self.max_length = max_length
        self.min_length = min_length

    def __call__(self, items):
        sequences = [item["data"] for item in items]
        targets = [item["target"] for item in items]
        lengths = [len(seq) for seq in sequences]
        if self.max_length:
            # Fixed-size batches: clip every sequence to max_length.
            batch_length = self.max_length
            lengths = [min(self.max_length, length) for length in lengths]
        else:
            # Pad to the longest sequence, but never below min_length.
            batch_length = max(self.min_length, max(lengths))
        padded = []
        for seq, length in zip(sequences, lengths):
            padded.append(seq[:length] + [self.pad_value] * (batch_length - length))
        return {
            "data": torch.LongTensor(padded),
            "target": torch.FloatTensor(targets),
        }
# + [markdown] slideshow={"slide_type": "subslide"}
# ### DataLoaders
#
# Una vez creada nuestra función para hacer padding de secuencias, definiremos los `DataLoader`s. Una cuestión importante: las redes convolucionales sobre texto esperan que todas las secuencias sean al menos del tamaño de la convolución máxima (caso contrario ocurrirá un error por no poder realizar la convolución sobre un espacio más chico que el tamaño de la convolución). Es por eso que utilizamos el parámetro `min_length` esta vez.
# + slideshow={"slide_type": "subslide"}
EPOCHS = 2
FILTERS_COUNT = 100          # convolutional filters per kernel size
FILTERS_LENGTH = [2, 3, 4]   # kernel widths (n-gram sizes)
# Sequences must be at least as long as the widest convolution kernel;
# otherwise Conv1d fails on inputs shorter than its kernel.
pad_sequences = PadSequences(min_length=max(FILTERS_LENGTH))
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True,
                          collate_fn=pad_sequences, drop_last=False)
test_loader = DataLoader(test_dataset, batch_size=128, shuffle=False,
                         collate_fn=pad_sequences, drop_last=False)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Red convolucional sobre texto
#
# Por último, tenemos la red convolucional sobre texto. Si bien arranca muy similar al caso del clasificador del perceptrón multicapa, vemos que en este caso hacemos uso de [`torch.nn.Conv1d`](https://pytorch.org/docs/stable/generated/torch.nn.Conv1d.html) dado que sólo nos desplazamos por una dimensión (i.e. la secuencia). En particular, como utilizamos *max pooling* global, no hacemos uso del módulo `torch.nn` para calcularlo, simplemente utilizamos el método `max()` del tensor.
# + slideshow={"slide_type": "subslide"}
class IMDBReviewsClassifier(nn.Module):
    """CNN text classifier over pretrained word embeddings.

    Applies parallel Conv1d filters of several widths over the embedded
    sequence, global-max-pools each, and feeds the concatenation through a
    small MLP ending in a sigmoid (binary sentiment probability).

    Args:
        pretrained_embeddings_path: gzipped text file of "word v1 v2 ..." lines.
        dictionary: gensim-style dictionary (needs ``token2id`` and ``len()``).
        vector_size: embedding dimensionality.
        freeze_embedings: if True, embeddings are not fine-tuned.
        filters_count: filters per kernel width; defaults to the module-level
            FILTERS_COUNT (backward compatible with the original behavior).
        filters_length: iterable of kernel widths; defaults to FILTERS_LENGTH.
    """

    def __init__(self,
                 pretrained_embeddings_path,
                 dictionary,
                 vector_size,
                 freeze_embedings,
                 filters_count=None,
                 filters_length=None):
        super().__init__()
        # Generalization: the hyperparameters were previously hard-coded to
        # the module-level globals; they are now overridable per instance.
        if filters_count is None:
            filters_count = FILTERS_COUNT
        if filters_length is None:
            filters_length = FILTERS_LENGTH
        # Random init for words missing from the pretrained file; row 0 is
        # the all-zeros padding vector.
        embeddings_matrix = torch.randn(len(dictionary), vector_size)
        embeddings_matrix[0] = torch.zeros(vector_size)
        with gzip.open(pretrained_embeddings_path, "rt") as fh:
            for line in fh:
                word, vector = line.strip().split(None, 1)
                if word in dictionary.token2id:
                    embeddings_matrix[dictionary.token2id[word]] = \
                        torch.FloatTensor([float(n) for n in vector.split()])
        self.embeddings = nn.Embedding.from_pretrained(embeddings_matrix,
                                                       freeze=freeze_embedings,
                                                       padding_idx=0)
        # One Conv1d per kernel width, registered through ModuleList so the
        # parameters are visible to the optimizer.
        self.convs = nn.ModuleList([
            nn.Conv1d(vector_size, filters_count, filter_length)
            for filter_length in filters_length
        ])
        self.fc = nn.Linear(filters_count * len(filters_length), 128)
        self.output = nn.Linear(128, 1)
        self.vector_size = vector_size

    @staticmethod
    def conv_global_max_pool(x, conv):
        """Convolve, then take the global max over the sequence dimension."""
        return F.relu(conv(x).transpose(1, 2).max(1)[0])

    def forward(self, x):
        """Map a LongTensor (batch, seq_len) of token ids to (batch, 1) probabilities."""
        x = self.embeddings(x).transpose(1, 2)  # Conv1d takes (batch, channel, seq_len)
        x = [self.conv_global_max_pool(x, conv) for conv in self.convs]
        x = torch.cat(x, dim=1)
        x = F.relu(self.fc(x))
        x = torch.sigmoid(self.output(x))
        return x
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Experimento
#
# El experimento de MLflow es prácticamente igual, salvo que cambiamos algunos de los parámetros a guardar.
# + slideshow={"slide_type": "subslide"}
# MLflow experiment: log hyperparameters, per-epoch train/test metrics, and
# the final test predictions as a CSV artifact.
mlflow.set_experiment("a_naive_experiment")
with mlflow.start_run():
    mlflow.log_param("model_name", "cnn")
    mlflow.log_param("freeze_embedding", True)
    mlflow.log_params({
        "filters_count": FILTERS_COUNT,
        "filters_length": FILTERS_LENGTH,
        "fc_size": 128
    })
    model = IMDBReviewsClassifier("./data/glove.6B.50d.txt.gz", preprocess.dictionary, 50, True)
    loss = nn.BCELoss()
    optimizer = optim.Adam(model.parameters(), lr=1e-3, weight_decay=1e-5)
    # NOTE(review): trains for 3 epochs, not the EPOCHS (=2) constant defined
    # above — confirm which value is intended.
    for epoch in trange(3):
        model.train()
        running_loss = []
        for idx, batch in enumerate(tqdm(train_loader)):
            optimizer.zero_grad()
            output = model(batch["data"])
            # BCELoss expects the targets shaped like the (batch, 1) output.
            loss_value = loss(output, batch["target"].view(-1, 1))
            loss_value.backward()
            optimizer.step()
            running_loss.append(loss_value.item())
        mlflow.log_metric("train_loss", sum(running_loss) / len(running_loss), epoch)
        # Per-epoch evaluation on the held-out set.
        model.eval()
        running_loss = []
        targets = []
        predictions = []
        for batch in tqdm(test_loader):
            output = model(batch["data"])
            running_loss.append(
                loss(output, batch["target"].view(-1, 1)).item()
            )
            targets.extend(batch["target"].numpy())
            predictions.extend(output.squeeze().detach().numpy())
        mlflow.log_metric("test_loss", sum(running_loss) / len(running_loss), epoch)
        mlflow.log_metric("test_avp", average_precision_score(targets, predictions), epoch)
    # Final pass over the test set; store predictions next to targets and
    # upload the compressed CSV as a run artifact.
    with tempfile.TemporaryDirectory() as tmpdirname:
        targets = []
        predictions = []
        for batch in tqdm(test_loader):
            output = model(batch["data"])
            targets.extend(batch["target"].numpy())
            predictions.extend(output.squeeze().detach().numpy())
        pd.DataFrame({"prediction": predictions, "target": targets}).to_csv(
            f"{tmpdirname}/predictions.csv.gz", index=False
        )
        mlflow.log_artifact(f"{tmpdirname}/predictions.csv.gz")
|
4_cnns.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
from scipy.stats import chi2
import numpy as np
from numba import jit, prange
# %matplotlib inline
def Normalization(Data):
    """Standardize each column of Data to zero mean and unit variance."""
    column_means = np.mean(Data, axis=0)
    column_stds = np.std(Data, axis=0)
    return (Data - column_means) / column_stds
def ReturnDataFrame(path):
    """Load a comma-separated file into a DataFrame, trimming the spaces
    that may follow each delimiter."""
    return pd.read_csv(path, sep=',', skipinitialspace=True)
def MahalonobisDetection(Data, alpha):
    """Squared Mahalanobis distance of every row plus a chi-square cutoff.

    Returns (M, c, Cov): the per-row squared distances to the sample mean,
    the chi-square critical value at significance ``alpha`` with
    n_features degrees of freedom, and the sample covariance matrix.
    """
    centered = Data - np.mean(Data, axis=0)
    n_samples, n_features = centered.shape
    # Unbiased sample covariance of the centered data.
    Cov = (1/float(n_samples-1))*np.dot(centered.T, centered)
    # Precompute Cov^{-1} @ X^T once, then take row-wise quadratic forms.
    projected = np.dot(np.linalg.inv(Cov), centered.T)
    M = np.zeros(n_samples)
    for row in range(0, n_samples):
        M[row] = np.dot(centered[row, :], projected[:, row])
    c = chi2.isf(alpha, n_features)
    return M, c, Cov
@jit(parallel=True, nogil=True, cache=True)
def NumbaMahalonobisDetection(Data, alpha):
    """Numba-accelerated squared Mahalanobis distances.

    Same contract as MahalonobisDetection, but the per-row quadratic forms
    run in a numba ``prange`` parallel loop and the chi-square cutoff uses
    the jitted approximation in ``chi_statistics`` instead of scipy.
    """
    # Get shape of data
    n1, n2 = Data.shape
    # Center the data (subtract the column means)
    Data = Data - Data.mean(axis=0)
    # Unbiased sample covariance
    Cov = (1/float(n1-1))*np.dot(Data.T, Data)
    M = np.zeros(n1)
    # Using extra memory: precompute Cov^{-1} @ Data.T once
    RightP = np.dot(np.linalg.inv(Cov), Data.T)
    # Parallel loop over rows (numba prange)
    for i in prange(0, n1):
        M[i] = np.dot(Data[i, :], RightP[:, i])
    c = chi_statistics(alpha, n2)
    return M, c, Cov
@jit(nopython=True, parallel=True, nogil=True, cache=True)
def chi_statistics(alpha, k):
    """Approximate the chi-square critical value X^2_{alpha, k}.

    Uses the Wilson-Hilferty cube-root approximation so the computation can
    run inside numba nopython code (scipy.stats is not available there).
    """
    # Wilson and Hilferty approximation
    return k*np.power(z(alpha)*np.sqrt(2.0/(9.0*k))+(1.0-(2.0/(9.0*k))), 2)
@jit(nopython=True, parallel=True, nogil=True, cache=True)
def z(alpha):
    """Standard-normal quantile z such that P(Z > z) = alpha.

    Uses Shore's 1982 closed-form approximation, which is numba-friendly.
    """
    # Convert the upper-tail probability to a CDF value
    p = 1.0-alpha
    z = 5.5556*(1.0-np.power(((1.0-p)/p), 0.1986))
    return z
# +
# Load CSV of voice features
Path1 = 'voice.csv'
DataMatrix = ReturnDataFrame(Path1)
# Shuffle the data randomly
DataMatrix = DataMatrix.sample(frac=1).reset_index(drop=True)
# Encode labels numerically: male -> 1.0, female -> -1.0
DataMatrix.replace({'male': 1.0, 'female': -1.0},
                   inplace=True)
DataLabels = DataMatrix['label']
DataMatrix.drop('label', axis=1, inplace=True)
# Transform to an NP Array
Data = DataMatrix.values
Label = DataLabels.values
# Boolean masks per class.
# NOTE(review): fmask selects label == 1.0, which is 'male' under the mapping
# above, despite the "f"/"m" naming — confirm the intended assignment.
fmask = (Label == 1.0)
mmask = (Label == -1.0)
# Normalize your Data #
NData = np.asmatrix(Normalization(Data))
Class1 = NData[fmask, :]
Class2 = NData[mmask, :]
alpha = 0.10
# -
# %%timeit -n 100
# Benchmark the pure-numpy implementation
M1, c1, _ = MahalonobisDetection(Class1, alpha)
# %%timeit -n 100
# Benchmark the numba-parallel implementation
M2, c2, _ = NumbaMahalonobisDetection(Class1, alpha)
print(c1)
print(c2)
M1
M2
|
Jupyter/Class_ML_Path/04 Machine Learning Path/Numba_Test_Mahalonobis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:detect] *
# language: python
# name: conda-env-detect-py
# ---
# # Likelihood Ratio Outlier Detection with PixelCNN++
#
# ## Method
#
# The outlier detector described by [Ren et al. (2019)](https://arxiv.org/abs/1906.02845) in [Likelihood Ratios for Out-of-Distribution Detection](https://arxiv.org/abs/1906.02845) uses the likelihood ratio between 2 generative models as the outlier score. One model is trained on the original data while the other is trained on a perturbed version of the dataset. This is based on the observation that the likelihood score for an instance under a generative model can be heavily affected by population level background statistics. The second generative model is therefore trained to capture the background statistics still present in the perturbed data while the semantic features have been erased by the perturbations.
#
# The perturbations are added using an independent and identical Bernoulli distribution with rate $\mu$ which substitutes a feature with one of the other possible feature values with equal probability. For images, this means changing a pixel with a different pixel randomly sampled within the $0$ to $255$ pixel range.
#
# The generative model used in the example is a [PixelCNN++](https://arxiv.org/abs/1701.05517), adapted from the official TensorFlow Probability [implementation](https://www.tensorflow.org/probability/api_docs/python/tfp/distributions/PixelCNN), and available as a standalone model in `alibi_detect.models.pixelcnn`.
#
#
# ## Dataset
#
# The training set [Fashion-MNIST](https://github.com/zalandoresearch/fashion-mnist) consists of 60,000 28 by 28 grayscale images distributed over 10 classes. The classes represent items of clothing such as shirts or trousers. At test time, we want to distinguish the Fashion-MNIST test set from MNIST, which represents 28 by 28 grayscale numbers from 0 to 9.
# +
import os
from functools import partial
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
import tensorflow as tf
from alibi_detect.od import LLR
from alibi_detect.models import PixelCNN
from alibi_detect.utils.fetching import fetch_detector
from alibi_detect.utils.saving import save_detector, load_detector
from alibi_detect.utils.prediction import predict_batch
from alibi_detect.utils.visualize import plot_roc
# -
# ### Utility Functions
# +
def load_data(dataset: str) -> tuple:
    """Load MNIST or Fashion-MNIST as float32 images and int64 labels.

    Returns ((X_train, y_train), (X_test, y_test)); grayscale images that
    arrive as (n, H, W) gain a trailing channel axis -> (n, H, W, 1).
    Raises NotImplementedError for any other dataset name.
    """
    if dataset == 'mnist':
        (X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()
    elif dataset == 'fashion_mnist':
        (X_train, y_train), (X_test, y_test) = tf.keras.datasets.fashion_mnist.load_data()
    else:
        raise NotImplementedError
    X_train, X_test = X_train.astype('float32'), X_test.astype('float32')
    y_train = y_train.astype('int64').reshape(-1,)
    y_test = y_test.astype('int64').reshape(-1,)
    if len(X_train.shape) == 3:
        # Add the channel axis expected by convolutional models.
        channel_shape = (-1,) + X_train.shape[1:] + (1,)
        X_train = X_train.reshape(channel_shape)
        X_test = X_test.reshape(channel_shape)
    return (X_train, y_train), (X_test, y_test)
def plot_grid_img(X: np.ndarray, figsize: tuple = (10, 6)) -> None:
    """Show the images in X (n, H, W, C) on a roughly square subplot grid.

    Bug fix: the original iterated over every cell of the nrows x ncols
    grid, indexing past the end of X whenever n is not an exact multiple of
    the grid shape (e.g. n=10 -> 3x4 grid -> IndexError at X[10]). We now
    stop after the n-th image and leave the remaining cells empty.
    """
    n = X.shape[0]
    nrows = int(n**.5)
    ncols = int(np.ceil(n / nrows))
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    n_subplot = 1
    for r in range(nrows):
        for c in range(ncols):
            if n_subplot > n:  # the grid may have more cells than images
                return
            plt.subplot(nrows, ncols, n_subplot)
            plt.axis('off')
            plt.imshow(X[n_subplot-1, :, :, 0])
            n_subplot += 1
def plot_grid_logp(idx: list, X: np.ndarray, logp_s: np.ndarray,
                   logp_b: np.ndarray, figsize: tuple = (10, 6)) -> None:
    """Plot, for each index in idx: the image, its semantic log-prob map,
    its background log-prob map, and their difference (pixel-wise LLR).

    One grid row per instance, four columns; titles appear on row 0 only.
    The four previously copy-pasted panel stanzas are consolidated into a
    single data-driven loop with the same draw order per panel
    (imshow -> colorbar -> axis off -> optional title).
    """
    nrows, ncols = len(idx), 4
    fig, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize)
    n_subplot = 1
    for r in range(nrows):
        i = idx[r]
        # (panel data, column title) for the four columns of this row.
        panels = [
            (X[i, :, :, 0], 'Image'),
            (logp_s[i, :, :], 'Semantic Logp'),
            (logp_b[i, :, :], 'Background Logp'),
            (logp_s[i, :, :] - logp_b[i, :, :], 'LLR'),
        ]
        for panel, title in panels:
            plt.subplot(nrows, ncols, n_subplot)
            plt.imshow(panel)
            plt.colorbar()
            plt.axis('off')
            if r == 0:
                plt.title(title)
            n_subplot += 1
# -
# ### Load data
#
# The in-distribution dataset is Fashion-MNIST and the out-of-distribution dataset we'd like to detect is MNIST.
# Fashion-MNIST is the in-distribution set; MNIST is the OOD set to detect.
(X_train_in, y_train_in), (X_test_in, y_test_in) = load_data('fashion_mnist')
X_test_ood, y_test_ood = load_data('mnist')[1]
input_shape = X_train_in.shape[1:]
print(X_train_in.shape, X_test_in.shape, X_test_ood.shape)
# Show one sample image from each dataset.
i = 0
plt.imshow(X_train_in[i].reshape(input_shape[:-1]))
plt.title('Fashion-MNIST')
plt.axis('off')
plt.show();
plt.imshow(X_test_ood[i].reshape(input_shape[:-1]))
plt.title('MNIST')
plt.axis('off')
plt.show();
# ### Define PixelCNN++ model
#
# We now need to define our generative model. This is not necessary if the pretrained detector is later loaded from the Google Bucket.
#
# Key PixelCNN++ arguments in a nutshell:
#
# * *num_resnet*: number of layers ([Fig.2 PixelCNN](https://arxiv.org/abs/1606.05328)) within each hierarchical block ([Fig.2 PixelCNN++](https://arxiv.org/abs/1701.05517)).
#
# * *num_hierarchies*: number of blocks separated by expansions or contractions of dimensions. See [Fig.2 PixelCNN++](https://arxiv.org/abs/1701.05517).
#
# * *num_filters*: number of convolutional filters.
#
# * *num_logistic_mix*: number of components in the logistic mixture distribution.
#
# * *receptive_field_dims*: height and width in pixels of the receptive field above and to the left of a given pixel.
#
# Optionally, a different model can be passed to the detector with argument *model_background*. The [Likelihood Ratio paper](https://arxiv.org/abs/1906.02845) mentions that additional $L2$-regularization (*l2_weight*) for the background model could improve detection performance.
# PixelCNN++ generative model over the 28x28x1 images; deliberately small
# (few filters / one mixture component) to keep training tractable.
model = PixelCNN(
    image_shape=input_shape,
    num_resnet=5,
    num_hierarchies=2,
    num_filters=32,
    num_logistic_mix=1,
    receptive_field_dims=(3, 3),
    dropout_p=.3,
    l2_weight=0.
)
# ### Load or train the outlier detector
#
# We can again either fetch the pretrained detector from a [Google Cloud Bucket](https://console.cloud.google.com/storage/browser/seldon-models/alibi-detect/od/LLR/fashion_mnist) or train one from scratch:
# Either fetch the pretrained LLR detector from the public bucket or train
# one from scratch on the in-distribution training set.
load_pretrained = True
filepath = os.path.join(os.getcwd(), 'my_path')  # change to download directory
if load_pretrained:  # load pretrained outlier detector
    detector_type = 'outlier'
    dataset = 'fashion_mnist'
    detector_name = 'LLR'
    od = fetch_detector(filepath, detector_type, dataset, detector_name)
    filepath = os.path.join(filepath, detector_name)
else:
    # initialize detector; the threshold is inferred later via infer_threshold
    od = LLR(threshold=None, model=model)
    # train: rate=.2 is the Bernoulli perturbation rate used to build the
    # background-model training data
    od.fit(
        X_train_in,
        mutate_fn_kwargs=dict(rate=.2),
        mutate_batch_size=1000,
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
        epochs=20,
        batch_size=32,
        verbose=False
    )
    # save the trained outlier detector
    save_detector(od, filepath)
# Reload the saved detector, providing the semantic (dist_s) and background
# (dist_b) architectures plus the input shape.
kwargs = {'dist_s': model, 'dist_b': model.copy(), 'input_shape': input_shape}
od = load_detector(filepath, **kwargs)
# Let's sample some instances from the semantic model to check how good our generative model is:
# Sample from the fitted semantic model to eyeball generation quality.
n_sample = 16
X_sample = od.dist_s.sample(n_sample).numpy()
plot_grid_img(X_sample)
# Most of the instances look like they represent the dataset well. When we do the same thing for our background model, we see that there is some background noise injected:
X_sample = od.dist_b.sample(n_sample).numpy()
plot_grid_img(X_sample)
# ### Compare the log likelihoods
#
# Let's compare the log likelihoods of the inliers vs. the outlier data under the semantic and background models. Although MNIST data looks very distinct from Fashion-MNIST, the generative model does not distinguish well between the 2 datasets as shown by the histograms of the log likelihoods:
# Log-likelihoods of inlier vs OOD test sets under both generative models.
shape_in, shape_ood = X_test_in.shape[0], X_test_ood.shape[0]
# semantic model
logp_s_in = predict_batch(od.dist_s.log_prob, X_test_in, batch_size=32, shape=shape_in)
logp_s_ood = predict_batch(od.dist_s.log_prob, X_test_ood, batch_size=32, shape=shape_ood)
logp_s = np.concatenate([logp_s_in, logp_s_ood])
# background model
logp_b_in = predict_batch(od.dist_b.log_prob, X_test_in, batch_size=32, shape=shape_in)
logp_b_ood = predict_batch(od.dist_b.log_prob, X_test_ood, batch_size=32, shape=shape_ood)
# +
# show histograms
plt.hist(logp_s_in, bins=100, label='in');
plt.hist(logp_s_ood, bins=100, label='ood');
plt.title('Semantic Log Probabilities')
plt.legend()
plt.show()
plt.hist(logp_b_in, bins=100, label='in');
plt.hist(logp_b_ood, bins=100, label='ood');
plt.title('Background Log Probabilities')
plt.legend()
plt.show()
# -
# This is due to the dominance of the background which is similar (basically lots of $0$'s for both datasets). If we however take the likelihood ratio, the MNIST data are detected as outliers. And this is exactly what the outlier detector does as well:
# Likelihood ratios: subtracting the background log-likelihood cancels the
# shared background statistics and separates MNIST from Fashion-MNIST.
llr_in = logp_s_in - logp_b_in
llr_ood = logp_s_ood - logp_b_ood
plt.hist(llr_in, bins=100, label='in');
plt.hist(llr_ood, bins=100, label='ood');
plt.title('Likelihood Ratio')
plt.legend()
plt.show()
# ### Detect outliers
#
# We follow the same procedure with the outlier detector. First we need to set an outlier threshold with `infer_threshold`. We need to pass a batch of instances and specify what percentage of those we consider to be normal via `threshold_perc`. Let's assume we have a small batch of data with roughly $50$% outliers but we don't know exactly which ones.
# Build a ~50/50 inlier/outlier batch and infer the detection threshold from
# it; threshold_perc is the share of the batch treated as normal.
n, frac_outlier = 500, .5
perc_outlier = 100 * frac_outlier
n_in, n_ood = int(n * (1 - frac_outlier)), int(n * frac_outlier)
idx_in = np.random.choice(shape_in, size=n_in, replace=False)
idx_ood = np.random.choice(shape_ood, size=n_ood, replace=False)
X_threshold = np.concatenate([X_test_in[idx_in], X_test_ood[idx_ood]])
od.infer_threshold(X_threshold, threshold_perc=perc_outlier, batch_size=32)
print('New threshold: {}'.format(od.threshold))
# Let's save the outlier detector with updated threshold:
save_detector(od, filepath)
# Predict outliers on the combined test sets; label 0 = inlier (Fashion-MNIST),
# 1 = outlier (MNIST).
X_test = np.concatenate([X_test_in, X_test_ood])
y_test = np.concatenate([np.zeros(X_test_in.shape[0]), np.ones(X_test_ood.shape[0])])
print(X_test.shape, y_test.shape)
od_preds = od.predict(X_test,
                      batch_size=32,
                      outlier_type='instance',     # use 'feature' or 'instance' level
                      return_feature_score=True,   # scores used to determine outliers
                      return_instance_score=True)
# ### Display results
#
# F1 score, accuracy, precision, recall and confusion matrix:
# Binary outlier predictions vs the ground-truth OOD labels.
y_pred = od_preds['data']['is_outlier']
labels = ['normal', 'outlier']
f1 = f1_score(y_test, y_pred)
acc = accuracy_score(y_test, y_pred)
prec = precision_score(y_test, y_pred)
rec = recall_score(y_test, y_pred)
print('F1 score: {:.3f} -- Accuracy: {:.3f} -- Precision: {:.3f} '
      '-- Recall: {:.3f}'.format(f1, acc, prec, rec))
cm = confusion_matrix(y_test, y_pred)
df_cm = pd.DataFrame(cm, index=labels, columns=labels)
sns.heatmap(df_cm, annot=True, cbar=True, linewidths=.5)
plt.show()
# We can also plot the ROC curve based on the instance level outlier scores and compare it with the likelihood of only the semantic model:
roc_data = {
    'LLR': {'scores': od_preds['data']['instance_score'], 'labels': y_test},
    'Likelihood': {'scores': -logp_s, 'labels': y_test}  # negative b/c outlier score
}
plot_roc(roc_data)
# ### Analyse feature scores
#
# To understand why the likelihood ratio works to detect outliers but the raw log likelihoods don't, it is helpful to look at the pixel-wise log likelihoods of both the semantic and background models.
n_plot = 5
# +
# semantic model: per-pixel log-likelihoods for a few instances
logp_fn_s = partial(od.dist_s.log_prob, return_per_feature=True)
logp_s_pixel_in = predict_batch(logp_fn_s, X_test_in[:n_plot], batch_size=32)
logp_s_pixel_ood = predict_batch(logp_fn_s, X_test_ood[:n_plot], batch_size=32)
# background model: per-pixel log-likelihoods
logp_fn_b = partial(od.dist_b.log_prob, return_per_feature=True)
logp_b_pixel_in = predict_batch(logp_fn_b, X_test_in[:n_plot], batch_size=32)
logp_b_pixel_ood = predict_batch(logp_fn_b, X_test_ood[:n_plot], batch_size=32)
# pixel-wise likelihood ratios
llr_pixel_in = logp_s_pixel_in - logp_b_pixel_in
llr_pixel_ood = logp_s_pixel_ood - logp_b_pixel_ood
# -
# Plot in-distribution instances:
idx = list(np.arange(n_plot))
plot_grid_logp(idx, X_test_in, logp_s_pixel_in, logp_b_pixel_in, figsize=(14,14))
# It is clear that both the semantic and background model attach high probabilities to the background pixels. This effect is cancelled out in the likelihood ratio in the last column. The same applies to the out-of-distribution instances:
idx = list(np.arange(n_plot))
plot_grid_logp(idx, X_test_ood, logp_s_pixel_ood, logp_b_pixel_ood, figsize=(14,14))
|
examples/od_llr_mnist.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="IdIhVR8fpmzl"
# # "Dynamic display of audio spectrogram of voice"
# > "Wanted a simple way to show the different frequencies we use when we talk"
#
#
# - toc: true
# - branch: master
# - badges: false
# - comments: true
# - categories: [audio, dsp, spectrogram, librosa]
# - image: images/spectrogram/Feb-05-2021_12-10-51_sample_audio.gif
# - hide: false
# - search_exclude: true
# - metadata_key1: metadata_value1
# - metadata_key2: metadata_value2
# + [markdown] id="PoM5SOK8qHmJ"
# #Problem
# + [markdown] id="_-h47fIwqKBI"
# Wanted a simple way to show that when we talk we use different frequencies.
# + [markdown] id="-9yX6sBZqXnu"
# #Solution
# + [markdown] id="ZfA1Mfs5qZwM"
#
#
# * Record sound using the computers microphone
# * Analyze the audio with [librosa](https://librosa.org/)
# * Display the results dynamically
#
#
# + [markdown] id="mGr-9_XhrD3B"
# ## Record sound
# + [markdown] id="pJuyAdyE3FuQ"
# install libraries
# + colab={"base_uri": "https://localhost:8080/"} id="b5IL_1U3w1mV" outputId="5307acb8-f1e4-4209-fcfc-9640dab5b497"
#hide_show
# !pip install pyaudio
# + [markdown] id="eNLcxZV63JrN"
# import libraries
# + id="jRO28MjfrJh2"
#hide_show
import pyaudio
import time
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np
from IPython import display
# + [markdown] id="DxkvHECs3P3q"
# function for displaying spectrogram of audio
# + id="yRV29QKqxAp2"
def plot_spectrogram(wave_data, sr=44100, DIV=4, n_fft=2048, hop_length=512):
    """Render a linear-frequency STFT magnitude spectrogram of wave_data.

    Clears the current figure and redraws it in place so successive calls
    animate inside a notebook cell. NOTE(review): the DIV parameter is
    accepted but never used — kept only for interface compatibility.
    """
    plt.clf()
    magnitude = np.abs(librosa.stft(wave_data, n_fft=n_fft, hop_length=hop_length))
    librosa.display.specshow(magnitude, sr=sr, x_axis='time', y_axis='linear')
    display.display(plt.gcf())
    display.clear_output(wait=True)
# + [markdown] id="n0u8P8BDy_R7"
# Code below works directly on a PC in a Jupyter notebook (NOT on a virtual machine; this won't work in Colab)
#
# Continuously stream audio to spectrogram for analysis
# + id="de4BebJixDjr"
plt.rcParams['figure.figsize'] = [15, 15]
p = pyaudio.PyAudio()
# Rolling buffer holding the most recent audio chunks as int16 arrays.
frames = []
max_frames = 50
RATE = 44100              # sample rate in Hz
BUFFER_SIZE = 1024 * 2    # samples delivered per callback

def callback(in_data, frame_count, time_info, status):
    # PyAudio stream callback: append the new chunk and drop the oldest
    # once the rolling buffer is full.
    frames.append(np.frombuffer(in_data, dtype=np.int16))
    if len(frames) > max_frames:
        frames.pop(0)
    return (in_data, pyaudio.paContinue)

start_t = time.time()
stream = p.open(format=pyaudio.paInt16,
                channels=1,
                rate=RATE,
                input=True,
                frames_per_buffer=BUFFER_SIZE,
                stream_callback=callback)
stream.start_stream()
# Re-plot the spectrogram of the latest ~20 chunks until interrupted.
while True:
    try:
        if len(frames) >= 4:
            wave_data = np.concatenate( tuple(frames[-20:]) )
            # Crude scaling toward [-1, 1] for librosa.
            # NOTE(review): 9000 is an ad-hoc divisor; int16 full scale is
            # 32768 — confirm the intended normalization.
            wave_data = wave_data/9000.
            plot_spectrogram(wave_data)
        time.sleep(0.01)
    except KeyboardInterrupt:
        break
print("stop stream")
stream.stop_stream()
stream.close()
p.terminate()
# + [markdown] id="dg68GXct7Opq"
# # example of raw data being captured and analyzed.
# + [markdown] id="ePom1q0-4T6q"
# 
|
_notebooks/2021_02_1_audio_spectrogram.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: sax
# language: python
# name: sax
# ---
# +
# default_exp typing_
# -
# # Typing
#
# > SAX types
# +
# hide
import matplotlib.pyplot as plt
from fastcore.test import test_eq
from pytest import approx, raises
import os, sys; sys.stderr = open(os.devnull, "w")
# +
# export
from __future__ import annotations
import functools
import inspect
from collections.abc import Callable as CallableABC
from typing import Any, Callable, Dict, Tuple, TypedDict, Union, cast, overload
import jax.numpy as jnp
import numpy as np
from natsort import natsorted
# -
# ## Core Types
# #### Array
# an `Array` is either a jax array or a numpy array:
# exports
Array = Union[jnp.ndarray, np.ndarray]
# #### Int
# An `Int` is either a built-in `int` or an `Array` [of dtype `int`]
# exports
Int = Union[int, Array]
# #### Float
# A `Float` is either a built-in `float` or an `Array` [of dtype `float`]
# exports
Float = Union[float, Array]
# #### ComplexFloat
# A `ComplexFloat` is either a built-in `complex` or an Array [of dtype `complex`]:
# exports
ComplexFloat = Union[complex, Float]
# #### Settings
# A `Settings` dictionary is a nested mapping between setting names [`str`] to either `ComplexFloat` values OR to another lower level `Settings` dictionary.
# exports
Settings = Union[Dict[str, ComplexFloat], Dict[str, "Settings"]]
# Settings dictionaries are used to parametrize a SAX `Model` or a `circuit`. The settings dictionary should have the same hierarchy levels as the circuit:
#
# > Example:
mzi_settings = {
"wl": 1.5, # global settings
"lft": {"coupling": 0.5}, # settings for the left coupler
"top": {"neff": 3.4}, # settings for the top waveguide
"rgt": {"coupling": 0.3}, # settings for the right coupler
}
# #### SDict
# An `SDict` is a sparse dictionary based representation of an S-matrix, mapping port name tuples such as `('in0', 'out0')` to `ComplexFloat`.
# exports
SDict = Dict[Tuple[str, str], ComplexFloat]
# > Example:
_sdict: SDict = {
("in0", "out0"): 3.0,
}
# #### SCoo
# An `SCoo` is a sparse matrix based representation of an S-matrix consisting of three arrays and a port map. The three arrays represent the input port indices [`int`], output port indices [`int`] and the S-matrix values [`ComplexFloat`] of the sparse matrix. The port map maps a port name [`str`] to a port index [`int`]. Only these four arrays **together** and in this specific **order** are considered a valid `SCoo` representation!
# exports
SCoo = Tuple[Array, Array, ComplexFloat, Dict[str, int]]
# > Example:
Si = jnp.arange(3, dtype=int)
Sj = jnp.array([0, 1, 0], dtype=int)
Sx = jnp.array([3.0, 4.0, 1.0])
port_map = {"in0": 0, "in1": 2, "out0": 1}
_scoo: SCoo = Si, Sj, Sx, port_map
# #### SDense
# an `SDense` is a dense matrix representation of an S-matrix. It's represented by an NxN `ComplexFloat` array and a port map (mapping port names onto port indices).
# exports
SDense = Tuple[Array, Dict[str, int]]
# > Example:
Sd = jnp.arange(9, dtype=float).reshape(3, 3)
port_map = {"in0": 0, "in1": 2, "out0": 1}
_sdense = Sd, port_map
# #### SType
# an `SType` is either an `SDict` **OR** an `SCoo` **OR** an `SDense`:
# exports
SType = Union[SDict, SCoo, SDense]
# > Example:
obj: SType = _sdict
obj: SType = _scoo
obj: SType = _sdense
# #### Model
# A `Model` is any keyword-only function that returns an `SType`:
# exports
Model = Callable[..., SType]
# #### ModelFactory
# A `ModelFactory` is any keyword-only function that returns a `Model`:
# exports
ModelFactory = Callable[..., Model]
# > Note: SAX sometimes needs to figure out the difference between a `ModelFactory` and a normal `Model` *before* running the function. To do this, SAX will check the return annotation of the function. Any function with a `-> Model` or `-> Callable` annotation will be considered a `ModelFactory`. Any function without this annotation will be considered a normal Model: **don't forget the return annotation of your Model Factory!** To ensure a correct annotation and to ensure forward compatibility, it's recommended to decorate your `ModelFactory` with the `modelfactory` decorator.
# #### GeneralModel
# a `GeneralModel` is either a `Model` or a `LogicalNetlist` (will be defined below):
# exports
GeneralModel = Union[Model, "LogicalNetlist"]
# #### Models
# `Models` is a mapping between model names [`str`] and `GeneralModel`:
# exports
Models = Dict[str, GeneralModel]
# > Note: sometimes 'component' is used to refer to a `Model` or `GeneralModel`. This is because other tools (such as for example GDSFactory) prefer that terminology.
# ## Netlist Types
# #### Instance
# A netlist `Instance` is a mapping with two keys: `"component"`, which should map to a key in a `Models` dictionary and `"settings"`, which are all the necessary settings to instanciate a component:
# exports
Instance = TypedDict(
"Instance",
{
"component": str,
"settings": Settings,
},
)
# > Note: in SAX, a better name for `"component"` in the instance definition would probably be `"model"` or `"model_name"`. However we chose `"component"` here to have a 1-to-1 map between SAX netlists and GDSFactory netlists.
# #### GeneralInstance
# A general instance can be any of the following (`LogicalNetlist` and `Netlist` will be defined below):
# exports
GeneralInstance = Union[str, Instance, "LogicalNetlist", "Netlist"]
# > For example, this is allowed:
inst: GeneralInstance = "my_component_model"
inst: GeneralInstance = {
"component": "my_component_model",
"settings": {},
}
# > ... and this is not (will be flagged by a static type checker like pyright or mypy):
inst: GeneralInstance = {
"component": "my_component_model",
"settings": {},
"extra_arg": "invalid",
}
# #### Instances
# `Instances` is a mapping from instance names [`str`] to a `GeneralInstance`:
# exports
Instances = Union[Dict[str, str], Dict[str, GeneralInstance]]
# #### Netlist
# a `Netlist` is a collection of `"instances"`, `"connections"` and `"ports"`:
# +
# exports
Netlist = TypedDict(
"Netlist",
{
"instances": Instances,
"connections": Dict[str, str],
"ports": Dict[str, str],
},
)
# -
# > Example:
mzi_netlist: Netlist = {
"instances": {
"lft": "mmi1x2", # shorthand if no settings need to be given
"top": { # full instance definition
"component": "waveguide",
"settings": {
"length": 100.0,
},
},
"rgt": "mmi2x2", # shorthand if no settings need to be given
},
"connections": {
"lft,out0": "top,in0",
"top,out0": "rgt,in0",
"top,out1": "rgt,in1",
},
"ports": {
"in0": "lft,in0",
"out0": "rgt,out0",
"out1": "rgt,out1",
},
}
# #### LogicalNetlist
# a `LogicalNetlist` is a subset of the more general `Netlist`. It only contains the logical connections and instance names. Not the actual instances. This data structure is mostly used for internal use only.
# +
# exports
LogicalNetlist = TypedDict(
"LogicalNetlist",
{
"instances": Dict[str, str],
"connections": Dict[str, str],
"ports": Dict[str, str],
},
)
# -
# > Example:
mzi_logical_netlist: Netlist = {
"instances": {
"lft": "mmi1x2",
"top": "waveguide",
"rgt": "mmi2x2",
},
"connections": {
"lft,out0": "top,in0",
"top,out0": "rgt,in0",
"top,out1": "rgt,in1",
},
"ports": {
"in0": "lft,in0",
"out0": "rgt,out0",
"out1": "rgt,out1",
},
}
# ## Validation and runtime type-checking:
# > Note: the type-checking functions below are **NOT** very tight and hence should be used within the right context!
# export
def is_float(x: Any) -> bool:
    """Check if an object is a `Float`.

    Args:
        x: the object to check.

    Returns:
        True for built-in floats and for numpy/jax arrays with a floating
        dtype; False otherwise (ints, complex values, other objects).
    """
    if isinstance(x, float):
        return True
    if isinstance(x, np.ndarray):
        # use issubdtype rather than a hard-coded dtype tuple: np.float128
        # does not exist on every platform (e.g. Windows), so referencing it
        # directly would raise AttributeError there.
        return np.issubdtype(x.dtype, np.floating)
    if isinstance(x, jnp.ndarray):
        return jnp.issubdtype(x.dtype, jnp.floating)
    return False
assert is_float(3.0)
assert not is_float(3)
assert not is_float(3.0 + 2j)
assert not is_float(jnp.array(3.0, dtype=complex))
assert not is_float(jnp.array(3, dtype=int))
# export
def is_complex(x: Any) -> bool:
    """Check if an object is a `ComplexFloat` (strictly complex).

    Args:
        x: the object to check.

    Returns:
        True for built-in complex numbers and for numpy/jax arrays with a
        complex dtype; False otherwise (plain floats/ints return False).
    """
    if isinstance(x, complex):
        return True
    if isinstance(x, np.ndarray):
        # issubdtype covers the whole complex dtype family; consistent with
        # the issubdtype-based check in `is_float`.
        return np.issubdtype(x.dtype, np.complexfloating)
    if isinstance(x, jnp.ndarray):
        return jnp.issubdtype(x.dtype, jnp.complexfloating)
    return False
assert not is_complex(3.0)
assert not is_complex(3)
assert is_complex(3.0 + 2j)
assert is_complex(jnp.array(3.0, dtype=complex))
assert not is_complex(jnp.array(3, dtype=int))
# export
def is_complex_float(x: Any) -> bool:
    """Check whether an object is either a `Float` or a `ComplexFloat`."""
    return any(pred(x) for pred in (is_float, is_complex))
assert is_complex_float(3.0)
assert not is_complex_float(3)
assert is_complex_float(3.0 + 2j)
assert is_complex_float(jnp.array(3.0, dtype=complex))
assert not is_complex_float(jnp.array(3, dtype=int))
# export
def is_sdict(x: Any) -> bool:
    """Check whether an object is an `SDict` (a SAX S-dictionary).

    Note: this is a shallow check -- any dict qualifies; the keys and
    values are not inspected.
    """
    return isinstance(x, dict)
assert not is_sdict(object())
assert is_sdict(_sdict)
assert not is_sdict(_scoo)
assert not is_sdict(_sdense)
# export
def is_scoo(x: Any) -> bool:
    """Check whether an object is an `SCoo` (a SAX sparse S-matrix in COO format).

    Note: this is a shallow check -- any 4-element tuple or list qualifies.
    """
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 4
assert not is_scoo(object)
assert not is_scoo(_sdict)
assert is_scoo(_scoo)
assert not is_scoo(_sdense)
# export
def is_sdense(x: Any) -> bool:
    """Check whether an object is an `SDense` (a SAX dense S-matrix).

    Note: this is a shallow check -- any 2-element tuple or list qualifies.
    """
    if not isinstance(x, (tuple, list)):
        return False
    return len(x) == 2
assert not is_sdense(object)
assert not is_sdense(_sdict)
assert not is_sdense(_scoo)
assert is_sdense(_sdense)
# +
# export
def is_model(model: Any) -> bool:
    """Check whether a callable qualifies as a SAX `Model`.

    A `Model` is a callable that (1) accepts keyword arguments only (every
    parameter must carry a default) and (2) is not annotated as returning a
    callable -- such an annotation marks a `ModelFactory` instead.
    """
    if not callable(model):
        return False
    try:
        signature = inspect.signature(model)
    except ValueError:  # some callables have no introspectable signature
        return False
    # a proper SAX model does not have any positional arguments:
    if any(
        param.default is inspect.Parameter.empty
        for param in signature.parameters.values()
    ):
        return False
    # a Callable-like return annotation means "model factory", not "model":
    return not _is_callable_annotation(signature.return_annotation)
def _is_callable_annotation(annotation: Any) -> bool:
    """Check whether a return annotation is `Callable`-like."""
    if isinstance(annotation, str):
        # string annotations occur when the defining module imported
        # `from __future__ import annotations`; fall back to name matching.
        return annotation.startswith("Callable") or annotation.endswith("Model")
    # TODO: this origin-based check is not very robust...
    origin = getattr(annotation, "__origin__", None)
    return origin == CallableABC
# -
# hide
assert _is_callable_annotation(Callable)
assert not _is_callable_annotation(SDict)
# +
def good_model(x=jnp.array(3.0), y=jnp.array(4.0)) -> SDict:
return {("in0", "out0"): jnp.array(3.0)}
assert is_model(good_model)
def bad_model(positional_argument, x=jnp.array(3.0), y=jnp.array(4.0)) -> SDict:
return {("in0", "out0"): jnp.array(3.0)}
assert not is_model(bad_model)
# -
# export
def is_model_factory(model: Any) -> bool:
    """Check whether a callable is a `ModelFactory`.

    A callable counts as a model *factory* (rather than a model) when its
    return annotation is `Callable`-like (e.g. ``-> Model``).

    Args:
        model: the object to check.

    Returns:
        True if `model` is callable and annotated as returning a callable.
    """
    if not callable(model):
        return False
    try:
        sig = inspect.signature(model)
    except ValueError:
        # consistent with `is_model`: an unintrospectable callable is not
        # considered a factory instead of raising.
        return False
    if _is_callable_annotation(sig.return_annotation):  # model factory
        return True
    return False
# > Note: For a `Callable` to be considered a `ModelFactory` in SAX, it **MUST** have a `Callable` or `Model` return annotation. Otherwise SAX will view it as a `Model` and things might break!
# +
def func() -> Model:
...
assert is_model_factory(func) # yes, we only check the annotation for now...
def func():
...
assert not is_model_factory(func) # yes, we only check the annotation for now...
# -
# export
def validate_model(model: Callable):
    """Validate the parameters of a model.

    Raises:
        ValueError: if `model` has any parameter without a default value --
            a SAX model must be callable with keyword arguments only.
    """
    signature = inspect.signature(model)
    positional_arguments = [
        name
        for name, param in signature.parameters.items()
        if param.default is inspect.Parameter.empty
    ]
    if positional_arguments:
        raise ValueError(
            f"model '{model}' takes positional arguments {', '.join(positional_arguments)} "
            "and hence is not a valid SAX Model! A SAX model should ONLY take keyword arguments (or no arguments at all)."
        )
# +
def good_model(x=jnp.array(3.0), y=jnp.array(4.0)) -> SDict:
return {("in0", "out0"): jnp.array(3.0)}
assert validate_model(good_model) is None
# +
def bad_model(positional_argument, x=jnp.array(3.0), y=jnp.array(4.0)) -> SDict:
return {("in0", "out0"): jnp.array(3.0)}
with raises(ValueError):
validate_model(bad_model)
# -
# export
def is_instance(instance: Any) -> bool:
    """Check whether a dictionary looks like a netlist instance definition.

    An instance is any dict that carries a "component" key.
    """
    return isinstance(instance, dict) and "component" in instance
# export
def is_netlist(netlist: Any) -> bool:
    """Check whether a dictionary is a netlist.

    A netlist is a dict containing at least the keys "instances",
    "connections" and "ports".
    """
    if not isinstance(netlist, dict):
        return False
    # single membership scan replaces the repeated `not "x" in d` checks
    # (the `not ... in` form is the E713 anti-idiom).
    return all(key in netlist for key in ("instances", "connections", "ports"))
# export
def is_stype(stype: Any) -> bool:
    """Check whether an object is any S-matrix representation
    (`SDict`, `SCoo` or `SDense`)."""
    return any(check(stype) for check in (is_sdict, is_scoo, is_sdense))
# +
# export
def is_singlemode(S: Any) -> bool:
    """Check whether an stype is single mode (no port name contains '@')."""
    if not is_stype(S):
        return False
    return all("@" not in port for port in _get_ports(S))
def _get_ports(S: SType):
    """Return the natural-sorted tuple of port names of any stype."""
    if is_sdict(S):
        sdict_ = cast(SDict, S)
        # flatten the (p1, p2) key tuples into the set of all port names:
        ports_set = {port for pair in sdict_ for port in pair}
        return tuple(natsorted(ports_set))
    # SCoo / SDense: the port map is always the last element.
    *_, ports_map = S
    assert isinstance(ports_map, dict)
    return tuple(natsorted(ports_map))
# -
# export
def is_multimode(S: Any) -> bool:
    """check if an stype is multimode (every port name contains an '@' mode suffix)

    Note: the previous docstring said "single mode" -- a copy-paste error;
    this predicate requires *all* ports to carry a mode suffix.
    """
    if not is_stype(S):
        return False
    ports = _get_ports(S)
    return all(("@" in p) for p in ports)
# export
def is_mixedmode(S: Any) -> bool:
    """Check whether an stype is neither single mode nor multimode
    (mixing suffixed and plain port names makes it invalid)."""
    return not (is_singlemode(S) or is_multimode(S))
# ## SAX return type helpers
#
# > a.k.a SDict, SDense, SCoo helpers
# Convert an `SDict`, `SCoo` or `SDense` into an `SDict` (or convert a model generating any of these types into a model generating an `SDict`):
# +
# exporti
@overload
def sdict(S: Model) -> Model:
...
@overload
def sdict(S: SType) -> SDict:
...
# +
# export
def sdict(S: Union[Model, SType]) -> Union[Model, SType]:
    """Convert an `SCoo` or `SDense` to `SDict`.

    If `S` is a `Model` (a callable), a wrapped model is returned whose
    result is converted to `SDict` on every call instead.
    """
    if is_model(S):
        model = cast(Model, S)
        # wrap the model so its *result* gets converted on each evaluation:
        @functools.wraps(model)
        def wrapper(**kwargs):
            return sdict(model(**kwargs))
        return wrapper
    elif is_scoo(S):
        x_dict = _scoo_to_sdict(*cast(SCoo, S))
    elif is_sdense(S):
        x_dict = _sdense_to_sdict(*cast(SDense, S))
    elif is_sdict(S):
        x_dict = cast(SDict, S)  # already an SDict: returned unchanged
    else:
        raise ValueError("Could not convert arguments to sdict.")
    return x_dict
def _scoo_to_sdict(Si: Array, Sj: Array, Sx: Array, ports_map: Dict[str, int]) -> SDict:
    """Convert the four SCoo components into an SDict."""
    sdict = {}
    # invert the port map so indices can be translated back to names:
    inverse_ports_map = {int(i): p for p, i in ports_map.items()}
    for i, (si, sj) in enumerate(zip(Si, Sj)):
        # indices missing from the port map get an empty-string name here
        # and are filtered out below; Sx[..., i] keeps any leading batch dims.
        sdict[
            inverse_ports_map.get(int(si), ""), inverse_ports_map.get(int(sj), "")
        ] = Sx[..., i]
    sdict = {(p1, p2): v for (p1, p2), v in sdict.items() if p1 and p2}
    return sdict
def _sdense_to_sdict(S: Array, ports_map: Dict[str, int]) -> SDict:
    """Convert a dense S-matrix + port map into an SDict (every port pair)."""
    sdict = {}
    for p1, i in ports_map.items():
        for p2, j in ports_map.items():
            sdict[p1, p2] = S[..., i, j]
    return sdict
# -
assert sdict(_sdict) is _sdict
assert sdict(_scoo) == {
("in0", "in0"): 3.0,
("in1", "in0"): 1.0,
("out0", "out0"): 4.0,
}
assert sdict(_sdense) == {
("in0", "in0"): 0.0,
("in0", "out0"): 1.0,
("in0", "in1"): 2.0,
("out0", "in0"): 3.0,
("out0", "out0"): 4.0,
("out0", "in1"): 5.0,
("in1", "in0"): 6.0,
("in1", "out0"): 7.0,
("in1", "in1"): 8.0,
}
# Convert an `SDict`, `SCoo` or `SDense` into an `SCoo` (or convert a model generating any of these types into a model generating an `SCoo`):
# +
# exporti
@overload
def scoo(S: Callable) -> Callable:
...
@overload
def scoo(S: SType) -> SCoo:
...
# +
# export
def scoo(S: Union[Callable, SType]) -> Union[Callable, SCoo]:
    """Convert an `SDict` or `SDense` to `SCoo`.

    If `S` is a `Model` (a callable), a wrapped model is returned whose
    result is converted to `SCoo` on every call instead.
    """
    if is_model(S):
        model = cast(Model, S)
        # wrap the model so its *result* gets converted on each evaluation:
        @functools.wraps(model)
        def wrapper(**kwargs):
            return scoo(model(**kwargs))
        return wrapper
    elif is_scoo(S):
        S = cast(SCoo, S)  # already an SCoo: returned unchanged
    elif is_sdense(S):
        S = _sdense_to_scoo(*cast(SDense, S))
    elif is_sdict(S):
        S = _sdict_to_scoo(cast(SDict, S))
    else:
        raise ValueError("Could not convert arguments to scoo.")
    return S
def _sdense_to_scoo(S: Array, ports_map: Dict[str, int]) -> SCoo:
    """Convert a dense S-matrix + port map into the four SCoo components."""
    # meshgrid defaults to 'xy' indexing, hence the (Sj, Si) swap so that
    # Si enumerates rows and Sj enumerates columns:
    Sj, Si = jnp.meshgrid(jnp.arange(S.shape[-1]), jnp.arange(S.shape[-2]))
    return Si.ravel(), Sj.ravel(), S.reshape(*S.shape[:-2], -1), ports_map
def _sdict_to_scoo(sdict: SDict) -> SCoo:
    """Convert an SDict into the four SCoo components."""
    # dict used as an insertion-ordered set of port names:
    all_ports = {}
    for p1, p2 in sdict:
        all_ports[p1] = None
        all_ports[p2] = None
    ports_map = {p: i for i, p in enumerate(all_ports)}
    # broadcast all values to a common shape and stack along a new last axis:
    Sx = jnp.stack(jnp.broadcast_arrays(*sdict.values()), -1)
    Si = jnp.array([ports_map[p] for p, _ in sdict])
    Sj = jnp.array([ports_map[p] for _, p in sdict])
    return Si, Sj, Sx, ports_map
# -
assert scoo(_scoo) is _scoo
assert scoo(_sdict) == (0, 1, 3.0, {"in0": 0, "out0": 1})
Si, Sj, Sx, port_map = scoo(_sdense) # type: ignore
np.testing.assert_array_equal(Si, jnp.array([0, 0, 0, 1, 1, 1, 2, 2, 2]))
np.testing.assert_array_equal(Sj, jnp.array([0, 1, 2, 0, 1, 2, 0, 1, 2]))
np.testing.assert_array_almost_equal(Sx, jnp.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0]))
assert port_map == {"in0": 0, "in1": 2, "out0": 1}
# Convert an `SDict`, `SCoo` or `SDense` into an `SDense` (or convert a model generating any of these types into a model generating an `SDense`):
# +
# exporti
@overload
def sdense(S: Callable) -> Callable:
...
@overload
def sdense(S: SType) -> SDense:
...
# +
# export
def sdense(S: Union[Callable, SType]) -> Union[Callable, SDense]:
    """Convert an `SDict` or `SCoo` to `SDense`.

    If `S` is a `Model` (a callable), a wrapped model is returned whose
    result is converted to `SDense` on every call instead.
    """
    if is_model(S):
        model = cast(Model, S)
        # wrap the model so its *result* gets converted on each evaluation:
        @functools.wraps(model)
        def wrapper(**kwargs):
            return sdense(model(**kwargs))
        return wrapper
    if is_sdict(S):
        S = _sdict_to_sdense(cast(SDict, S))
    elif is_scoo(S):
        S = _scoo_to_sdense(*cast(SCoo, S))
    elif is_sdense(S):
        S = cast(SDense, S)  # already an SDense: returned unchanged
    else:
        raise ValueError("Could not convert arguments to sdense.")
    return S
def _scoo_to_sdense(
    Si: Array, Sj: Array, Sx: Array, ports_map: Dict[str, int]
) -> SDense:
    """Scatter the SCoo values into a dense (complex) NxN S-matrix."""
    n_col = len(ports_map)
    # dense matrix is always complex so any ComplexFloat values fit:
    S = jnp.zeros((*Sx.shape[:-1], n_col, n_col), dtype=complex)
    # scatter-add the sparse entries at their (row, col) positions:
    S = S.at[..., Si, Sj].add(Sx)
    return S, ports_map
def _sdict_to_sdense(sdict: SDict) -> SDense:
    """Convert an SDict to SDense by going through the SCoo representation."""
    Si, Sj, Sx, ports_map = _sdict_to_scoo(sdict)
    return _scoo_to_sdense(Si, Sj, Sx, ports_map)
# +
assert sdense(_sdense) is _sdense
Sd, port_map = sdense(_scoo) # type: ignore
Sd_ = jnp.array([[3.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j],
[0.0 + 0.0j, 4.0 + 0.0j, 0.0 + 0.0j],
[1.0 + 0.0j, 0.0 + 0.0j, 0.0 + 0.0j]])
np.testing.assert_array_almost_equal(Sd, Sd_)
assert port_map == {"in0": 0, "in1": 2, "out0": 1}
# +
# export
def modelfactory(func):
    """Decorator that marks a function as `ModelFactory`.

    If `func` already has a `Callable`-like return annotation it is returned
    untouched; otherwise a ``-> Model`` return annotation is injected so SAX
    recognizes the function as a factory.
    """
    sig = inspect.signature(func)
    if _is_callable_annotation(sig.return_annotation):  # already model factory
        return func
    # overwrite the introspectable signature with a `-> Model` annotation:
    func.__signature__ = sig.replace(return_annotation=Model)
    return func
|
nbs/00_typing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: recommender_deployed
# language: python
# name: recommender_deployed
# ---
# # Purpose
#
# The purpose of this notebook is to generate movie poster urls for each movie_id we observe in our interactions dataset. These movie poster urls will be utilized in the front-end visualization tool we build for understanding recommender performance.
# cd ../
# +
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
from dotenv import find_dotenv, load_dotenv
load_dotenv(find_dotenv())
import pandas as pd
import numpy as np
import scipy.sparse as scs
from lightfm import LightFM
from tqdm import tqdm, tqdm_notebook
import time
import json
import os
import tmdbsimple as tmdb
tmdb.API_KEY = os.environ['TMDB_API_KEY']
# -
# # Load data
interactions = pd.read_csv('data/ratings.dat',
sep='::', engine='python',
header=None,
names=['uid', 'iid', 'rating', 'timestamp'],
usecols=['uid', 'iid', 'rating'],
)
display(interactions.sample(5))
print('Shape: {:>9,} x {}'.format(*interactions.shape))
# [links](https://www.kaggle.com/grouplens/movielens-20m-dataset/version/2) is a downloaded csv which connects `movieId` of the movielens dataset to `tmdbId` of [The Movie Database](https://www.themoviedb.org/?language=en). The Movie Database contains the poster urls for each movieId.
links = pd.read_csv('data/links.csv')
display(links.sample(5))
print('Shape: {:>9,} x {}'.format(*links.shape))
# # Generate posters for each movieId in dataset
# First we join movieIds in our dataset with tmdbIds in links
movieIds = pd.DataFrame(interactions.iid.unique(), columns=['movieId'])
m = movieIds.merge(links[['movieId', 'tmdbId']], how='left').dropna().astype('int64')
m.head(4)
# Next we loop through each tmdbId to get the poster_url. To simplify this process, I used the [tmdbsimple](https://github.com/celiao/tmdbsimple) library to abstract the requests process.
# +
# Fetch the poster path for every movie in `m` via the TMDB API.
posters = []
for i, movie in tqdm_notebook(m.iterrows(), total=len(m)):
    # by sleeping for half second, we do not hit tmdb's api too aggressively.
    time.sleep(0.5)
    try:
        _id = movie['tmdbId']
        poster_path = tmdb.Movies(_id).info()['poster_path']
    except Exception:
        # `except Exception` (not a bare `except:`) so KeyboardInterrupt /
        # SystemExit can still stop this long-running loop instead of being
        # silently recorded as an 'error' poster.
        poster_path = 'error'
    posters.append(poster_path)
# -
# Clean up the data and view a couple results
# Attach the fetched paths and assemble the full image urls.
m['poster_path'] = posters
m['url_base'] = 'https://image.tmdb.org/t/p/w200'
m['poster_url'] = m['url_base'] + m['poster_path']
# spot-check a few generated urls:
for url in m.sample(5).poster_url.tolist():
    print(url)
# Convert to a dictionary, and store as a json object. This json file will be utilized on the front end
# NOTE(review): the dumped dict is keyed by the DataFrame's index (not movieId)
# and stores poster_path rather than the full poster_url -- confirm this
# matches what the front end expects.
d = m['poster_path'].to_dict()
with open('app/objects/posters.json', 'w') as f:
    json.dump(d, f, indent=4)
d
|
notebooks/Get movie poster urls for visualization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:bench_env]
# language: python
# name: conda-env-bench_env-py
# ---
# + [markdown] papermill={"duration": 0.004874, "end_time": "2022-06-04T11:41:32.964526", "exception": false, "start_time": "2022-06-04T11:41:32.959652", "status": "completed"} tags=[]
# # PYTORCH CNN Classifier
#
# To run this notebook on an another benchmark, use
#
# ```
# papermill utils/torch_cnn_classifier.ipynb torch_cnn_experiments/[DATASET NAME].ipynb -p DATASET [DATASET NAME]
# ```
# + papermill={"duration": 0.00997, "end_time": "2022-06-04T11:41:32.978820", "exception": false, "start_time": "2022-06-04T11:41:32.968850", "status": "completed"} tags=["parameters"]
# DATASET = 'no_dataset'
DATASET = 'demo_human_or_worm'
VERSION = 0
BATCH_SIZE = 64
EPOCHS = 1
# + papermill={"duration": 0.006992, "end_time": "2022-06-04T11:41:32.989911", "exception": false, "start_time": "2022-06-04T11:41:32.982919", "status": "completed"} tags=["injected-parameters"]
# Parameters
DATASET = "demo_human_or_worm"
EPOCHS = 10
# + papermill={"duration": 0.007706, "end_time": "2022-06-04T11:41:33.001495", "exception": false, "start_time": "2022-06-04T11:41:32.993789", "status": "completed"} tags=[]
print(DATASET, VERSION, BATCH_SIZE, EPOCHS)
# + [markdown] papermill={"duration": 0.003812, "end_time": "2022-06-04T11:41:33.009077", "exception": false, "start_time": "2022-06-04T11:41:33.005265", "status": "completed"} tags=[]
# ## Config
# + papermill={"duration": 1.276742, "end_time": "2022-06-04T11:41:34.289412", "exception": false, "start_time": "2022-06-04T11:41:33.012670", "status": "completed"} tags=[]
import os
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from genomic_benchmarks.data_check import is_downloaded, info
from genomic_benchmarks.dataset_getters.pytorch_datasets import get_dataset
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.models.torch import CNN
from genomic_benchmarks.dataset_getters.utils import coll_factory, LetterTokenizer, build_vocab, check_seq_lengths, check_config, VARIABLE_LENGTH_DATASETS
# + papermill={"duration": 0.009463, "end_time": "2022-06-04T11:41:34.304488", "exception": false, "start_time": "2022-06-04T11:41:34.295025", "status": "completed"} tags=[]
USE_PADDING = DATASET in VARIABLE_LENGTH_DATASETS
# + [markdown] papermill={"duration": 0.00357, "end_time": "2022-06-04T11:41:34.312182", "exception": false, "start_time": "2022-06-04T11:41:34.308612", "status": "completed"} tags=[]
# ## Choose the dataset
# + papermill={"duration": 1.640855, "end_time": "2022-06-04T11:41:35.956613", "exception": true, "start_time": "2022-06-04T11:41:34.315758", "status": "failed"} tags=[]
if not is_downloaded(DATASET):
download_dataset(DATASET, local_repo=True)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
info(DATASET, local_repo=True)
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
train_dset = get_dataset(DATASET, 'train')
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
NUM_CLASSES = len(set([train_dset[i][1] for i in range(len(train_dset))]))
NUM_CLASSES
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Tokenizer and vocab
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
tokenizer = get_tokenizer(LetterTokenizer())
vocabulary = build_vocab(train_dset, tokenizer, use_padding=USE_PADDING)
print("vocab len:" ,vocabulary.__len__())
print(vocabulary.get_stoi())
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Dataloader and batch preparation
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# Run on GPU or CPU
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
# nn_input_len is the fixed input length the network expects; it doubles as
# the padding target for variable-length datasets:
max_seq_len, nn_input_len = check_seq_lengths(dataset=train_dset, use_padding=USE_PADDING)
# Data Loader
# variable-length datasets are padded up to nn_input_len; fixed-length ones are not:
if(USE_PADDING):
    collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = nn_input_len)
else:
    collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = None)
train_loader = DataLoader(train_dset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Model
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
model = CNN(
number_of_classes=NUM_CLASSES,
vocab_size=vocabulary.__len__(),
embedding_dim=100,
input_len=nn_input_len,
device=device
).to(device)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Training
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
model.fit(train_loader, epochs=EPOCHS)
# + [markdown] papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
# ## Testing
# + papermill={"duration": null, "end_time": null, "exception": null, "start_time": null, "status": "pending"} tags=[]
test_dset = get_dataset(DATASET, 'test')
test_loader = DataLoader(test_dset, batch_size=BATCH_SIZE, shuffle=True, collate_fn=collate)
acc, f1 = model.test(test_loader)
acc, f1
|
experiments/torch_cnn_experiments/demo_human_or_worm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Importing VTK data to be displayed as Mesh
#
# Additional requirements for this example: `vtk`
#
# We will also see how to do some processing with `vtk` and visualize its results.
# +
import k3d
import os
import vtk

from k3d.helpers import download

# Fetch the sample cow mesh from the VTKData repository.
filename = download('https://raw.githubusercontent.com/naucoin/VTKData/master/Data/cow.vtp')

# Swap the Y and Z axes so the model is oriented upright in the k3d scene.
model_matrix = (
    1.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 1.0, 0.0,
    0.0, 1.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 1.0
)

# Read the .vtp file into a vtkPolyData object.
reader = vtk.vtkXMLPolyDataReader()
reader.SetFileName(filename)
reader.Update()

# Render the mesh in red.
plot = k3d.plot()
cow3d = k3d.vtk_poly_data(reader.GetOutput(), color=0xff0000, model_matrix=model_matrix)
plot += cow3d
plot.display()
# -
# Wireframe rendering can be toggled on the existing object.
cow3d.wireframe = True
cow3d.wireframe = False
# +
# Cut the mesh with a diagonal plane through the origin.
plane = vtk.vtkPlane()
plane.SetOrigin(0, 0, 0)
plane.SetNormal(1, 1, 0)

cutter = vtk.vtkCutter()
cutter.SetCutFunction(plane)
cutter.SetInputConnection(reader.GetOutputPort())
cutter.Update()

# Extract the boundary edges of the cut (computed for illustration; the
# polygon construction below reads from the cutter output directly).
feature_edges = vtk.vtkFeatureEdges()
feature_edges.SetInputConnection(cutter.GetOutputPort())
feature_edges.BoundaryEdgesOn()
feature_edges.FeatureEdgesOff()
feature_edges.NonManifoldEdgesOff()
feature_edges.ManifoldEdgesOff()
feature_edges.Update()

# Forms loops (closed polylines) from the cutter output.
strip_builder = vtk.vtkStripper()
strip_builder.SetInputConnection(cutter.GetOutputPort())
strip_builder.Update()

# This trick defines polygons from the polyline loops.
cut_polygons = vtk.vtkPolyData()
cut_polygons.SetPoints(strip_builder.GetOutput().GetPoints())
cut_polygons.SetPolys(strip_builder.GetOutput().GetLines())
# -
# Show the cut cross-section in blue, translated away from the cow via the
# model matrix (the -5.0 entry is the translation component).
model_matrix = (
    1.0, 0.0, 0.0, 0.0,
    0.0, 0.0, 1.0, -5.0,
    0.0, 1.0, 0.0, 0.0,
    0.0, 0.0, 0.0, 1.0
)
plot += k3d.vtk_poly_data(cut_polygons, color=0x0000ff, model_matrix=model_matrix)
examples/vtk.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# (bcg_incomplete_final)=
# ```{raw} html
# <div id="qe-notebook-header" align="right" style="text-align:right;">
# <a href="https://quantecon.org/" title="quantecon.org">
# <img style="width:250px;display:inline;" width="250px" src="https://assets.quantecon.org/img/qe-menubar-logo.svg" alt="QuantEcon">
# </a>
# </div>
# ```
#
# # Equilibrium Capital Structures with Incomplete Markets
#
# ```{contents} Contents
# :depth: 2
# ```
#
# In addition to what's in Anaconda, this lecture will need the following libraries:
# + tags=["hide-output"]
# !pip install --upgrade quantecon
# !pip install interpolation
# !conda install -y -c plotly plotly plotly-orca
# -
# ## Introduction
#
# This is an extension of an earlier lecture {doc}`BCG_complete_mkts <BCG_complete_mkts>` about a **complete markets**
# model.
#
# In contrast to that lecture, this lecture describes an instance of a model authored by Bisin, Clementi, and Gottardi {cite}`BCG_2018`
# in which financial markets are **incomplete**.
#
# Instead of being able to trade equities and a full set of one-period
# Arrow securities as they can in the earlier lecture {doc}`BCG_complete_mkts <BCG_complete_mkts>`, now consumers and firms trade only equity and a bond.
#
# It is useful to watch how outcomes differ as consequences of trading opportunities differing in the two settings.
#
# In the complete markets economy in {doc}`BCG_complete_mkts <BCG_complete_mkts>`
#
# - there is a unique stochastic discount factor that prices all assets
# - consumers’ portfolio choices are indeterminate
# - firms' financial structures are indeterminate, so the model embodies an instance of a Modigliani-Miller theorem {cite}`Modigliani_Miller_1958`
# - the aggregate of all firms' financial structures is indeterminate, a consequence of there being redundant assets
#
# In the incomplete markets economy studied here
#
# - there is not a unique equilibrium stochastic discount factor
# - different stochastic discount factors price different assets
# - consumers’ portfolio choices are determinate
# - while **individual** firms' financial structures are indeterminate, thus conforming to an aspect of a Modigliani-Miller theorem {cite}`Modigliani_Miller_1958`,
# the **aggregate** of all firms' financial structures **is** determinate.
#
# A `Big K, little k` analysis played an important role in the previous lecture {doc}`BCG_complete_mkts <BCG_complete_mkts>`.
#
# A more subtle version of a `Big K, little k` will be featured in our presentation and analysis of BCG incomplete markets environment.
#
# We use it to convey the heart of what BCG call a **rational conjectures** equilibrium in which the `rational conjectures` are about
# equilibrium pricing functions in regions of the state space that an average consumer or firm does not choose to inhabit in equilibrium.
#
# ### Setup
#
# We adopt specifications of preferences and technologies used by Bisin,
# Clementi, and Gottardi (2018) {cite}`BCG_2018` and in our earlier lecture on a complete markets
# version of their model.
#
# The economy lasts for two periods, $t=0, 1$.
#
# There are two types of consumers named $i=1,2$.
#
# A scalar random variable $\epsilon$ affects both
#
# - a representative firm’s physical return $f(k)e^\epsilon$ in
# period $1$ from investing $k \geq 0$ in capital in period
# $0$.
# - period $1$ endowments $w_1^i(\epsilon)$ of the
# consumption good for agents $i =1$ and $i=2$.
#
# ### Shock distribution:
#
# Sometimes we assume that
#
# $$
# \epsilon \sim g(\epsilon) = {\mathcal N}(0,1)
# $$
#
# Other times, for computational purposes in our Python code, we’ll
# instead assume that $g(\cdot)$ is a probability mass function that
# serves as a discrete approximation to a standardized normal density.
#
# ### Preferences:
#
# A consumer of type $i$ orders period $0$ consumption
# $c_0^i$ and state $\epsilon$-period $1$ consumption
# $c^i(\epsilon)$ by
#
# $$
# u^i = u(c_0^i) + \beta \int u(c_1^i(\epsilon)) g (\epsilon) d \epsilon, \quad i = 1,2
# $$
#
# $\beta \in (0,1)$ and the one-period utility function is
#
# $$
# u(c) = \begin{cases}
# \frac{c^{1 -\gamma}} { 1 - \gamma} & \textrm{if } \gamma \neq 1 \\
# \log c & \textrm{if } \gamma = 1
# \end{cases}
# $$
#
# ### Ownership
#
# A consumer of type $i$ is endowed with $w_0^i$ units of the
# time $0$ good and $w_1^i(\epsilon)$ of the time $1$
# consumption good when the random variable takes value $\epsilon$.
#
# At the start of period $0$, a consumer of type $i$ also owns
# $\theta^i_0$ shares of a representative firm.
#
# ### Risk-sharing motives
#
# The two types of agents’ period $1$ endowments are correlated with
# the physical return on capital in different ways.
#
# Endowment differences give agents incentives to trade risks that in the
# complete market version of the model showed up in their demands for
# equity and their demands and supplies of one-period Arrow securities.
#
# In the incomplete-markets setting under study here, these differences
# show up in differences in the two types of consumers’ demands for a
# typical firm’s bonds and equity, the only two assets that agents can now
# trade.
#
# ### Aggregate Endowments
#
# Economy-wide endowments in periods $0$ and $1$ are
#
# $$
# \begin{aligned}
# w_0 & = w_0^1 + w_0^2 \cr
# w_1(\epsilon) & = w_1^1(\epsilon) + w_1^2(\epsilon) \textrm{ in state }\epsilon
# \end{aligned}
# $$
#
# ### Feasibility:
#
# Where $\alpha \in (0,1)$ and $A >0$
#
# $$
# \begin{aligned}
# c_0^1 + c_0^2 & = w_0^1 + w_0^2 - k \cr
# c_1^1(\epsilon) + c_1^2(\epsilon) & = w_1^1(\epsilon) + w_1^2(\epsilon) + e^\epsilon f(k), \quad k \geq 0
# \end{aligned}
# $$
#
# where $f(k) = A k^\alpha, A >0, \alpha \in (0,1)$.
#
# ### Measures of agents and firms
#
# As in the companion lecture {doc}`BCG_complete_mkts <BCG_complete_mkts>` that studies a complete markets version of
# the model, we follow BCG in assuming that there are equal unit measures of
#
# - consumers of type $i=1$
# - consumers of type $i=2$
# - firms with access to the production technology that converts
# $k$ units of time $0$ good into
# $A k^\alpha e^\epsilon$ units of the time $1$ good in
# random state $\epsilon$
#
# Thus, let $\omega \in [0,1]$ index a particular consumer of type
# $i$.
#
# Then define Big $C^i$ as
#
# $$
# C^i = \int_0^1 c^i(\omega) d \, \omega
# $$
#
# with components
#
# $$
# \begin{aligned}
# C^i_0 & = \int_0^1 c^i_0(\omega) d \, \omega \cr
# C^i_1(\epsilon) & = \int_0^1 c^i_1(\epsilon;\omega) d \, \omega
# \end{aligned}
# $$
#
# In the same spirit, let $\zeta \in [0,1]$ index a particular firm
# and let firm $\zeta$ purchase $k(\zeta)$ units of capital
# and issue $b(\zeta)$ bonds.
#
# Then define Big $K$ and Big $B$ as
#
# $$
# K = \int_0^1 k(\zeta) d \, \zeta, \quad B = \int_0^1 b(\zeta) d \, \zeta
# $$
#
# The assumption that there are equal measures of our three types of
# agents plays an important role making each individual agent into a
# powerless **price taker**:
#
# - an individual consumer chooses its own (infinitesimal) part
# $c^i(\omega)$ of $C^i$ taking prices as given
# - an individual firm chooses its own (infinitesimal) part
# $k(\zeta)$ of $K$ and $b(\zeta)$ of $B$
# taking pricing functions as given
# - However, equilibrium prices depend on the `Big K, Big B, Big C`
# objects $K$, $B$, and $C$
#
# The assumption about measures of agents is a powerful device for making
# a host of competitive agents take as given the equilibrium prices that
# turn out to be determined by the decisions of hosts of agents just like
# them.
#
# We call an equilibrium **symmetric** if
#
# - all type $i$ consumers choose the same consumption profiles so
# that $c^i(\omega) = C^i$ for all $\omega \in [0,1]$
# - all firms choose the same levels of $k$ and $b$ so that
# $k(\zeta) = K$, $b(\zeta) = B$ for all
# $\zeta \in [0,1]$
#
# In this lecture, we restrict ourselves to describing symmetric
# equilibria.
#
# ## Asset Markets
#
# Markets are incomplete: only equities and bonds issued by representative
# firms can be traded.
#
# Let $\theta^i$ and $\xi^i$ be a consumer of type
# $i$’s post-trade holdings of equity and bonds, respectively.
#
# The firm issues bonds promising to pay $b$ units of consumption at
# time $t=1$ and purchases $k$ units of physical capital at
# time $t=0$.
#
# When $e^\epsilon A k^\alpha < b$, the firm defaults and output is
# divided equally among bondholders.
#
# Evidently, when the productivity shock
# $\epsilon < \epsilon^* = \log \left(\frac{b}{ Ak^\alpha}\right)$,
# the firm defaults on its debt
#
# Payoffs to equity and debt at date 1 as functions of the productivity
# shock $\epsilon$ are
#
# $$
# \begin{aligned}
# d^e(k,b;\epsilon) &= \max \left\{ e^\epsilon A k^\alpha - b, 0 \right\} \\
# d^b(k,b;\epsilon) &= \min \left\{ \frac{e^\epsilon A k^\alpha}{b}, 1 \right\}
# \end{aligned}
# $$
#
# The firm faces a bond price function $p(k,b)$ when it issues
# $b$ bonds and purchases $k$ units of physical capital.
#
# The firm regards the bond price function $p(k,b)$ as exogenous in
# the sense that it is not affected by its own choices of $k$ and
# $b$.
#
# The firm’s equity is worth $q(k,b)$ when it issues $b$ bonds
# and purchases $k$ units of physical capital.
#
# A firm regards the equity-pricing function $q(k,b)$ and the bond
# pricing function $p(k,b)$ as exogenous in the sense that they are
# not affected by its choices of $k$ and $b$.
#
# Consumers face equilibrium prices $\check q$ and $\check p$
# for bonds and equities, where $\check q$ and $\check p$ are
# both scalars.
#
# ### Consumers
#
# Each consumer of type $i$ is endowed with $w_0^i$ of the
# time $0$ consumption good, $w_1^i(\epsilon)$ of the time
# $1$, state $\epsilon$ consumption good, and owns a fraction
# $\theta^i_0 \in (0,1)$ of the initial value of a representative
# firm, where $\theta^1_0 + \theta^2_0 = 1$.
#
# The initial value of a representative firm is $V$ (an object to be
# determined in a rational expectations equilibrium).
#
# Household $i$ buys $\theta^i$ shares of equity and bonds
# worth $\check p \xi^i$ where $\check p$ is the bond price.
#
# Being a price-taker, the household takes $V$,
# $\check q = q(K,B)$, $\check p = p(K,B)$, and $K, B$
# as given.
#
# Household $i$’s optimization problem is
#
# $$
# \begin{aligned}
# \max_{c^i_0,\theta^i,\xi^i,c^i_1(\epsilon)} & u(c^i_0) + \beta \int u(c^i(\epsilon)) g(\epsilon) \ d\epsilon \\
# \text{subject to } \quad
# & c^i_0 = w^i_0 + \theta^i_0V - \check q\theta^i - \check p \xi^i, \\
# & c^i_1(\epsilon) = w^i_1(\epsilon) + \theta^i d^e(K,B;\epsilon) + \xi^i d^b(K,B;\epsilon) \ \forall \ \epsilon, \\
# & \theta^i \geq 0, \xi^i \geq 0.
# \end{aligned}
# $$
#
# The last two inequalities impose that the household cannot short sell
# equity or bonds.
#
# Form household $i$’s Lagrangian:
#
# $$
# \begin{aligned}
# L^i := & u(c^i_0) + \beta \int u(c^i(\epsilon)) g(\epsilon) \ d\epsilon \\
# & +\lambda^i_0 [w^i_0 + \theta_0V - \check q\theta^i - \check p \xi^i - c^i_0] \\
# & + \beta \int \lambda^i_1(\epsilon) \left[ w^i_1(\epsilon) + \theta^i d^e(K,B;\epsilon) + \xi^i d^b(K,B;\epsilon) - c^i_1(\epsilon) \right] g(\epsilon) \ d\epsilon
# \end{aligned}
# $$
#
# Household $i$’s first-order necessary conditions for an optimum
# include:
#
# $$
# \begin{aligned}
# c^i_0:& \quad u^\prime(c^i_0) = \lambda^i_0 \\
# c^i_1(\epsilon):& \quad u^\prime(c^i_1(\epsilon)) = \lambda^i_1(\epsilon) \\
# \theta^i:& \quad \beta \int \lambda^i_1(\epsilon) d^e(K,B;\epsilon) g(\epsilon) \ d\epsilon \leq \lambda^i_0 \check q \quad (= \ \ \text{if} \ \ \theta^i>0) \\
# \xi^i:& \quad \beta \int \lambda^i_1(\epsilon) d^b(K,B;\epsilon) g(\epsilon) \ d\epsilon \leq \lambda^i_0 \check p \quad (= \ \ \text{if} \ \ b^i>0) \\
# \end{aligned}
# $$
#
# We can combine and rearrange household $i$’s first-order
# conditions to become:
#
# $$
# \begin{aligned}
# \check q \geq \beta \int \frac{u^\prime(c^i_1(\epsilon))}{u^\prime(c^i_0)} d^e(K,B;\epsilon) g(\epsilon) \ d\epsilon \quad (= \ \ \text{if} \ \ \theta^i>0) \\
# \check p \geq \beta \int \frac{u^\prime(c^i_1(\epsilon))}{u^\prime(c^i_0)} d^b(K,B;\epsilon) g(\epsilon) \ d\epsilon \quad (= \ \ \text{if} \ \ b^i>0)\\
# \end{aligned}
# $$
#
# These inequalities imply that equilibrium consumption allocations and
# prices in a symmetric rational expectations equilibrium will satisfy
#
# $$
# \begin{aligned}
# \check q = \max_i \beta \int \frac{u^\prime(c^i_1(\epsilon))}{u^\prime(c^i_0)} d^e(K,B;\epsilon) g(\epsilon) \ d\epsilon \\
# \check p = \max_i \beta \int \frac{u^\prime(c^i_1(\epsilon))}{u^\prime(c^i_0)} d^b(K,B;\epsilon) g(\epsilon) \ d\epsilon \\
# \end{aligned}
# $$
#
# ### Firm
#
# When individual firms solve their optimization problems, they take big
# $C^i$’s as fixed objects that they don’t influence.
#
# A representative firm faces a price function $q(k,b)$ for its
# equity and a price function $p(k, b)$ per unit of bonds that
# satisfy
#
# $$
# \begin{aligned}
# q(k,b) = \max_i \beta \int \frac{u^\prime(C^i_1(\epsilon))}{u^\prime(C^i_0)} d^e(k,b;\epsilon) g(\epsilon) \ d\epsilon \\
# p(k,b) = \max_i \beta \int \frac{u^\prime(C^i_1(\epsilon))}{u^\prime(C^i_0)} d^b(k,b;\epsilon) g(\epsilon) \ d\epsilon \\
# \end{aligned}
# $$
#
# Notice the appearance of big $C^i$’s on the right sides of these
# two equations that define equilibrium pricing functions.
#
# The two price functions are satisfied not only for equilibrium choices
# $K, B$ of capital $k$ and debt $b$, but also for any
# **out-of-equilibrium** pairs $(k, b) \neq (K, B)$.
#
# The firm is assumed to know both price functions.
#
# The package of assumptions just described is sometimes called the
# assumption of **rational conjectures** (about the price functions). BCG
# give credit to Makowski for emphasizing and clarifying how this approach
# is an element of rational expectations models.
#
# The firm chooses capital $k$ and debt $b$ to maximize its
# market value:
#
# $$
# V \equiv \max_{k,b} -k + q(k,b) + p(k,b) b
# $$
#
# **Note:** It will turn out that in equilibrium consumers of both types
# *want* the firm to maximize its value.
#
# In the special cases studied here
#
# - consumers of types $i=1,2$ both hold equity
# - only consumers of type $i=2$ hold debt; consumers of type
# $i=1$ hold none.
#
# These outcomes occur because we follow BCG and set parameters so that a
# type 2 consumer’s stochastic endowment of the consumption good in period
# $1$ is more correlated with the firm’s output than is a type 1
# consumer’s.
#
# This gives consumers of type $2$ a motive to hedge second period
# endowment risk by holding some bonds (these consumers also choose to
# hold some equity).
#
# These outcomes mean that in our setting the pricing functions end up
# satisfying
#
# $$
# \begin{aligned}
# q(k,b) &= \beta \int \frac{u^\prime(C^1_1(\epsilon))}{u^\prime(C^1_0)} d^e(k,b;\epsilon) g(\epsilon) \ d\epsilon = \beta \int \frac{u^\prime(C^2_1(\epsilon))}{u^\prime(C^2_0)} d^e(k,b;\epsilon) g(\epsilon) \ d\epsilon \\
# p(k,b) &= \beta \int \frac{u^\prime(C^2_1(\epsilon))}{u^\prime(C^2_0)} d^b(k,b;\epsilon) g(\epsilon) \ d\epsilon \\
# \end{aligned}
# $$
#
# Recall that
# $\epsilon^*(k,b) \equiv \log\left(\frac{b}{Ak^\alpha}\right)$ is a
# firm’s default threshold.
#
# We can rewrite the pricing functions as:
#
# $$
# \begin{aligned}
# q(k,b) &= \beta \int_{\epsilon^*}^\infty \frac{u^\prime(C^i_1(\epsilon))}{u^\prime(C^i_0)} \left( e^\epsilon Ak^\alpha - b \right) g(\epsilon) \ d\epsilon, \quad i=1,2\\
# p(k,b) &= \beta \int^{\epsilon^*}_{-\infty} \frac{u^\prime(C^2_1(\epsilon))}{u^\prime(C^2_0)} \left( \frac{e^\epsilon Ak^\alpha}{b} \right) g(\epsilon) \ d\epsilon + \beta \int_{\epsilon^*}^{\infty} \frac{u^\prime(C^2_1(\epsilon))}{u^\prime(C^2_0)} g(\epsilon) \ d\epsilon \\
# \end{aligned}
# $$
#
# #### Firm’s optimization problem
#
# The firm’s optimization problem is
#
# $$
# V \equiv \max_{k,b} \left\{ -k + q(k,b) + p(k, b) b \right\}
# $$
#
# The firm’s first-order necessary conditions with respect to $k$
# and $b$, respectively, are
#
# $$
# \begin{aligned}
# k: \quad & -1 + \frac{\partial q(k,b)}{\partial k} + b \frac{\partial p(q,b)}{\partial k} = 0 \cr
# b: \quad & \frac{\partial q(k,b)}{\partial b} + p(k,b) + b \frac{\partial p(k,b)}{\partial b} = 0
# \end{aligned}
# $$
#
# To proceed, we use the Leibniz integral rule several times to arrive at
# the following derivatives:
#
# $$
# \frac{\partial q(k,b)}{\partial k} = \beta \alpha A k^{\alpha-1} \int_{\epsilon^*}^\infty \frac{u'(C_1^i(\epsilon))}{u'(C_0^i)}
# e^\epsilon g(\epsilon) d \epsilon, \quad i=1,2
# $$
#
# $$
# \frac{\partial q(k,b)}{\partial b} = -\beta \int_{\epsilon^*}^\infty \frac{u'(C_1^i(\epsilon))}{u'(C_0^i)} g(\epsilon) d \epsilon, \quad i=1,2
# $$
#
# $$
# \frac{\partial p(k,b)}{\partial k} = \beta \alpha \frac{A k^{\alpha -1}}{b} \int_{-\infty}^{\epsilon^*} \frac{u'(C_1^2(\epsilon))}{u'(C_0^2)} g(\epsilon) d \epsilon
# $$
#
# $$
# \frac{\partial p(k,b)}{\partial b} = - \beta \frac{A k^\alpha}{b^2} \int_{-\infty}^{\epsilon^*} \frac{u'(C_1^2(\epsilon))}{u'(C_0^2)} e^\epsilon g(\epsilon) d \epsilon
# $$
#
# **Special case:** We confine ourselves to a special case in which both
# agents hold positive equities so that
# $\frac{\partial q(k,b)}{\partial k}$ and
# $\frac{\partial q(k,b)}{\partial b}$ are related to rates of
# intertemporal substitution for both agents.
#
# Substituting these partial derivatives into the above first-order
# conditions for $k$ and $B$, respectively, we obtain the
# following forms of those first order conditions:
#
# ```{math}
# :label: Eqn1
#
# k: \quad -1 + \beta \alpha A k^{\alpha -1} \int_{-\infty}^\infty \frac{u'(C_1^2(\epsilon))}{u'(C_0^2)} e^\epsilon g(\epsilon) d \epsilon = 0
# ```
#
# ```{math}
# :label: Eqn2
#
# b: \quad
# \int_{\epsilon^*}^\infty \left( \frac{u^\prime(C^1_1(\epsilon))}{u^\prime(C^1_0)} \right) g(\epsilon) \ d\epsilon = \int_{\epsilon^*}^\infty \left( \frac{u^\prime(C^2_1(\epsilon))}{u^\prime(C^2_0)} \right) g(\epsilon) \ d\epsilon
# ```
#
# where again recall that
# $\epsilon^*(k,b) \equiv \log\left(\frac{b}{Ak^\alpha}\right)$.
#
# Taking $C_0^i, C_1^i(\epsilon)$ as given, these are two equations
# that we want to solve for the firm’s optimal decisions $k, b$.
#
# ## Equilibrium verification
#
# On page 5 of BCG (2018), the authors say
#
# *If the price conjectures corresponding to the plan chosen by firms in
# equilibrium are correct, that is equal to the market prices* $\check q$ *and* $\check p$, *it is immediate to verify that
# the rationality of the conjecture coincides with the agents’ Euler
# equations.*
#
# Here BCG are describing how they go about verifying that when they set
# little $k$, little $\xi$ from the firm’s first-order
# conditions equal to the big $K$, big $B$ at the big
# $C$’s that appear in the pricing functions, then
#
# - households’ Euler equations are satisfied if little $c$’s are
# equated to those Big C’s
# - firms’ first-order necessary conditions for $k, b$ are
# satisfied.
# - Therefore in equilibrium, $\check q = q(k,b)$ and
# $\check p = p(k,b)$.
#
# ## Pseudo Code
#
# Before displaying our Python code for solving a BCG incomplete markets,
# we’ll sketch some pseudo code that displays the logical flow of our
# Python code.
#
# Here goes:
#
# 1. Set upper and lower bounds for firm value as $V_h$ and
# $V_l$, for capital as $k_h$ and $k_l$, and for debt
# as $b_h$ and $b_l$.
# 1. Conjecture firm value $V = \frac{1}{2}(V_h + V_l)$
# 1. Conjecture debt level $b = \frac{1}{2}(b_h + b_l)$.
# 1. Conjecture capital $k = \frac{1}{2}(k_h + k_l)$.
# 1. Compute the default threshold
# $\epsilon^* \equiv \log\left(\frac{b}{Ak^\alpha}\right)$.
# 1. (In this step we abuse notation by freezing $V, k, b$ and in
# effect temporarily treating them as Big $K,B$ values. Thus, in
# this step 6 little k, b are frozen at guessed at value of K, B.)
# Fixing the values of $V$, $b$ and $k$, compute
# optimal choices of consumption $c^i$ with households’ FOCs.
# Assume only agent 2 holds debt: $\xi^2 = b$, and both agents
# hold equity: $0 <\theta^i < 1$ for $i=1,2$.
# 1. Set high and low bounds for equity holdings for agent 1 as
# $\theta^1_h$ and $\theta^1_l$. Guess
# $\theta^1 = \frac{1}{2}(\theta^1_h + \theta^1_l)$, and
# $\theta^2 = 1 - \theta^1$. While
# $|\theta^1_h - \theta^1_l|$ is large:
# 1. Compute agent 1’s valuation of the equity claim with a
# fixed-point iteration:
#
# $q_1 = \beta \int \frac{u^\prime(c^1_1(\epsilon))}{u^\prime(c^1_0)} d^e(k,b;\epsilon) g(\epsilon) \ d\epsilon$
#
# where
#
# $c^1_1(\epsilon) = w^1_1(\epsilon) + \theta^1 d^e(k,b;\epsilon)$
#
# and
#
# $c^1_0 = w^1_0 + \theta^1_0V - q_1\theta^1$
# 1. Compute agent 2’s valuation of the bond claim with a
# fixed-point iteration:
#
# $p = \beta \int \frac{u^\prime(c^2_1(\epsilon))}{u^\prime(c^2_0)} d^b(k,b;\epsilon) g(\epsilon) \ d\epsilon$
#
# where
#
# $c^2_1(\epsilon) = w^2_1(\epsilon) + \theta^2 d^e(k,b;\epsilon) + b$
#
# and
#
# $c^2_0 = w^2_0 + \theta^2_0 V - q_1 \theta^2 - pb$
# 1. Compute agent 2’s valuation of the equity claim with a
# fixed-point iteration:
#
# $q_2 = \beta \int \frac{u^\prime(c^2_1(\epsilon))}{u^\prime(c^2_0)} d^e(k,b;\epsilon) g(\epsilon) \ d\epsilon$
#
# where
#
# $c^2_1(\epsilon) = w^2_1(\epsilon) + \theta^2 d^e(k,b;\epsilon) + b$
#
# and
#
# $c^2_0 = w^2_0 + \theta^2_0 V - q_2 \theta^2 - pb$
# 1. If $q_1 > q_2$, Set $\theta_l = \theta^1$;
# otherwise, set $\theta_h = \theta^1$.
# 1. Repeat steps 6Aa through 6Ad until
# $|\theta^1_h - \theta^1_l|$ is small.
# 1. Set bond price as $p$ and equity price as
# $q = \max(q_1,q_2)$.
# 1. Compute optimal choices of consumption with prices:
#
# $$
# \begin{aligned}
# c^1_0 &= w^1_0 + \theta^1_0V - q\theta^1 \\
# c^2_0 &= w^2_0 + \theta^2_0V - q\theta^2 - pb \\
# c^1_1(\epsilon) &= w^1_1(\epsilon) + \theta^1 d^e(k,b;\epsilon) \\
# c^2_1(\epsilon) &= w^2_1(\epsilon) + \theta^2 d^e(k,b;\epsilon) + b
# \end{aligned}
# $$
#
# 1. (Here we confess to abusing notation again, but now in a different
# way. In step 7, we interpret frozen $c^i$s as Big
# $C^i$. We do this to solve the firm’s problem.) Fixing the
# values of $c^i_0$ and $c^i_1(\epsilon)$, compute optimal
# choices of capital $k$ and debt level $b$ using the
# firm’s FONCs.
# 1. Compute deviations from the firm’s FONC for capital $k$ as:
#
# $kfoc = \beta \alpha A k^{\alpha - 1} \left( \int \frac{u^\prime(c^2_1(\epsilon))}{u^\prime(c^2_0)} e^\epsilon g(\epsilon) \ d\epsilon \right) - 1$
# - If $kfoc > 0$, Set $k_l = k$; otherwise, set
# $k_h = k$.
# - Repeat steps 4 through 7A until $|k_h-k_l|$ is small.
# 1. Compute deviations from the firm’s FONC for debt level $b$
# as:
#
# $bfoc = \beta \left[ \int_{\epsilon^*}^\infty \left( \frac{u^\prime(c^1_1(\epsilon))}{u^\prime(c^1_0)} \right) g(\epsilon) \ d\epsilon - \int_{\epsilon^*}^\infty \left( \frac{u^\prime(c^2_1(\epsilon))}{u^\prime(c^2_0)} \right) g(\epsilon) \ d\epsilon \right]$
# - If $bfoc > 0$, Set $b_h = b$; otherwise, set
# $b_l = b$.
# - Repeat steps 3 through 7B until $|b_h-b_l|$ is small.
# 1. Given prices $q$ and $p$ from step 6, and the firm
# choices of $k$ and $b$ from step 7, compute the synthetic
# firm value:
#
# $V_x = -k + q + pb$
# - If $V_x > V$, then set $V_l = V$; otherwise, set
# $V_h = V$.
# - Repeat steps 1 through 8 until $|V_x - V|$ is small.
# 1. At the end, the algorithm returns the equilibrium capital
# $k^*$, debt $b^*$ and firm value $V^*$, as well as
# the following equilibrium values:
# - Equity holdings $\theta^{1,*} = \theta^1(k^*,b^*)$
# - Prices $q^*=q(k^*,b^*), \ p^*=p(k^*,b^*)$
# - Consumption plans
# $C^{1,*}_0 = c^1_0(k^*,b^*),\ C^{2,*}_0 = c^2_0(k^*,b^*), \ C^{1,*}_1(\epsilon) = c^1_1(k^*,b^*;\epsilon),\ C^{1,*}_1(\epsilon) = c^2_1(k^*,b^*;\epsilon)$.
#
# ## Code
#
# We create a Python class `BCG_incomplete_markets` to compute the
# equilibrium allocations of the incomplete market BCG model, given a set
# of parameter values.
#
# The class includes the following methods i.e., functions:
#
# - `solve_eq`: solves the BCG model and returns the equilibrium values
# of capital $k$, debt $b$ and firm value $V$, as
# well as
# - agent 1’s equity holdings $\theta^{1,*}$
# - prices $q^*, p^*$
# - consumption plans
# $C^{1,*}_0, C^{2,*}_0, C^{1,*}_1(\epsilon), C^{2,*}_1(\epsilon)$.
# - `eq_valuation`: inputs equilibrium consumpion plans $C^*$ and
# outputs the following valuations for each pair of $(k,b)$ in
# the grid:
# - the firm $V(k,b)$
# - the equity $q(k,b)$
# - the bond $p(k,b)$.
#
# Parameters include:
#
# - $\chi_1$, $\chi_2$: The correlation parameter for agent 1
# and 2. Default values are respectively 0 and 0.9.
# - $w^1_0$, $w^2_0$: The initial endowments. Default values
# are respectively 0.9 and 1.1.
# - $\theta^1_0$, $\theta^2_0$: The initial holding of the
# firm. Default values are 0.5.
# - $\psi$: The risk parameter. The default value is 3.
# - $\alpha$: The Production function parameter. The default value
# is 0.6.
# - $A$: The productivity of the firm. Default value is 2.5.
# - $\mu$, $\sigma$: The mean and standard deviation of the
# shock distribution. Default values are respectively -0.025 and 0.4
# - $\beta$: The discount factor. The default value is 0.96.
# - bound: The bound for truncated normal distribution. Default is 3.
import pandas as pd
import numpy as np
from scipy.stats import norm
from scipy.stats import truncnorm
from scipy.integrate import quad
from scipy.optimize import bisect
from numba import njit
from interpolation import interp
# +
class BCG_incomplete_markets:
# init method or constructor
    def __init__(self,
                 𝜒1 = 0,
                 𝜒2 = 0.9,
                 w10 = 0.9,
                 w20 = 1.1,
                 𝜃10 = 0.5,
                 𝜃20 = 0.5,
                 𝜓1 = 3,
                 𝜓2 = 3,
                 𝛼 = 0.6,
                 A = 2.5,
                 𝜇 = -0.025,
                 𝜎 = 0.4,
                 𝛽 = 0.96,
                 bound = 3,
                 Vl = 0,
                 Vh = 0.5,
                 kbot = 0.01,
                 #ktop = (𝛼*A)**(1/(1-𝛼)),
                 ktop = 0.25,
                 bbot = 0.1,
                 btop = 0.8):
        """Set up a BCG incomplete-markets economy.

        Parameters
        ----------
        𝜒1, 𝜒2 : loading of agent 1's / agent 2's period-1 endowment on the
            productivity shock 𝜖 (see ``w11`` / ``w21`` below).
        w10, w20 : period-0 endowments of agents 1 and 2.
        𝜃10, 𝜃20 : initial equity holdings of agents 1 and 2.
        𝜓1, 𝜓2 : CRRA risk-aversion coefficients of agents 1 and 2.
        𝛼 : capital share in the production function f(k) = A * k**𝛼.
        A : total factor productivity.
        𝜇, 𝜎 : mean and standard deviation of the shock distribution.
        𝛽 : discount factor.
        bound : truncation bound (in standard deviations) for the
            truncated-normal shock distribution.
        Vl, Vh : lower/upper bisection bounds for firm value.
        kbot, ktop : lower/upper bisection bounds for capital.
        bbot, btop : lower/upper bisection bounds for debt.
        """
        #=========== Setup ===========#
        # Risk parameters
        self.𝜒1 = 𝜒1
        self.𝜒2 = 𝜒2
        # Other parameters
        self.𝜓1 = 𝜓1
        self.𝜓2 = 𝜓2
        self.𝛼 = 𝛼
        self.A = A
        self.𝜇 = 𝜇
        self.𝜎 = 𝜎
        self.𝛽 = 𝛽
        self.bound = bound
        # Bounds for firm value, capital, and debt
        self.Vl = Vl
        self.Vh = Vh
        self.kbot = kbot
        #self.kbot = (𝛼*A)**(1/(1-𝛼))
        self.ktop = ktop
        self.bbot = bbot
        self.btop = btop
        # Utility
        # NOTE(review): `𝜓` is not a parameter of this constructor (only 𝜓1
        # and 𝜓2 are) — unless a module-level `𝜓` exists, calling self.u
        # raises NameError. Confirm which risk coefficient is intended here.
        self.u = njit(lambda c: (c**(1-𝜓)) / (1-𝜓))
        # Initial endowments
        self.w10 = w10
        self.w20 = w20
        self.w0 = w10 + w20
        # Initial holdings
        self.𝜃10 = 𝜃10
        self.𝜃20 = 𝜃20
        # Endowments at t=1: log-linear in the shock with loading 𝜒i; the
        # -𝜒i*𝜇 - 0.5*𝜒i²𝜎² term normalizes the (untruncated) lognormal
        # mean of each endowment to 1 regardless of 𝜒i.
        self.w11 = njit(lambda 𝜖: np.exp(-𝜒1*𝜇 - 0.5*(𝜒1**2)*(𝜎**2) + 𝜒1*𝜖))
        self.w21 = njit(lambda 𝜖: np.exp(-𝜒2*𝜇 - 0.5*(𝜒2**2)*(𝜎**2) + 𝜒2*𝜖))
        self.w1 = njit(lambda 𝜖: self.w11(𝜖) + self.w21(𝜖))
        # Truncated normal: tabulate the pdf once on a fine grid and expose
        # it through the jitted linear interpolant self.g, so integrands can
        # be numba-compiled without calling into scipy.
        ta, tb = (-bound - 𝜇) / 𝜎, (bound - 𝜇) / 𝜎
        rv = truncnorm(ta, tb, loc=𝜇, scale=𝜎)
        𝜖_range = np.linspace(ta, tb, 1000000)
        pdf_range = rv.pdf(𝜖_range)
        self.g = njit(lambda 𝜖: interp(𝜖_range, pdf_range, 𝜖))
#*************************************************************
# Function: Solve for equilibrium of the BCG model
#*************************************************************
def solve_eq(self, print_crit=True):
# Load parameters
𝜓1 = self.𝜓1
𝜓2 = self.𝜓2
𝛼 = self.𝛼
A = self.A
𝛽 = self.𝛽
bound = self.bound
Vl = self.Vl
Vh = self.Vh
kbot = self.kbot
ktop = self.ktop
bbot = self.bbot
btop = self.btop
w10 = self.w10
w20 = self.w20
𝜃10 = self.𝜃10
𝜃20 = self.𝜃20
w11 = self.w11
w21 = self.w21
g = self.g
# We need to find a fixed point on the value of the firm
V_crit = 1
Y = njit(lambda 𝜖, fk: np.exp(𝜖)*fk)
intqq1 = njit(lambda 𝜖, fk, 𝜃1, 𝜓1, b: (w11(𝜖) + 𝜃1*(Y(𝜖, fk) - b))**(-𝜓1)*(Y(𝜖, fk) - b)*g(𝜖))
intp1 = njit(lambda 𝜖, fk, 𝜓2, b: (Y(𝜖, fk)/b)*(w21(𝜖) + Y(𝜖, fk))**(-𝜓2)*g(𝜖))
intp2 = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*g(𝜖))
intqq2 = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*(Y(𝜖, fk) - b)*g(𝜖))
intk1 = njit(lambda 𝜖, fk, 𝜓2: (w21(𝜖) + Y(𝜖, fk))**(-𝜓2)*np.exp(𝜖)*g(𝜖))
intk2 = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*np.exp(𝜖)*g(𝜖))
intB1 = njit(lambda 𝜖, fk, 𝜃1, 𝜓1, b: (w11(𝜖) + 𝜃1*(Y(𝜖, fk) - b))**(-𝜓1)*g(𝜖))
intB2 = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk) - b) + b)**(-𝜓2)*g(𝜖))
while V_crit>1e-4:
# We begin by adding the guess for the value of the firm to endowment
V = (Vl+Vh)/2
ww10 = w10 + 𝜃10*V
ww20 = w20 + 𝜃20*V
# Figure out the optimal level of debt
bl = bbot
bh = btop
b_crit=1
while b_crit>1e-5:
# Setting the conjecture for debt
b = (bl+bh)/2
# Figure out the optimal level of capital
kl = kbot
kh = ktop
k_crit=1
while k_crit>1e-5:
# Setting the conjecture for capital
k = (kl+kh)/2
# Production
fk = A*(k**𝛼)
# Y = lambda 𝜖: np.exp(𝜖)*fk
# Compute integration threshold
epstar = np.log(b/fk)
#**************************************************************
# Compute the prices and allocations consistent with consumers'
# Euler equations
#**************************************************************
# We impose the following:
# Agent 1 buys equity
# Agent 2 buys equity and all debt
# Agents trade such that prices converge
#========
# Agent 1
#========
# Holdings
𝜉1 = 0
𝜃1a = 0.3
𝜃1b = 1
while abs(𝜃1b - 𝜃1a) > 0.001:
𝜃1 = (𝜃1a + 𝜃1b) / 2
# qq1 is the equity price consistent with agent-1 Euler Equation
## Note: Price is in the date-0 budget constraint of the agent
## First, compute the constant term that is not influenced by q
## that is, 𝛽E[u'(c^{1}_{1})d^{e}(k,B)]
# intqq1 = lambda 𝜖: (w11(𝜖) + 𝜃1*(Y(𝜖, fk) - b))**(-𝜓1)*(Y(𝜖, fk) - b)*g(𝜖)
# const_qq1 = 𝛽 * quad(intqq1,epstar,bound)[0]
const_qq1 = 𝛽 * quad(intqq1,epstar,bound, args=(fk, 𝜃1, 𝜓1, b))[0]
## Second, iterate to get the equity price q
qq1l = 0
qq1h = ww10
diff = 1
while diff > 1e-7:
qq1 = (qq1l+qq1h)/2
rhs = const_qq1/((ww10-qq1*𝜃1)**(-𝜓1));
if (rhs > qq1):
qq1l = qq1
else:
qq1h = qq1
diff = abs(qq1l-qq1h)
#========
# Agent 2
#========
𝜉2 = b - 𝜉1
𝜃2 = 1 - 𝜃1
# p is the bond price consistent with agent-2 Euler Equation
## Note: Price is in the date-0 budget constraint of the agent
## First, compute the constant term that is not influenced by p
## that is, 𝛽E[u'(c^{2}_{1})d^{b}(k,B)]
# intp1 = lambda 𝜖: (Y(𝜖, fk)/b)*(w21(𝜖) + Y(𝜖, fk))**(-𝜓2)*g(𝜖)
# intp2 = lambda 𝜖: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*g(𝜖)
# const_p = 𝛽 * (quad(intp1,-bound,epstar)[0] + quad(intp2,epstar,bound)[0])
const_p = 𝛽 * (quad(intp1,-bound,epstar, args=(fk, 𝜓2, b))[0]\
+ quad(intp2,epstar,bound, args=(fk, 𝜃2, 𝜓2, b))[0])
## iterate to get the bond price p
pl = 0
ph = ww20/b
diff = 1
while diff > 1e-7:
p = (pl+ph)/2
rhs = const_p/((ww20-qq1*𝜃2-p*b)**(-𝜓2))
if (rhs > p):
pl = p
else:
ph = p
diff = abs(pl-ph)
# qq2 is the equity price consistent with agent-2 Euler Equation
# intqq2 = lambda 𝜖: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*(Y(𝜖, fk) - b)*g(𝜖)
const_qq2 = 𝛽 * quad(intqq2,epstar,bound, args=(fk, 𝜃2, 𝜓2, b))[0]
qq2l = 0
qq2h = ww20
diff = 1
while diff > 1e-7:
qq2 = (qq2l+qq2h)/2
rhs = const_qq2/((ww20-qq2*𝜃2-p*b)**(-𝜓2));
if (rhs > qq2):
qq2l = qq2
else:
qq2h = qq2
diff = abs(qq2l-qq2h)
# q be the maximum valuation for the equity among agents
## This will be the equity price based on Makowski's criterion
q = max(qq1,qq2)
#================
# Update holdings
#================
if qq1 > qq2:
𝜃1a = 𝜃1
else:
𝜃1b = 𝜃1
#================
# Get consumption
#================
c10 = ww10 - q*𝜃1
c11 = lambda 𝜖: w11(𝜖) + 𝜃1*max(Y(𝜖, fk)-b,0)
c20 = ww20 - q*(1-𝜃1) - p*b
c21 = lambda 𝜖: w21(𝜖) + (1-𝜃1)*max(Y(𝜖, fk)-b,0) + min(Y(𝜖, fk),b)
#*************************************************
# Compute the first order conditions for the firm
#*************************************************
#===========
# Equity FOC
#===========
# Only agent 2's IMRS is relevent
# intk1 = lambda 𝜖: (w21(𝜖) + Y(𝜖, fk))**(-𝜓2)*np.exp(𝜖)*g(𝜖)
# intk2 = lambda 𝜖: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*np.exp(𝜖)*g(𝜖)
# kfoc_num = quad(intk1,-bound,epstar)[0] + quad(intk2,epstar,bound)[0]
kfoc_num = quad(intk1,-bound,epstar, args=(fk, 𝜓2))[0] + quad(intk2,epstar,bound, args=(fk, 𝜃2, 𝜓2, b))[0]
kfoc_denom = (ww20- q*𝜃2 - p*b)**(-𝜓2)
kfoc = 𝛽*𝛼*A*(k**(𝛼-1))*(kfoc_num/kfoc_denom) - 1
if (kfoc > 0):
kl = k
else:
kh = k
k_crit = abs(kh-kl)
if print_crit:
print("critical value of k: {:.5f}".format(k_crit))
#=========
# Bond FOC
#=========
# intB1 = lambda 𝜖: (w11(𝜖) + 𝜃1*(Y(𝜖, fk) - b))**(-𝜓1)*g(𝜖)
# intB2 = lambda 𝜖: (w21(𝜖) + 𝜃2*(Y(𝜖, fk) - b) + b)**(-𝜓2)*g(𝜖)
# bfoc1 = quad(intB1,epstar,bound)[0] / (ww10 - q*𝜃1)**(-𝜓1)
# bfoc2 = quad(intB2,epstar,bound)[0] / (ww20 - q*𝜃2 - p*b)**(-𝜓2)
bfoc1 = quad(intB1,epstar,bound, args=(fk, 𝜃1, 𝜓1, b))[0] / (ww10 - q*𝜃1)**(-𝜓1)
bfoc2 = quad(intB2,epstar,bound, args=(fk, 𝜃2, 𝜓2, b))[0] / (ww20 - q*𝜃2 - p*b)**(-𝜓2)
bfoc = bfoc1 - bfoc2
if (bfoc > 0):
bh = b
else:
bl = b
b_crit = abs(bh-bl)
if print_crit:
print("#=== critical value of b: {:.5f}".format(b_crit))
# Compute the value of the firm
value_x = -k + q + p*b
if (value_x > V):
Vl = V
else:
Vh = V
V_crit = abs(value_x-V)
if print_crit:
print("#====== critical value of V: {:.5f}".format(V_crit))
print('k,b,p,q,kfoc,bfoc,epstar,V,V_crit')
formattedList = ["%.3f" % member for member in [k,
b,
p,
q,
kfoc,
bfoc,
epstar,
V,
V_crit]]
print(formattedList)
#*********************************
# Equilibrium values
#*********************************
# Return the results
kss = k
bss = b
Vss = V
qss = q
pss = p
c10ss = c10
c11ss = c11
c20ss = c20
c21ss = c21
𝜃1ss = 𝜃1
# Print the results
print('finished')
# print('k,b,p,q,kfoc,bfoc,epstar,V,V_crit')
#formattedList = ["%.3f" % member for member in [kss,
# bss,
# pss,
# qss,
# kfoc,
# bfoc,
# epstar,
# Vss,
# V_crit]]
#print(formattedList)
return kss,bss,Vss,qss,pss,c10ss,c11ss,c20ss,c21ss,𝜃1ss
#*************************************************************
# Function: Equity and bond valuations by different agents
#*************************************************************
def valuations_by_agent(self,
c10, c11, c20, c21,
k, b):
# Load parameters
𝜓1 = self.𝜓1
𝜓2 = self.𝜓2
𝛼 = self.𝛼
A = self.A
𝛽 = self.𝛽
bound = self.bound
Vl = self.Vl
Vh = self.Vh
kbot = self.kbot
ktop = self.ktop
bbot = self.bbot
btop = self.btop
w10 = self.w10
w20 = self.w20
𝜃10 = self.𝜃10
𝜃20 = self.𝜃20
w11 = self.w11
w21 = self.w21
g = self.g
# Get functions for IMRS/state price density
IMRS1 = lambda 𝜖: 𝛽 * (c11(𝜖)/c10)**(-𝜓1)*g(𝜖)
IMRS2 = lambda 𝜖: 𝛽 * (c21(𝜖)/c20)**(-𝜓2)*g(𝜖)
# Production
fk = A*(k**𝛼)
Y = lambda 𝜖: np.exp(𝜖)*fk
# Compute integration threshold
epstar = np.log(b/fk)
# Compute equity valuation with agent 1's IMRS
intQ1 = lambda 𝜖: IMRS1(𝜖)*(Y(𝜖) - b)
Q1 = quad(intQ1, epstar, bound)[0]
# Compute bond valuation with agent 1's IMRS
intP1 = lambda 𝜖: IMRS1(𝜖)*Y(𝜖)/b
P1 = quad(intP1, -bound, epstar)[0] + quad(IMRS1, epstar, bound)[0]
# Compute equity valuation with agent 2's IMRS
intQ2 = lambda 𝜖: IMRS2(𝜖)*(Y(𝜖) - b)
Q2 = quad(intQ2, epstar, bound)[0]
# Compute bond valuation with agent 2's IMRS
intP2 = lambda 𝜖: IMRS2(𝜖)*Y(𝜖)/b
P2 = quad(intP2, -bound, epstar)[0] + quad(IMRS2, epstar, bound)[0]
return Q1,Q2,P1,P2
#*************************************************************
# Function: equilibrium valuations for firm, equity, bond
#*************************************************************
def eq_valuation(self, c10, c11, c20, c21, N=30):
# Load parameters
𝜓1 = self.𝜓1
𝜓2 = self.𝜓2
𝛼 = self.𝛼
A = self.A
𝛽 = self.𝛽
bound = self.bound
Vl = self.Vl
Vh = self.Vh
kbot = self.kbot
ktop = self.ktop
bbot = self.bbot
btop = self.btop
w10 = self.w10
w20 = self.w20
𝜃10 = self.𝜃10
𝜃20 = self.𝜃20
w11 = self.w11
w21 = self.w21
g = self.g
# Create grids
kgrid, bgrid = np.meshgrid(np.linspace(kbot,ktop,N),
np.linspace(bbot,btop,N))
Vgrid = np.zeros_like(kgrid)
Qgrid = np.zeros_like(kgrid)
Pgrid = np.zeros_like(kgrid)
# Loop: firm value
for i in range(N):
for j in range(N):
# Get capital and debt
k = kgrid[i,j]
b = bgrid[i,j]
# Valuations by each agent
Q1,Q2,P1,P2 = self.valuations_by_agent(c10,
c11,
c20,
c21,
k,
b)
# The prices will be the maximum of the valuations
Q = max(Q1,Q2)
P = max(P1,P2)
# Compute firm value
V = -k + Q + P*b
Vgrid[i,j] = V
Qgrid[i,j] = Q
Pgrid[i,j] = P
return kgrid, bgrid, Vgrid, Qgrid, Pgrid
# -
# ## Examples
#
# Below we show some examples computed using `BCG_incomplete_markets`.
#
# ### First example
#
# In the first example, we set up an instance of the BCG incomplete
# markets model with default parameter values.
# Build a default-parameter instance and solve for the equilibrium.
mdl = BCG_incomplete_markets()
kss,bss,Vss,qss,pss,c10ss,c11ss,c20ss,c21ss,𝜃1ss = mdl.solve_eq(print_crit=False)
# Sanity check: at equilibrium, firm value V should equal -k + q + p*b.
print(-kss+qss+pss*bss)
print(Vss)
# Agent 1's equilibrium equity holding.
print(𝜃1ss)
# Python reports to us that the equilibrium firm value is $V=0.101$,
# with capital $k = 0.151$ and debt $b=0.484$.
#
# Let’s verify some things that have to be true if our algorithm has truly
# found an equilibrium.
#
# Thus, let’s see if the firm is actually maximizing its firm value given
# the equilibrium pricing function $q(k,b)$ for equity and
# $p(k,b)$ for the bond.
# +
# Value the firm over a 30x30 (k,b) grid using the equilibrium
# consumption plans; the grid maximum should approximately equal the
# equilibrium firm value.
kgrid, bgrid, Vgrid, Qgrid, Pgrid = mdl.eq_valuation(c10ss, c11ss, c20ss, c21ss,N=30)
print('Maximum valuation of the firm value in the (k,B) grid: {:.5f}'.format(Vgrid.max()))
print('Equilibrium firm value: {:.5f}'.format(Vss))
# -
# Up to the approximation involved in using a discrete grid, these numbers
# give us comfort that the firm does indeed seem to be maximizing its
# value at the top of the value hill on the $(k,b)$ plane that it
# faces.
#
# Below we will plot the firm’s value as a function of $k,b$.
#
# We’ll also plot the equilibrium price functions $q(k,b)$ and
# $p(k,b)$.
# +
from IPython.display import Image
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
import plotly.graph_objs as go
# Firm Valuation: 3D surface of V over the (k,b) grid, with the
# equilibrium point marked as a red dot.
fig = go.Figure(data=[go.Scatter3d(x=[kss],
                                   y=[bss],
                                   z=[Vss],
                                   mode='markers',
                                   marker=dict(size=3, color='red')),
                      go.Surface(x=kgrid,
                                 y=bgrid,
                                 z=Vgrid,
                                 colorscale='Greens',opacity=0.6)])
fig.update_layout(scene = dict(
                  xaxis_title='x - Capital k',
                  yaxis_title='y - Debt b',
                  zaxis_title='z - Firm Value V',
                  aspectratio = dict(x=1,y=1,z=1)),
                  width=700,
                  height=700,
                  margin=dict(l=50, r=50, b=65, t=90))
# Fix the camera angle so exported images are comparable across plots.
fig.update_layout(scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=2)))
fig.update_layout(title='Equilibrium firm valuation for the grid of (k,b)')
# Export to PNG file
Image(fig.to_image(format="png"))
# fig.show() will provide interactive plot when running
# code locally
# -
# #### A Modigliani-Miller theorem?
#
# The red dot in the above graph is **both** an equilibrium $(b,k)$
# chosen by a representative firm **and** the equilibrium $B, K$
# pair chosen by the aggregate of all firms. Thus, **in equilibrium** it
# is true that
#
# $$
# (b,k) = (B,K)
# $$
#
# But an individual firm named $\zeta \in [0,1]$ neither knows nor
# cares whether it sets $(b(\zeta),k(\zeta)) = (B,K)$.
#
# Indeed the above graph has a ridge of $b(\zeta)$’s that also
# maximize the firm’s value so long as it sets $k(\zeta) = K$.
#
# Here it is important that the measure of firms that deviate from setting
# $b$ at the red dot is very small – measure zero – so that
# $B$ remains at the red dot even while one firm $\zeta$
# deviates.
#
# So within this equilibrium, there is a severely qualified type of
# Modigliani-Miller theorem asserting that firm $\zeta$’s value is
# independent of how it mixes its financing between equity and bonds (so
# long as it is atypical of what other firms are doing).
#
# Thus, while an individual firm $\zeta$’s financial structure is
# indeterminate, the **market’s** financial structure is determinant and
# sits at the red dot in the above graph.
#
# This contrasts sharply with the *unqualified* Modigliani-Miller theorem
# described in the complete markets model in the lecture {doc}`BCG_complete_mkts <BCG_complete_mkts>` . There
# the **market’s** financial structure was indeterminate.
#
# These things bear some more thought and exploration.
#
# So we will do some calculations designed to ferret out a sense in which
# the equilibrium $(k,b) = (K,B)$ outcome at the red dot in the
# above graph is **stable**.
#
# In particular, we’ll explore the consequences of some choices of
# $b=B$ that deviate from the red dot and ask whether firm
# $\zeta$ would want to remain at that $b$.
#
# In more detail, here is what we’ll do:
#
# 1. Obtain equilibrium values of capital and debt as $k^*$ and
# $b^*$, which is the red dot above.
# 1. Now fixed $k^*$, and let $b^{**} = b^* - e$ for some
# $e > 0$. Conjecture that big $K = K^*$ but big
# $B = b^{**}$.
# 1. Take $K$ and $B$ and compute IMRS as we did before.
# 1. Taking the **new** IMRS to the firm’s problem. Plot 3D surface for
# the valuations of the firm with this **new** IMRS.
# 1. Check if the value at $k^*$, $b^{**}$ is at the top of
# this new 3D surface.
# 1. Repeat the above analyses for $b^{**} = b^* + e$.
#
# To conduct the above procedures, we create a function `off_eq_check`
# that inputs the BCG model instance parameters, equilibrium capital
# $k^*$ and debt $b^*$, and a perturbation of debt $e$.
#
# The function outputs the fixed point firm values $V^{**}$, prices
# $q^{**}$, $p^{**}$, and consumption choices $c^{**}$.
#
# Importantly, we relax the condition that only agent 2 holds bonds.
#
# Now **both** agents can hold bonds, i.e., $0\leq \xi^1 \leq B$ and
# $\xi^1 +\xi^2 = B$.
#
# That implies the consumers’ budget constraints are:
#
# $$
# \begin{aligned}
# c^1_0 &= w^1_0 + \theta^1_0V - q\theta^1 - p\xi^1 \\
# c^2_0 &= w^2_0 + \theta^2_0V - q\theta^2 - p\xi^2 \\
# c^1_1(\epsilon) &= w^1_1(\epsilon) + \theta^1 d^e(k,b;\epsilon) + \xi^1 \\
# c^2_1(\epsilon) &= w^2_1(\epsilon) + \theta^2 d^e(k,b;\epsilon) + \xi^2
# \end{aligned}
# $$
#
# The function also outputs agent 1’s bond holdings $\xi_1$.
def off_eq_check(mdl,kss,bss,e=0.1):
    """Check stability of the equilibrium against a debt perturbation.

    Fixes big K at the equilibrium capital ``kss`` and conjectures big
    B = ``bss`` + ``e``, then solves for the consistent fixed point of
    firm value, prices, and allocations.  Unlike ``solve_eq``, both
    agents may hold bonds here: 0 <= 𝜉1 <= B and 𝜉1 + 𝜉2 = B.

    Args:
        mdl: a BCG_incomplete_markets instance supplying parameters.
        kss, bss: equilibrium capital and debt.
        e: perturbation added to equilibrium debt (may be negative).

    Returns:
        Tuple ``(V, k, b, p, q, c10, c11, c20, c21, 𝜉1)``: fixed-point
        firm value, the (k, b) pair checked, bond and equity prices,
        consumption plans, and agent 1's bond holding.
    """
    # Big K and big B
    k = kss
    b = bss + e
    # Load parameters
    𝜓1 = mdl.𝜓1
    𝜓2 = mdl.𝜓2
    𝛼 = mdl.𝛼
    A = mdl.A
    𝛽 = mdl.𝛽
    bound = mdl.bound
    Vl = mdl.Vl
    Vh = mdl.Vh
    kbot = mdl.kbot
    ktop = mdl.ktop
    bbot = mdl.bbot
    btop = mdl.btop
    w10 = mdl.w10
    w20 = mdl.w20
    𝜃10 = mdl.𝜃10
    𝜃20 = mdl.𝜃20
    w11 = mdl.w11
    w21 = mdl.w21
    g = mdl.g
    # Jitted integrands for the Euler-equation constants below.
    Y = njit(lambda 𝜖, fk: np.exp(𝜖)*fk)
    intqq1 = njit(lambda 𝜖, fk, 𝜃1, 𝜓1, 𝜉1, b: (w11(𝜖) + 𝜃1*(Y(𝜖, fk) - b) + 𝜉1)**(-𝜓1)*(Y(𝜖, fk) - b)*g(𝜖))
    intpp1a = njit(lambda 𝜖, fk, 𝜓1, 𝜉1, b: (Y(𝜖, fk)/b)*(w11(𝜖) + Y(𝜖, fk)/b*𝜉1)**(-𝜓1)*g(𝜖))
    intpp1b = njit(lambda 𝜖, fk, 𝜃1, 𝜓1, 𝜉1, b: (w11(𝜖) + 𝜃1*(Y(𝜖, fk)-b) + 𝜉1)**(-𝜓1)*g(𝜖))
    intpp2a = njit(lambda 𝜖, fk, 𝜓2, 𝜉2, b: (Y(𝜖, fk)/b)*(w21(𝜖) + Y(𝜖, fk)/b*𝜉2)**(-𝜓2)*g(𝜖))
    intpp2b = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, 𝜉2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + 𝜉2)**(-𝜓2)*g(𝜖))
    intqq2 = njit(lambda 𝜖, fk, 𝜃2, 𝜓2, b: (w21(𝜖) + 𝜃2*(Y(𝜖, fk)-b) + b)**(-𝜓2)*(Y(𝜖, fk) - b)*g(𝜖))
    # Loop: Find fixed points V, q and p
    V_crit = 1
    while V_crit>1e-5:
        # We begin by adding the guess for the value of the firm to endowment
        V = (Vl+Vh)/2
        ww10 = w10 + 𝜃10*V
        ww20 = w20 + 𝜃20*V
        # Production
        fk = A*(k**𝛼)
        # Compute integration threshold (default when 𝜖 < epstar)
        epstar = np.log(b/fk)
        #**************************************************************
        # Compute the prices and allocations consistent with consumers'
        # Euler equations
        #**************************************************************
        # We impose the following:
        # Agent 1 buys equity
        # Agent 2 buys equity and all debt
        # Agents trade such that prices converge
        #========
        # Agent 1
        #========
        # Holdings: bisection on agent 1's bond holding 𝜉1 in [0, b/2]
        𝜉1a = 0
        𝜉1b = b/2
        p = 0.3
        while abs(𝜉1b - 𝜉1a) > 0.001:
            𝜉1 = (𝜉1a + 𝜉1b) / 2
            𝜃1a = 0.3
            𝜃1b = 1
            while abs(𝜃1b - 𝜃1a) > (0.001/b):
                𝜃1 = (𝜃1a + 𝜃1b) / 2
                # qq1 is the equity price consistent with agent-1 Euler Equation
                ## Note: Price is in the date-0 budget constraint of the agent
                ## First, compute the constant term that is not influenced by q
                ## that is, 𝛽E[u'(c^{1}_{1})d^{e}(k,B)]
                const_qq1 = 𝛽 * quad(intqq1,epstar,bound, args=(fk, 𝜃1, 𝜓1, 𝜉1, b))[0]
                ## Second, iterate to get the equity price q
                qq1l = 0
                qq1h = ww10
                diff = 1
                while diff > 1e-7:
                    qq1 = (qq1l+qq1h)/2
                    rhs = const_qq1/((ww10-qq1*𝜃1-p*𝜉1)**(-𝜓1));
                    if (rhs > qq1):
                        qq1l = qq1
                    else:
                        qq1h = qq1
                    diff = abs(qq1l-qq1h)
                # pp1 is the bond price consistent with agent-2 Euler Equation
                ## Note: Price is in the date-0 budget constraint of the agent
                ## First, compute the constant term that is not influenced by p
                ## that is, 𝛽E[u'(c^{1}_{1})d^{b}(k,B)]
                const_pp1 = 𝛽 * (quad(intpp1a,-bound,epstar, args=(fk, 𝜓1, 𝜉1, b))[0] \
                                 + quad(intpp1b,epstar,bound, args=(fk, 𝜃1, 𝜓1, 𝜉1, b))[0])
                ## iterate to get the bond price p
                pp1l = 0
                pp1h = ww10/b
                diff = 1
                while diff > 1e-7:
                    pp1 = (pp1l+pp1h)/2
                    rhs = const_pp1/((ww10-qq1*𝜃1-pp1*𝜉1)**(-𝜓1))
                    if (rhs > pp1):
                        pp1l = pp1
                    else:
                        pp1h = pp1
                    diff = abs(pp1l-pp1h)
                #========
                # Agent 2
                #========
                𝜉2 = b - 𝜉1
                𝜃2 = 1 - 𝜃1
                # pp2 is the bond price consistent with agent-2 Euler Equation
                ## Note: Price is in the date-0 budget constraint of the agent
                ## First, compute the constant term that is not influenced by p
                ## that is, 𝛽E[u'(c^{2}_{1})d^{b}(k,B)]
                const_pp2 = 𝛽 * (quad(intpp2a,-bound,epstar, args=(fk, 𝜓2, 𝜉2, b))[0] \
                                 + quad(intpp2b,epstar,bound, args=(fk, 𝜃2, 𝜓2, 𝜉2, b))[0])
                ## iterate to get the bond price p
                pp2l = 0
                pp2h = ww20/b
                diff = 1
                while diff > 1e-7:
                    pp2 = (pp2l+pp2h)/2
                    rhs = const_pp2/((ww20-qq1*𝜃2-pp2*𝜉2)**(-𝜓2))
                    if (rhs > pp2):
                        pp2l = pp2
                    else:
                        pp2h = pp2
                    diff = abs(pp2l-pp2h)
                # p be the maximum valuation for the bond among agents
                ## This will be the equity price based on Makowski's criterion
                p = max(pp1,pp2)
                # qq2 is the equity price consistent with agent-2 Euler Equation
                const_qq2 = 𝛽 * quad(intqq2,epstar,bound, args=(fk, 𝜃2, 𝜓2, b))[0]
                qq2l = 0
                qq2h = ww20
                diff = 1
                while diff > 1e-7:
                    qq2 = (qq2l+qq2h)/2
                    rhs = const_qq2/((ww20-qq2*𝜃2-p*𝜉2)**(-𝜓2));
                    if (rhs > qq2):
                        qq2l = qq2
                    else:
                        qq2h = qq2
                    diff = abs(qq2l-qq2h)
                # q be the maximum valuation for the equity among agents
                ## This will be the equity price based on Makowski's criterion
                q = max(qq1,qq2)
                #================
                # Update holdings
                #================
                if qq1 > qq2:
                    𝜃1a = 𝜃1
                else:
                    𝜃1b = 𝜃1
            #print(p,q,𝜉1,𝜃1)
            # Narrow the bracket on 𝜉1 toward the agent valuing bonds more.
            if pp1 > pp2:
                𝜉1a = 𝜉1
            else:
                𝜉1b = 𝜉1
            #================
            # Get consumption
            #================
            c10 = ww10 - q*𝜃1 - p*𝜉1
            c11 = lambda 𝜖: w11(𝜖) + 𝜃1*max(Y(𝜖, fk)-b,0) + 𝜉1*min(Y(𝜖, fk)/b,1)
            c20 = ww20 - q*(1-𝜃1) - p*(b-𝜉1)
            c21 = lambda 𝜖: w21(𝜖) + (1-𝜃1)*max(Y(𝜖, fk)-b,0) + (b-𝜉1)*min(Y(𝜖, fk)/b,1)
        # Compute the value of the firm
        value_x = -k + q + p*b
        if (value_x > V):
            Vl = V
        else:
            Vh = V
        V_crit = abs(value_x-V)
    return V,k,b,p,q,c10,c11,c20,c21,𝜉1
# Here is our strategy for checking *stability* of an equilibrium.
#
# We use `off_eq_check` to obtain the consumption plans from both agents
# with regard to the conjectured big $K$ and big $B$.
#
# Then we input the consumption plans into the function `eq_valuation`
# from the BCG model class, and plot the agents’ valuations associated
# with different choices of $k$ and $b$.
#
# Our hypothesis is that $(k^*,b^{**})$ is **not** at the top of the
# firm valuation 3D surface so that the firm is **not** maximizing its
# value if it chooses $k = K = k^*$ and $b = B = b^{**}$.
#
# That indicates that $(k^*,b^{**})$ is not an equilibrium capital
# structure for the firm.
#
# We first check the case in which $b^{**} = b^* - e$ where
# $e = 0.1$:
# +
#====================== Experiment 1 ======================#
Ve1,ke1,be1,pe1,qe1,c10e1,c11e1,c20e1,c21e1,𝜉1e1 = off_eq_check(mdl,
kss,
bss,
e=-0.1)
# Firm Valuation
kgride1, bgride1, Vgride1, Qgride1, Pgride1 = mdl.eq_valuation(c10e1, c11e1, c20e1, c21e1,N=20)
print('Maximum valuation of the firm value in the (k,b) grid: {:.4f}'.format(Vgride1.max()))
print('Equilibrium firm value: {:.4f}'.format(Ve1))
fig = go.Figure(data=[go.Scatter3d(x=[ke1],
y=[be1],
z=[Ve1],
mode='markers',
marker=dict(size=3, color='red')),
go.Surface(x=kgride1,
y=bgride1,
z=Vgride1,
colorscale='Greens',opacity=0.6)])
fig.update_layout(scene = dict(
xaxis_title='x - Capital k',
yaxis_title='y - Debt b',
zaxis_title='z - Firm Value V',
aspectratio = dict(x=1,y=1,z=1)),
width=700,
height=700,
margin=dict(l=50, r=50, b=65, t=90))
fig.update_layout(scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=2)))
fig.update_layout(title='Equilibrium firm valuation for the grid of (k,b)')
# Export to PNG file
Image(fig.to_image(format="png"))
# fig.show() will provide interactive plot when running
# code locally
# -
# In the above 3D surface of prospective firm valuations, the perturbed
# choice $(k^*,b^{*}-e)$, represented by the red dot, is not at the
# top.
#
# The firm could issue more debts and attain a higher firm valuation from
# the market.
#
# Therefore, $(k^*,b^{*}-e)$ would not be an equilibrium.
#
# Next, we check for $b^{**} = b^* + e$.
# +
#====================== Experiment 2 ======================#
Ve2,ke2,be2,pe2,qe2,c10e2,c11e2,c20e2,c21e2,𝜉1e2 = off_eq_check(mdl,
kss,
bss,
e=0.1)
# Firm Valuation
kgride2, bgride2, Vgride2, Qgride2, Pgride2 = mdl.eq_valuation(c10e2, c11e2, c20e2, c21e2,N=20)
print('Maximum valuation of the firm value in the (k,b) grid: {:.4f}'.format(Vgride2.max()))
print('Equilibrium firm value: {:.4f}'.format(Ve2))
fig = go.Figure(data=[go.Scatter3d(x=[ke2],
y=[be2],
z=[Ve2],
mode='markers',
marker=dict(size=3, color='red')),
go.Surface(x=kgride2,
y=bgride2,
z=Vgride2,
colorscale='Greens',opacity=0.6)])
fig.update_layout(scene = dict(
xaxis_title='x - Capital k',
yaxis_title='y - Debt b',
zaxis_title='z - Firm Value V',
aspectratio = dict(x=1,y=1,z=1)),
width=700,
height=700,
margin=dict(l=50, r=50, b=65, t=90))
fig.update_layout(scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=2)))
fig.update_layout(title='Equilibrium firm valuation for the grid of (k,b)')
# Export to PNG file
Image(fig.to_image(format="png"))
# fig.show() will provide interactive plot when running
# code locally
# -
# In contrast to $(k^*,b^* - e)$, the 3D surface for
# $(k^*,b^*+e)$ now indicates that a firm would want to *decrease*
# its debt issuance to attain a higher valuation.
#
# That incentive to deviate means that $(k^*,b^*+e)$ is not an
# equilibrium capital structure for the firm.
#
# Interestingly, if consumers were to anticipate that firms would
# over-issue debt, i.e. $B > b^*$, then both types of agents would
# be willing to hold corporate debt. Specifically, $\xi^1 > 0$:
print('Bond holdings of agent 1: {:.3f}'.format(𝜉1e2))
# Our two *stability experiments* suggest that the equilibrium capital
# structure $(k^*,b^*)$ is locally unique even though **at the
# equilibrium** an individual firm would be willing to deviate from the
# equilibrium representative firms’ debt choice.
#
# These experiments thus refine our discussion of the *qualified*
# Modigliani-Miller theorem that prevails in this example economy.
#
# #### Equilibrium equity and bond price functions
#
# It is also interesting to look at the equilibrium price functions
# $q(k,b)$ and $p(k,b)$ faced by firms in our rational
# expectations equilibrium.
# +
# Equity Valuation: 3D surface of the equity price q over the (k,b)
# grid, with the equilibrium point marked in red.
fig = go.Figure(data=[go.Scatter3d(x=[kss],
                                   y=[bss],
                                   z=[qss],
                                   mode='markers',
                                   marker=dict(size=3, color='red')),
                      go.Surface(x=kgrid,
                                 y=bgrid,
                                 z=Qgrid,
                                 colorscale='Blues',opacity=0.6)])
fig.update_layout(scene = dict(
                  xaxis_title='x - Capital k',
                  yaxis_title='y - Debt b',
                  zaxis_title='z - Equity price q',
                  aspectratio = dict(x=1,y=1,z=1)),
                  width=700,
                  height=700,
                  margin=dict(l=50, r=50, b=65, t=90))
fig.update_layout(scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=2)))
fig.update_layout(title='Equilibrium equity valuation for the grid of (k,b)')
# Export to PNG file
Image(fig.to_image(format="png"))
# fig.show() will provide interactive plot when running
# code locally
# +
# Bond Valuation: 3D surface of the bond price p over the (k,b) grid,
# with the equilibrium point marked in red.
fig = go.Figure(data=[go.Scatter3d(x=[kss],
                                   y=[bss],
                                   z=[pss],
                                   mode='markers',
                                   marker=dict(size=3, color='red')),
                      go.Surface(x=kgrid,
                                 y=bgrid,
                                 z=Pgrid,
                                 colorscale='Oranges',opacity=0.6)])
fig.update_layout(scene = dict(
                  xaxis_title='x - Capital k',
                  yaxis_title='y - Debt b',
                  # BUG FIX: the z axis plots the bond price p, not q.
                  zaxis_title='z - Bond price p',
                  aspectratio = dict(x=1,y=1,z=1)),
                  width=700,
                  height=700,
                  margin=dict(l=50, r=50, b=65, t=90))
fig.update_layout(scene_camera=dict(eye=dict(x=1.5, y=-1.5, z=2)))
fig.update_layout(title='Equilibrium bond valuation for the grid of (k,b)')
# Export to PNG file
Image(fig.to_image(format="png"))
# fig.show() will provide interactive plot when running
# code locally
# -
# ### Comments on equilibrium pricing functions
#
# The equilibrium pricing functions displayed above merit study and
# reflection.
#
# They reveal the countervailing effects on a firm’s valuations of bonds
# and equities that lie beneath the Modigliani-Miller ridge apparent in
# our earlier graph of an individual firm $\zeta$’s value as a
# function of $k(\zeta), b(\zeta)$.
#
# ### Another example economy
#
# We illustrate how the fraction of initial endowments held by agent 2,
# $w^2_0/(w^1_0+w^2_0)$ affects an equilibrium capital structure
# $(k,b) = (K, B)$ as well as associated allocations.
#
# We would also like to see how in equilibrium agents 1 and 2 respectively
# value equity and the bond.
#
# $$
# \begin{aligned}
# Q^i = \beta \int \frac{u^\prime(C^{i,*}_1(\epsilon))}{u^\prime(C^{i,*}_0)} d^e(k^*,b^*;\epsilon) g(\epsilon) \ d\epsilon \\
# P^i = \beta \int \frac{u^\prime(C^{i,*}_1(\epsilon))}{u^\prime(C^{i,*}_0)} d^b(k^*,b^*;\epsilon) g(\epsilon) \ d\epsilon \\
# \end{aligned}
# $$
#
# The function `valuations_by_agent` is used in calculating these
# valuations.
# +
# Lists for storage
wlist = []
klist = []
blist = []
qlist = []
plist = []
Vlist = []
tlist = []
q1list = []
q2list = []
p1list = []
p2list = []
# For loop: optimization for each endowment combination.
# Total endowment stays at 2 while agent 2's share rises from 0.55.
for i in range(10):
    print(i)
    # Save fraction
    w10 = 0.9 - 0.05*i
    w20 = 1.1 + 0.05*i
    wlist.append(w20/(w10+w20))
    # Create the instance
    mdl = BCG_incomplete_markets(w10 = w10, w20 = w20, ktop = 0.5, btop = 2.5)
    # Solve for equilibrium
    kss,bss,Vss,qss,pss,c10ss,c11ss,c20ss,c21ss,𝜃1ss = mdl.solve_eq(print_crit=False)
    # Store the equilibrium results
    klist.append(kss)
    blist.append(bss)
    qlist.append(qss)
    plist.append(pss)
    Vlist.append(Vss)
    tlist.append(𝜃1ss)
    # Evaluations of equity and bond by each agent
    Q1,Q2,P1,P2 = mdl.valuations_by_agent(c10ss, c11ss, c20ss, c21ss, kss, bss)
    # Save the valuations
    q1list.append(Q1)
    q2list.append(Q2)
    p1list.append(P1)
    p2list.append(P2)
# +
# Plot equilibrium objects against agent 2's endowment share.
fig, ax = plt.subplots(3,2,figsize=(12,12))
ax[0,0].plot(wlist,klist)
ax[0,0].set_title('capital')
ax[0,1].plot(wlist,blist)
ax[0,1].set_title('debt')
ax[1,0].plot(wlist,qlist)
ax[1,0].set_title('equity price')
ax[1,1].plot(wlist,plist)
ax[1,1].set_title('bond price')
ax[2,0].plot(wlist,Vlist)
ax[2,0].set_title('firm value')
ax[2,0].set_xlabel('fraction of initial endowment held by agent 2',fontsize=13)
# Create a list of default thresholds 𝜖* = log(b / (A k^𝛼))
A = mdl.A
𝛼 = mdl.𝛼
epslist = []
for i in range(len(wlist)):
    bb = blist[i]
    kk = klist[i]
    eps = np.log(bb/(A*kk**𝛼))
    epslist.append(eps)
# Plot (cont.)
ax[2,1].plot(wlist,epslist)
ax[2,1].set_title(r'default threshold $\epsilon^*$')
ax[2,1].set_xlabel('fraction of initial endowment held by agent 2',fontsize=13)
plt.show()
# -
# ## A picture worth a thousand words
#
# Please stare at the above panels.
#
# They describe how equilibrium prices and quantities respond to
# alterations in the structure of society’s *hedging desires* across
# economies with different allocations of the initial endowment to our two
# types of agents.
#
# Now let’s see how the two types of agents value bonds and equities,
# keeping in mind that the type that values the asset highest determines
# the equilibrium price (and thus the pertinent set of Big $C$’s).
# +
# Comparing the prices: each agent's private valuation of equity and
# bonds alongside the equilibrium (maximum) price, plus agent 1's
# equity holding, all against agent 2's endowment share.
fig, ax = plt.subplots(1,3,figsize=(16,6))
ax[0].plot(wlist,q1list,label='agent 1',color='green')
ax[0].plot(wlist,q2list,label='agent 2',color='blue')
ax[0].plot(wlist,qlist,label='equity price',color='red',linestyle='--')
ax[0].legend()
ax[0].set_title('equity valuations')
ax[0].set_xlabel('fraction of initial endowment held by agent 2',fontsize=11)
ax[1].plot(wlist,p1list,label='agent 1',color='green')
ax[1].plot(wlist,p2list,label='agent 2',color='blue')
ax[1].plot(wlist,plist,label='bond price',color='red',linestyle='--')
ax[1].legend()
ax[1].set_title('bond valuations')
ax[1].set_xlabel('fraction of initial endowment held by agent 2',fontsize=11)
ax[2].plot(wlist,tlist,color='blue')
ax[2].set_title('equity holdings by agent 1')
ax[2].set_xlabel('fraction of initial endowment held by agent 2',fontsize=11)
plt.show()
# -
# It is rewarding to stare at the above plots too.
#
# In equilibrium, equity valuations are the same across the two types of
# agents but bond valuations are not.
#
# Agents of type 2 value bonds more highly (they want more hedging).
#
# Taken together with our earlier plot of equity holdings of type 1
# agents, these graphs confirm the earlier conjecture that while both types
# of agents hold equities, only agents of type 2 hold bonds.
|
.archive/lectures-mixed/BCG_incomplete_mkts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Twitter Sentiment Classification using Vowpal Wabbit in SynapseML
#
# In this example, we show how to build a sentiment classification model using Vowpal Wabbit (VW) in SynapseML. The data set we use to train and evaluate the model is [Sentiment140](http://help.sentiment140.com/for-students/?source=post_page---------------------------) twitter data. First, we import a few packages that we need.
import os
import re
import urllib.request
import numpy as np
import pandas as pd
from zipfile import ZipFile
from bs4 import BeautifulSoup
from pyspark.sql.functions import udf, rand, when, col
from pyspark.sql.types import StructType, StructField, DoubleType, StringType
from pyspark.ml import Pipeline
from pyspark.ml.feature import CountVectorizer, RegexTokenizer
from synapse.ml.vw import VowpalWabbitClassifier
from synapse.ml.train import ComputeModelStatistics
from pyspark.mllib.evaluation import BinaryClassificationMetrics
import matplotlib.pyplot as plt
# On Azure Synapse (Project Arcadia) the SparkSession must be created
# explicitly; in other environments `spark` is assumed to be provided.
if os.environ.get("AZURE_SERVICE", None) == "Microsoft.ProjectArcadia":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.getOrCreate()
# URL to download the sentiment140 dataset and data file names
DATA_URL = "http://cs.stanford.edu/people/alecmgo/trainingandtestdata.zip"
TRAIN_FILENAME = "training.1600000.processed.noemoticon.csv"
TEST_FILENAME = "testdata.manual.2009.06.14.csv"
# Folder for storing the downloaded data
DATA_FOLDER = "data"
# Data column names (the sentiment140 CSVs ship without a header row)
COL_NAMES = ["label", "id", "date", "query_string", "user", "text"]
# Text encoding type of the data
ENCODING = "iso-8859-1"
# ## Data Preparation
#
# We use [Sentiment140](http://help.sentiment140.com/for-students/?source=post_page---------------------------) twitter data which originated from a Standford research project to train and evaluate VW classification model on Spark. The same dataset has been used in a previous [Azure Machine Learning sample](https://github.com/Azure-Samples/MachineLearningSamples-TwitterSentimentPrediction) on twitter sentiment prediction. Before using the data to build the classification model, we first download and clean up the data.
# +
def download_data(url, data_folder=DATA_FOLDER, filename="downloaded_data.zip"):
    """Download a zip archive from *url* and extract it into *data_folder*.

    Args:
        url: HTTP(S) URL of the zip archive to fetch.
        data_folder: Destination directory name (created if missing).
        filename: Name given to the downloaded archive inside data_folder.
    """
    # Bug fix: the original body ignored the data_folder argument and
    # always wrote into the DATA_FOLDER global.
    data_dir = "./" + data_folder
    os.makedirs(data_dir, exist_ok=True)
    downloaded_filepath = os.path.join(data_dir, filename)
    print("Downloading data...")
    urllib.request.urlretrieve(url, downloaded_filepath)
    print("Extracting data...")
    # Context manager guarantees the archive handle is closed even on error.
    with ZipFile(downloaded_filepath) as zipfile:
        zipfile.extractall(data_dir)
    print("Finished data downloading and extraction.")
download_data(DATA_URL)
# -
# Let's read the training data into a Spark DataFrame.
df_train = pd.read_csv(os.path.join(".", DATA_FOLDER, TRAIN_FILENAME),
header=None, names=COL_NAMES, encoding=ENCODING)
df_train = spark.createDataFrame(df_train, verifySchema=False)
# We can take a look at the training data and check how many samples it has. We should see that there are 1.6 million samples in the training data. There are 6 fields in the training data:
# * label: the sentiment of the tweet (0.0 = negative, 2.0 = neutral, 4.0 = positive)
# * id: the id of the tweet
# * date: the date of the tweet
# * query_string: The query used to extract the data. If there is no query, then this value is NO_QUERY.
# * user: the user that tweeted
# * text: the text of the tweet
df_train.limit(10).toPandas()
print("Number of training samples: ", df_train.count())
# Before training the model, we randomly permute the data to mix negative and positive samples. This is helpful for properly training online learning algorithms like VW. To speed up model training, we use a subset of the data to train the model. If training with the full training set, typically you will see better performance of the model on the test set.
df_train = df_train.orderBy(rand()) \
.limit(100000) \
.withColumn("label", when(col("label") > 0, 1.0).otherwise(0.0)) \
.select(["label", "text"])
# ## VW SynapseML Training
#
# Now we are ready to define a pipeline which consists of feature engineering steps and the VW model.
# +
# Specify featurizers
tokenizer = RegexTokenizer(inputCol="text",
outputCol="words")
count_vectorizer = CountVectorizer(inputCol="words",
outputCol="features")
# Define VW classification model
args = "--loss_function=logistic --quiet --holdout_off"
vw_model = VowpalWabbitClassifier(featuresCol="features",
labelCol="label",
passThroughArgs=args,
numPasses=10)
# Create a pipeline
vw_pipeline = Pipeline(stages=[tokenizer, count_vectorizer, vw_model])
# -
# With the prepared training data, we can fit the model pipeline as follows.
vw_trained = vw_pipeline.fit(df_train)
# ## Model Performance Evaluation
#
# After training the model, we evaluate the performance of the model using the test set which is manually labeled.
df_test = pd.read_csv(os.path.join(".", DATA_FOLDER, TEST_FILENAME),
header=None, names=COL_NAMES, encoding=ENCODING)
df_test = spark.createDataFrame(df_test, verifySchema=False)
# We only use positive and negative tweets in the test set to evaluate the model, since our model is a binary classification model trained with only positive and negative tweets.
print("Number of test samples before filtering: ", df_test.count())
df_test = df_test.filter(col("label") != 2.0) \
.withColumn("label", when(col("label") > 0, 1.0).otherwise(0.0)) \
.select(["label", "text"])
print("Number of test samples after filtering: ", df_test.count())
# Make predictions
predictions = vw_trained.transform(df_test)
predictions.limit(10).toPandas()
# Compute model performance metrics
metrics = ComputeModelStatistics(evaluationMetric="classification",
labelCol="label",
scoredLabelsCol="prediction").transform(predictions)
metrics.toPandas()
# +
# Utility class for plotting ROC curve (https://stackoverflow.com/questions/52847408/pyspark-extract-roc-curve)
class CurveMetrics(BinaryClassificationMetrics):
    """BinaryClassificationMetrics subclass that exposes metric curves.

    Adds get_curve(), which pulls curve points (e.g. the ROC curve) out of
    the underlying JVM model as a list of (x, y) float tuples.
    """
    def __init__(self, *args):
        super(CurveMetrics, self).__init__(*args)
    def get_curve(self, method):
        """Return [(x, y), ...] points for *method* (e.g. 'roc') from the JVM model."""
        # Invoke the named JVM method (e.g. roc()) and bridge its result to a Java RDD.
        rdd = getattr(self._java_model, method)().toJavaRDD()
        points = []
        # Each collected row is a JVM pair; _1()/_2() fetch its two components.
        for row in rdd.collect():
            points += [(float(row._1()), float(row._2()))]
        return points
preds = predictions.select("label", "probability") \
.rdd.map(lambda row: (float(row["probability"][1]), float(row["label"])))
roc_points = CurveMetrics(preds).get_curve("roc")
# Plot ROC curve
fig = plt.figure()
x_val = [x[0] for x in roc_points]
y_val = [x[1] for x in roc_points]
plt.title("ROC curve on test set")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.plot(x_val, y_val)
# Use display() if you're on Azure Databricks or you can do plt.show()
plt.show()
# -
# You should see an ROC curve like the following after the above cell is executed.
#
# <img src="https://user-images.githubusercontent.com/20047467/69376052-9b0a3380-0c77-11ea-9266-11aa44350cbe.png" width="400" height="320" />
|
notebooks/features/classification/Classification - Twitter Sentiment with Vowpal Wabbit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Coding Exercise #0708b
# ### 1. Inverse transform with AutoEncoder (reduced dimensional input):
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.examples.tutorials.mnist import input_data # MNIST handwritten digits data!
from sklearn.preprocessing import MinMaxScaler
# %matplotlib inline
# #### 1.1. Download the MNIST data:
# verbosity_saved = tf.logging.get_verbosity() # Save the current verbosity level if needed.
tf.logging.set_verbosity(tf.logging.ERROR) # Set the verbosity level high so that most warnings are ignored.
mnist = input_data.read_data_sets("MNIST_data/",one_hot=True) # Download the data.
type(mnist)
# #### 1.2. Visualize the data:
n_rows = 3
n_cols = 3
n_shift = 10
f, a = plt.subplots(n_rows, n_cols, figsize=(5, 5))
for i in range(n_rows):
for j in range(n_cols):
single_image = mnist.test.images[n_shift+i*n_cols+j].reshape(28,28)
a[i,j].imshow(1.0-single_image, cmap='gist_gray') # In Grayscale.
plt.show()
# #### 1.3. Define an AutoEncoder model:
# Hyperparameters definition.
n_input = 784 # Input layers has as many nodes as the number of variables.
n_hidden = 98 # The number of nodes in the hidden layer <= targeted reduced dimension.
n_output = n_input # The output should have the same number of nodes as the input.
learn_rate = 0.005
n_epochs = 1001
# Variables definition.
initializer = tf.variance_scaling_initializer()
W1 = tf.Variable(initializer([n_input, n_hidden]), dtype=tf.float32)
W2 = tf.Variable(initializer([n_hidden, n_output]), dtype=tf.float32)
b1 = tf.Variable(tf.zeros(n_hidden))
b2 = tf.Variable(tf.zeros(n_output))
# Placeholder definition: input images flattened to n_input features.
X_ph = tf.placeholder(tf.float32, shape=[None, n_input])
# AutoEncoder model definition (a single hidden layer).
hidden_layer = tf.matmul(X_ph, W1) + b1 # NOTE(review): purely linear -- no ReLU is actually applied despite the original "ReLu activation" comment.
y_model = tf.matmul(hidden_layer, W2) + b2
loss = tf.reduce_mean(tf.square(X_ph- y_model)) # Reconstruction MSE: 'X' takes the place of 'y'!!!
optimizer = tf.train.AdamOptimizer(learning_rate = learn_rate)
train = optimizer.minimize(loss)
init = tf.global_variables_initializer()
# #### 1.4. AutoEncoder training:
saver = tf.train.Saver()
with tf.Session() as sess:
sess.run(init)
my_feed = {X_ph: mnist.test.images}
for i in range(n_epochs):
sess.run(train, feed_dict = my_feed)
mse = sess.run(loss, feed_dict = my_feed)
if (i % 100 == 0):
print("Step : {} , MSE : {}".format(i, mse))
saver.save(sess, "./autoencoder") # Save the result.
# #### 1.5. Visualize the result:
# Left 3x3 are the original images.
# Right 3x3 are the AutoEncoder reconstructions (inverse transform of the reduced-dimensional representation).
n_images = n_rows*n_cols
with tf.Session() as sess:
saver.restore(sess,"./autoencoder")
X_inverse_transform = y_model.eval(feed_dict={X_ph:mnist.test.images[n_shift:n_shift+n_images]}) # The output is the reduced dimensional input!
f, a = plt.subplots(n_rows, 2*n_cols, figsize=(20, 10))
for i in range(n_rows):
for j in range(n_cols):
single_image1 = mnist.test.images[n_shift+i*n_cols+j].reshape(28,28)
single_image2 = X_inverse_transform[i*n_cols+j].reshape(28,28)
a[i][j].imshow(1.0-single_image1, cmap='gist_gray') # Grayscale.
a[i][j+n_cols].imshow(1.0-single_image2, cmap='gist_gray') # Grayscale.
plt.show()
|
SIC_AI_Coding_Exercises/SIC_AI_Chapter_08_Coding_Exercises/ex_0708b.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# ## Deep Learning - Flower Image Classification
from pyspark.ml import Transformer, Estimator, Pipeline
from pyspark.ml.classification import LogisticRegression
from synapse.ml.downloader import ModelDownloader
import os, sys, time
# + mml-deploy="local"
model = ModelDownloader(spark, "dbfs:/models/").downloadByName("ResNet50")
# +
# Load the images
# use flowers_and_labels.parquet on larger cluster in order to get better results
imagesWithLabels = spark.read.parquet("wasbs://<EMAIL>/flowers_and_labels2.parquet") \
.withColumnRenamed("bytes","image").sample(.1)
imagesWithLabels.printSchema()
# -
# 
# +
from synapse.ml.opencv import ImageTransformer
from synapse.ml.image import UnrollImage
from synapse.ml.cntk import ImageFeaturizer
from synapse.ml.stages import *
# Make some featurizers
it = ImageTransformer()\
.setOutputCol("scaled")\
.resize(size=(60, 60))
ur = UnrollImage()\
.setInputCol("scaled")\
.setOutputCol("features")
dc1 = DropColumns().setCols(["scaled", "image"])
lr1 = LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels")
dc2 = DropColumns().setCols(["features"])
basicModel = Pipeline(stages=[it, ur, dc1, lr1, dc2])
# +
resnet = ImageFeaturizer()\
.setInputCol("image")\
.setOutputCol("features")\
.setModelLocation(model.uri)\
.setLayerNames(model.layerNames)\
.setCutOutputLayers(1)
dc3 = DropColumns().setCols(["image"])
lr2 = LogisticRegression().setMaxIter(8).setFeaturesCol("features").setLabelCol("labels")
dc4 = DropColumns().setCols(["features"])
deepModel = Pipeline(stages=[resnet, dc3, lr2, dc4])
# -
# 
# ### How does it work?
#
# 
# ### Run the experiment
def timedExperiment(model, train, test):
    """Fit *model* on *train*, score *test*, and print the elapsed wall time.

    Returns the scored test set collected as a pandas DataFrame.
    """
    started_at = time.time()
    scored = model.fit(train).transform(test)
    scored_pdf = scored.toPandas()
    elapsed = time.time() - started_at
    print("Experiment took {}s".format(elapsed))
    return scored_pdf
train, test = imagesWithLabels.randomSplit([.8,.2])
train.count(), test.count()
basicResults = timedExperiment(basicModel, train, test)
deepResults = timedExperiment(deepModel, train, test)
# ### Plot confusion matrix.
# +
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import numpy as np
def evaluate(results, name):
    """Draw a row-normalized confusion matrix for *results* on the current axes.

    Args:
        results: pandas DataFrame with "labels" (true) and "prediction" columns.
        name: Model name used in the plot title.
    """
    y, y_hat = results["labels"],results["prediction"]
    # Labels may come back as floats/strings; coerce to int class ids.
    y = [int(l) for l in y]
    accuracy = np.mean([1. if pred==true else 0. for (pred,true) in zip(y_hat,y)])
    cm = confusion_matrix(y, y_hat)
    # Normalize each row so cells are per-true-class rates.
    cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
    plt.text(40, 10,"$Accuracy$ $=$ ${}\%$".format(round(accuracy*100,1)),fontsize=14)
    plt.imshow(cm, interpolation="nearest", cmap=plt.cm.Blues)
    plt.colorbar()
    plt.xlabel("$Predicted$ $label$", fontsize=18)
    plt.ylabel("$True$ $Label$", fontsize=18)
    plt.title("$Normalized$ $CM$ $for$ ${}$".format(name))
plt.figure(figsize=(12,5))
plt.subplot(1,2,1)
evaluate(deepResults,"CNTKModel + LR")
plt.subplot(1,2,2)
evaluate(basicResults,"LR")
# Note that on the larger dataset the accuracy will bump up from 44% to >90%
display(plt.show())
|
notebooks/features/other/DeepLearning - Flower Image Classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/aditya270520/100Daysofpython/blob/main/password_with_requirement.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="HuFeB3-snQwC" outputId="<PASSWORD>"
import random
import string
def randomPassword(size):
    """Return a random password of length *size* (effective minimum 4).

    The result is guaranteed to contain at least one lowercase letter, one
    uppercase letter, one digit, and one punctuation character.

    NOTE(review): `random` is not cryptographically secure; for real
    credentials prefer the `secrets` module.
    """
    all_chars = string.ascii_letters + string.digits + string.punctuation
    # Guarantee one character from each required class.
    chars = [
        random.choice(string.ascii_lowercase),
        random.choice(string.ascii_uppercase),
        random.choice(string.digits),
        random.choice(string.punctuation),
    ]
    # Fill the remainder from the full alphabet.
    chars += [random.choice(all_chars) for _ in range(size - 4)]
    # Bug fix: shuffle so the required characters do not sit at predictable
    # positions (the original always started with lowercase, uppercase,
    # digit, punctuation in that exact order, weakening the password).
    random.shuffle(chars)
    return "".join(chars)
pass_len = int(input("What would be the password length? "))
print ("First Random Password is:", randomPassword(pass_len))
print ("Second Random Password is:", randomPassword(pass_len))
print ("Third Random Password is:", randomPassword(pass_len))
|
password_with_requirement.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: cs771
# language: python
# name: cs771
# ---
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.WARN)
import pickle
import numpy as np
import os
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.metrics import accuracy_score
import os
from tensorflow.python.client import device_lib
from collections import Counter
# +
f = open('../Glove/word_embedding_glove', 'rb')
word_embedding = pickle.load(f)
f.close()
word_embedding = word_embedding[: len(word_embedding)-1]
f = open('../Glove/vocab_glove', 'rb')
vocab = pickle.load(f)
f.close()
word2id = dict((w, i) for i,w in enumerate(vocab))
id2word = dict((i, w) for i,w in enumerate(vocab))
unknown_token = "UNKNOWN_TOKEN"
f = open("train.pickle", 'rb')
full_data = pickle.load(f)
f.close()
# +
# Model Description
sense_word = 'interest'
model_name = 'model-6'
model_dir = 'output/' + sense_word + '/' + model_name
save_dir = os.path.join(model_dir, "save/")
log_dir = os.path.join(model_dir, "log")
if not os.path.exists(model_dir):
os.mkdir(model_dir)
if not os.path.exists(save_dir):
os.mkdir(save_dir)
if not os.path.exists(log_dir):
os.mkdir(log_dir)
# +
sense_counts = Counter(full_data[sense_word][1])
print("Initial: ", sense_counts)
sort_sense_counts = sense_counts.most_common()
vocab_sense = [k for k,v in sort_sense_counts]
xx = full_data[sense_word][0]
yy = full_data[sense_word][1]
data_x = []
data_label = []
for i in range(len(xx)):
if(yy[i]!=vocab_sense[-1]):
data_label.append(yy[i])
data_x.append(xx[i])
sense_counts = Counter(data_label)
print("Final: ",sense_counts)
total_count = len(data_label)
sort_sense_counts = sense_counts.most_common()
vocab_sense = [k for k,v in sort_sense_counts]
freq_sense = [v for k,v in sort_sense_counts]
weights = np.multiply(6, [1 - count/total_count for count in freq_sense])
weights = weights.astype(np.float32)
print(weights)
sense2id = dict((w, i) for i,w in enumerate(vocab_sense))
id2sense= dict((i, w) for i,w in enumerate(vocab_sense))
# -
# Parameters
mode = 'train'
num_senses = len(vocab_sense)
batch_size = 64
vocab_size = len(vocab)
unk_vocab_size = 1
word_emb_size = len(word_embedding[0])
max_sent_size = 200
hidden_size = 100
keep_prob = 0.5
l2_lambda = 0.002
init_lr = 0.005
decay_steps = 500
decay_rate = 0.96
clip_norm = 1
clipping = True
# +
# MODEL
x = tf.placeholder('int32', [batch_size, max_sent_size], name="x")
y = tf.placeholder('int32', [batch_size], name="y")
x_mask = tf.placeholder('bool', [batch_size, max_sent_size], name='x_mask')
is_train = tf.placeholder('bool', [], name='is_train')
word_emb_mat = tf.placeholder('float', [None, word_emb_size], name='emb_mat')
input_keep_prob = tf.cond(is_train,lambda:keep_prob, lambda:tf.constant(1.0))
x_len = tf.reduce_sum(tf.cast(x_mask, 'int32'), 1)
with tf.name_scope("word_embedding"):
if mode == 'train':
unk_word_emb_mat = tf.get_variable("word_emb_mat", dtype='float', shape=[unk_vocab_size, word_emb_size], initializer=tf.contrib.layers.xavier_initializer(uniform=True, seed=0, dtype=tf.float32))
else:
unk_word_emb_mat = tf.get_variable("word_emb_mat", shape=[unk_vocab_size, word_emb_size], dtype='float')
final_word_emb_mat = tf.concat([word_emb_mat, unk_word_emb_mat], 0)
Wx = tf.nn.embedding_lookup(final_word_emb_mat, x)
with tf.variable_scope("lstm1"):
cell_fw1 = tf.contrib.rnn.BasicLSTMCell(hidden_size,state_is_tuple=True)
cell_bw1 = tf.contrib.rnn.BasicLSTMCell(hidden_size,state_is_tuple=True)
d_cell_fw1 = tf.contrib.rnn.DropoutWrapper(cell_fw1, input_keep_prob=input_keep_prob)
d_cell_bw1 = tf.contrib.rnn.DropoutWrapper(cell_bw1, input_keep_prob=input_keep_prob)
(fw_h1, bw_h1), _ = tf.nn.bidirectional_dynamic_rnn(d_cell_fw1, d_cell_bw1, Wx, sequence_length=x_len, dtype='float', scope='lstm1')
h1 = tf.concat([fw_h1, bw_h1], 2)
with tf.variable_scope("lstm2"):
cell_fw2 = tf.contrib.rnn.BasicLSTMCell(hidden_size,state_is_tuple=True)
cell_bw2 = tf.contrib.rnn.BasicLSTMCell(hidden_size,state_is_tuple=True)
d_cell_fw2 = tf.contrib.rnn.DropoutWrapper(cell_fw2, input_keep_prob=input_keep_prob)
d_cell_bw2 = tf.contrib.rnn.DropoutWrapper(cell_bw2, input_keep_prob=input_keep_prob)
(fw_h2, bw_h2), _ = tf.nn.bidirectional_dynamic_rnn(d_cell_fw2, d_cell_bw2, h1, sequence_length=x_len, dtype='float', scope='lstm2')
h = tf.concat([fw_h2, bw_h2], 2)
def attention(input_x, input_mask, W_att):
    """Attention-pool one sentence into a fixed-size context vector.

    Args:
        input_x: hidden states for one example, [time, 2*hidden_size]
            (assumed from h's construction -- TODO confirm).
        input_mask: boolean mask of valid (non-padding) time steps.
        W_att: [2*hidden_size, 1] attention weight vector.
    Returns:
        [2*hidden_size] weighted sum of the unmasked hidden states.
    """
    # Keep only the time steps that correspond to real tokens.
    h_masked = tf.boolean_mask(input_x, input_mask)
    h_tanh = tf.tanh(h_masked)
    # Unnormalized attention scores, shape [T, 1].
    u = tf.matmul(h_tanh, W_att)
    # Bug fix: softmax must normalize across the time axis (axis=0).
    # The original tf.nn.softmax(u) acted on the size-1 last axis of the
    # [T, 1] score tensor, making every weight exactly 1.0 (i.e. a plain
    # sum, not attention). Use dim=0 instead of axis=0 on TF < 1.5.
    a = tf.nn.softmax(u, axis=0)
    # Weighted sum over time -> sentence representation.
    c = tf.reduce_sum(tf.multiply(h_tanh, a), 0)
    return c
with tf.variable_scope("attention"):
W_att = tf.Variable(tf.truncated_normal([2*hidden_size, 1], mean=0.0, stddev=0.1, seed=0), name="W_att")
c = tf.expand_dims(attention(h[0], x_mask[0], W_att), 0)
for i in range(1, batch_size):
c = tf.concat([c, tf.expand_dims(attention(h[i], x_mask[i], W_att), 0)], 0)
with tf.variable_scope("softmax_layer"):
W = tf.Variable(tf.truncated_normal([2*hidden_size, num_senses], mean=0.0, stddev=0.1, seed=0), name="W")
b = tf.Variable(tf.zeros([num_senses]), name="b")
drop_c = tf.nn.dropout(c, input_keep_prob)
logits = tf.matmul(drop_c, W) + b
predictions = tf.argmax(logits, 1)
class_weight = tf.constant(weights)
weighted_logits = logits * class_weight
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=weighted_logits, labels=y))
global_step = tf.Variable(0, trainable=False, name="global_step")
learning_rate = tf.train.exponential_decay(init_lr, global_step, decay_steps, decay_rate, staircase=True)
tv_all = tf.trainable_variables()
tv_regu =[]
for t in tv_all:
if t.name.find('b:')==-1:
tv_regu.append(t)
# l2 Loss
l2_loss = l2_lambda * tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv_regu ])
total_loss = loss + l2_loss
# Optimizer for loss
optimizer = tf.train.AdamOptimizer(learning_rate)
# Gradients and Variables for Loss
grads_vars = optimizer.compute_gradients(total_loss)
# Clipping of Gradients
clipped_grads = grads_vars
if(clipping == True):
clipped_grads = [(tf.clip_by_norm(grad, clip_norm), var) for grad, var in clipped_grads]
# Training Optimizer for Total Loss
train_op = optimizer.apply_gradients(clipped_grads, global_step=global_step)
# Summaries
var_summaries = []
for v in tv_all:
var_summary = tf.summary.histogram("{}/var".format(v.name), v)
var_summaries.append(var_summary)
var_summaries_merged = tf.summary.merge(var_summaries)
loss_summary = tf.summary.scalar("loss", loss)
total_loss_summary = tf.summary.scalar("total_loss", total_loss)
summary = tf.summary.merge_all()
# -
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"]="0"
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer()) # For initializing all the variables
saver = tf.train.Saver() # For Saving the model
summary_writer = tf.summary.FileWriter(log_dir, sess.graph) # For writing Summaries
# +
# Splitting
x_train, x_test, label_train, label_test = train_test_split(data_x, data_label, train_size=0.8, shuffle=True, stratify=data_label, random_state=0)
x_train, x_val, label_train, label_val = train_test_split(x_train, label_train, train_size=0.9, shuffle=True, stratify=label_train, random_state=0)
# +
def data_prepare(x, y):
    """Vectorize tokenized sentences and their sense labels into fixed-size arrays.

    Args:
        x: list of tokenized sentences (lists of word strings).
        y: list of sense labels, parallel to x.
    Returns:
        (xx, xx_mask, yy): word-id matrix [N, max_sent_size], boolean mask of
        valid (non-padding) positions, and integer sense ids [N].
    """
    num_examples = len(x)
    xx = np.zeros([num_examples, max_sent_size], dtype=int)
    xx_mask = np.zeros([num_examples, max_sent_size], dtype=bool)
    yy = np.zeros([num_examples], dtype=int)
    for j in range(num_examples):
        for i in range(max_sent_size):
            # Sentences shorter than max_sent_size stay zero-padded (mask False);
            # sentences longer than max_sent_size are silently truncated.
            if(i>=len(x[j])):
                break
            w = x[j][i]
            # Out-of-vocabulary words map to the UNKNOWN_TOKEN id.
            xx[j][i] = word2id[w] if w in word2id else word2id['UNKNOWN_TOKEN']
            xx_mask[j][i] = True
        yy[j] = sense2id[y[j]]
    return xx, xx_mask, yy
def eval_score(yy, pred):
    """Return (macro-F1, accuracy), both in percent, for the batched prefix of yy.

    Predictions only exist for complete batches, so labels beyond the last
    full batch are dropped before scoring.
    """
    usable = int(len(yy) / batch_size) * batch_size
    truth = yy[:usable]
    macro_f1 = 100 * f1_score(truth, pred, average='macro')
    accuracy = 100 * accuracy_score(truth, pred)
    return macro_f1, accuracy
def model(xx, yy, mask, train_cond=True):
    """Run one pass over (xx, yy, mask) in fixed-size mini-batches.

    Args:
        xx: word-id matrix [N, max_sent_size].
        yy: integer sense labels [N].
        mask: boolean validity mask [N, max_sent_size].
        train_cond: True to run the optimizer, False for evaluation only.
    Returns:
        (mean loss, last global step) when training, or
        (mean loss, flat list of predictions) when evaluating.
        Examples beyond the last full batch are ignored in both cases.
    """
    num_batches = int(len(xx)/batch_size)
    losses = 0
    preds = []
    for j in range(num_batches):
        # Slice out the j-th fixed-size mini-batch.
        s = j * batch_size
        e = (j+1) * batch_size
        # Bug fix: the original also fed input_keep_prob:keep_prob, which
        # overrode the tf.cond on is_train and kept dropout active during
        # evaluation. Feeding only is_train lets the graph pick keep_prob
        # for training and 1.0 for evaluation.
        feed_dict = {x:xx[s:e], y:yy[s:e], x_mask:mask[s:e], is_train:train_cond, word_emb_mat:word_embedding}
        if(train_cond==True):
            _, _loss, step, _summary = sess.run([train_op, total_loss, global_step, summary], feed_dict)
            summary_writer.add_summary(_summary, step)
            # print("Steps:{}".format(step), ", Loss: {}".format(_loss))
        else:
            _loss, pred = sess.run([total_loss, predictions], feed_dict)
            preds.append(pred)
        losses +=_loss
    if(train_cond==False):
        # Flatten the per-batch prediction arrays into one list.
        y_pred = []
        for i in range(num_batches):
            for pred in preds[i]:
                y_pred.append(pred)
        return losses/num_batches, y_pred
    return losses/num_batches, step
# -
x_id_train, mask_train, y_train = data_prepare(x_train, label_train)
x_id_val, mask_val, y_val = data_prepare(x_val, label_val)
x_id_test, mask_test, y_test = data_prepare(x_test, label_test)
# +
num_epochs = 50
for i in range(num_epochs):
random = np.random.choice(len(y_train), size=(len(y_train)), replace=False)
x_id_train = x_id_train[random]
y_train = y_train[random]
mask_train = mask_train[random]
losses, step = model(x_id_train, y_train, mask_train)
print("Epoch:", i+1,"Step:", step, "loss:",losses)
if((i+1)%5==0):
saver.save(sess, save_path=save_dir)
print("Saved Model Complete")
train_loss, train_pred = model(x_id_train, y_train, mask_train, train_cond=False)
f1_, accu_ = eval_score(y_train, train_pred)
print("Train: F1 Score: ", f1_, "Accuracy: ", accu_, "Loss: ", train_loss)
val_loss, val_pred = model(x_id_val, y_val, mask_val, train_cond=False)
f1_, accu_ = eval_score(y_val, val_pred)
print("Val: F1 Score: ", f1_, "Accuracy: ", accu_, "Loss: ", val_loss)
test_loss, test_pred = model(x_id_test, y_test, mask_test, train_cond=False)
f1_, accu_ = eval_score(y_test, test_pred)
print("Test: F1 Score: ", f1_, "Accuracy: ", accu_, "Loss: ", test_loss)
# -
saver.restore(sess, save_dir)
|
Four Word Model/Model-6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # ¿Cómo resolver problemas de física en la vida real?
#
#
# ## <NAME>
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Acerca de mí ([nicoguaro.github.io](nicoguaro.github.io))
#
# Descripción
# + [markdown] slideshow={"slide_type": "slide"}
# #### Intereses de Investigación
#
# - Física computacional
# - Diseño de materiales
#
# #### Docencia
#
# - Modelación computacional
# - Métodos numéricos
# - Matemáticas avanzadas para ingenieros
# + slideshow={"slide_type": "notes"}
# Celda oculta para código
import numpy as np
# + [markdown] slideshow={"slide_type": "slide"}
# # Título diapositiva
#
# Texto
# + [markdown] slideshow={"slide_type": "slide"}
# # Referencias
#
#
# - Información bibliográfica.
# + [markdown] slideshow={"slide_type": "slide"}
# | | |
# |-------------------------------------------------------------- |----------------------------------------------------------- |
# | <img style="float:left" src="img/website.svg" width="50px"/> | <h1 class="text-primary">nicoguaro.github.io</h1> |
# | <img style="float:left" src="img/github.svg" width="50px"/> | <h1 class="text-primary">nicoguaro</h1> |
# | <img style="float:left" src="img/twitter.svg" width="50px"/> | <h1 class="text-primary">@nicoguaro</h1> |
# | <img style="float:left" src="img/email.svg" width="50px"/> | <h1 class="text-primary">nicoguarin at gmail dot com</h1> |
# -
|
2020/ode_numerico/ode_numerico.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### What is Pyspark?
# <img src="PySpark.png">
#
#
# >Spark is the name of the engine to realize cluster computing while PySpark is the Python's library to use Spark.PySpark is a great language for performing exploratory data analysis at scale, building machine learning pipelines, and creating ETLs for a data platform. If you’re already familiar with Python and libraries such as Pandas, then PySpark is a great language to learn in order to create more scalable analyses and pipelines.
# ### Load Data
from pyspark.sql import SparkSession
spark = SparkSession.builder.appName('ml-diabetes').getOrCreate()
df = spark.read.csv('diabetes.csv', header = True, inferSchema = True)
df.printSchema()
# ### Show Dataset in DataFrame
import pandas as pd
pd.DataFrame(df.take(5), columns=df.columns).transpose()
# Data grouping by ``Outcome``
df.groupby('Outcome').count().toPandas()
# Dataset showing by ``toPandas`` function
df.toPandas()
# ### Summary statistics for numeric variables
numeric_features = [t[0] for t in df.dtypes if t[1] == 'int']
df.select(numeric_features).describe().toPandas().transpose()
df.columns
# ### Correlations
# +
from pandas.plotting import scatter_matrix
numeric_data = df.select(numeric_features).toPandas()
axs = scatter_matrix(numeric_data, figsize=(8, 8));
# Rotate axis labels and remove axis ticks
n = len(numeric_data.columns)
for i in range(n):
v = axs[i, 0]
v.yaxis.label.set_rotation(0)
v.yaxis.label.set_ha('right')
v.set_yticks(())
h = axs[n-1, i]
h.xaxis.label.set_rotation(90)
h.set_xticks(())
# -
# ### Data preparation and feature engineering
# +
## See if we have missing values
from pyspark.sql.functions import isnull, when, count, col
df.select([count(when(isnull(c), c)).alias(c) for c in df.columns]).show()
# -
dataset = df.replace('null', None)\
.dropna(how='any')
# +
# Drop unnecessary columns
dataset = dataset.drop('SkinThickness')
dataset = dataset.drop('Insulin')
dataset = dataset.drop('DiabetesPedigreeFunction')
dataset = dataset.drop('Pregnancies')
dataset.show()
# +
# Assemble all the features with VectorAssembler
required_features = ['Glucose',
'BloodPressure',
'BMI',
'Age'
]
from pyspark.ml.feature import VectorAssembler
assembler = VectorAssembler(inputCols=required_features, outputCol='features')
transformed_data = assembler.transform(dataset)
# -
transformed_data.show()
# ### Machine learning Model Building
# Split the data
(training_data, test_data) = transformed_data.randomSplit([0.8,0.2])
# ### Random Forest Classifier
# +
# Define the model
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier(labelCol='Outcome',
featuresCol='features',
maxDepth=5)
# -
# Fit the model
model = rf.fit(training_data)
# Predict with the test dataset
rf_predictions = model.transform(test_data)
# +
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
multi_evaluator = MulticlassClassificationEvaluator(labelCol = 'Outcome', metricName = 'accuracy')
print('Random Forest classifier Accuracy:', multi_evaluator.evaluate(rf_predictions))
# -
# ### Decision Tree Classifier
#
# Decision trees are widely used since they are easy to interpret, handle categorical features, extend to the multiclass classification setting, do not require feature scaling, and are able to capture non-linearities and feature interactions.
# +
from pyspark.ml.classification import DecisionTreeClassifier
dt = DecisionTreeClassifier(featuresCol = 'features', labelCol = 'Outcome', maxDepth = 3)
dtModel = dt.fit(training_data)
dt_predictions = dtModel.transform(test_data)
dt_predictions.select('Glucose', 'BloodPressure', 'BMI', 'Age', 'Outcome').show(10)
# +
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
multi_evaluator = MulticlassClassificationEvaluator(labelCol = 'Outcome', metricName = 'accuracy')
print('Decision Tree Accuracy:', multi_evaluator.evaluate(dt_predictions))
# -
# ### Logistic Regression Model
# +
from pyspark.ml.classification import LogisticRegression
lr = LogisticRegression(featuresCol = 'features', labelCol = 'Outcome', maxIter=10)
lrModel = lr.fit(training_data)
# Bug fix: the original scored with dtModel (the Decision Tree), so the
# "Logistic Regression Accuracy" reported below was actually the tree's.
lr_predictions = lrModel.transform(test_data)
# +
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
multi_evaluator = MulticlassClassificationEvaluator(labelCol = 'Outcome', metricName = 'accuracy')
print('Logistic Regression Accuracy:', multi_evaluator.evaluate(lr_predictions))
# -
# ### Gradient-boosted Tree classifier Model
from pyspark.ml.classification import GBTClassifier
gb = GBTClassifier(labelCol = 'Outcome', featuresCol = 'features')
gbModel = gb.fit(training_data)
gb_predictions = gbModel.transform(test_data)
print('Gradient-boosted Trees Accuracy:', multi_evaluator.evaluate(gb_predictions))
|
First PySpark ml model/First ML models using PySpark.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:mysql]
# language: python
# name: conda-env-mysql-py
# ---
# # Chapter 2. Redis Basics
#
# ## 2.1 Meet the Redis data types
#
# Redis can store data as five different data types:
#
# * Strings
# * List
# * Set
# * Hashes
# * Sorted set
#
# ## 2.2 Strings
#
# (1) Don't have to be text of strings.
#
# (2) Can be any kind of data, e.g., images, serialized objects, as long as <= 512MB.
#
# (3) String basic commands:
#
# * SET
# * GET
# * APPEND
# * INCR and DECR
# * GETRANGE
# * MGET
# * MSET
# * STRLEN
#
# ### 2.2.1 String Get and Set
#
# ```redis
# 127.0.0.1:6379> set user "name: joe"
# OK
# 127.0.0.1:6379> get user
# "name: joe"
# 127.0.0.1:6379> set user:1 "{'name' : 'joe', 'email' : '<EMAIL>'}"
# OK
# 127.0.0.1:6379> get user:1
# "{'name' : 'joe', 'email' : '<EMAIL>'}"
# ```
#
# ### 2.2.2 Incrementing
#
# ```redis
# 127.0.0.1:6379> set user:id 1
# OK
# 127.0.0.1:6379> get user:id
# "1"
# 127.0.0.1:6379> incr user:id
# (integer) 2
# 127.0.0.1:6379> get user:id
# "2"
# 127.0.0.1:6379> append user:1 "extra data"
# (integer) 51
# 127.0.0.1:6379> get user:1
# "{'name' : 'joe', 'email' : '<EMAIL>@joe.com'}extra data"
# ```
#
# ### 2.2.3 Getrange
#
# ```redis
# 127.0.0.1:6379> set customer:1 "ABCDE00123"
# OK
# 127.0.0.1:6379> get customer:1
# "ABCDE00123"
# 127.0.0.1:6379> getrange customer:1 5 9
# "00123"
# ```
#
# ### 2.2.4 Mget and Mset
#
# Get/Set a batch of data.
#
# * MGET = Multi-GET
# * MSET = Multi-SET
#
# ```redis
# 127.0.0.1:6379> mset order:1 "order 1 data" order:2 "order 2 data"
# OK
# 127.0.0.1:6379> mget order:1 order:2
# 1) "order 1 data"
# 2) "order 2 data"
# 127.0.0.1:6379> strlen user:1
# (integer) 51
# ```
#
# ## 2.3 Lists
#
# (1) A sorted list of strings, sorted by insertion order.
#
# (2) New items can be inserted either at the beginning or at the end of the list.
#
# (3) List basic commands:
#
# * LPUSH and RPUSH
# * LREM
# * LSET
# * LINDEX
# * LRANGE
# * LLEN
# * LPOP and RPOP
# * LTRIM
#
# ### 2.3.1 Lpush and Rpush
#
# ```redis
# 127.0.0.1:6379> lpush recentcomments "Comment 1"
# (integer) 1
# 127.0.0.1:6379> lrange recentcomments 0 1
# 1) "Comment 1"
# 127.0.0.1:6379> lrange recentcomments 0 0
# 1) "Comment 1"
# 127.0.0.1:6379> lpush recentcomments "Comment 2"
# (integer) 2
# 127.0.0.1:6379> lpush recentcomments "Comment 3" "Comment 4"
# (integer) 4
# 127.0.0.1:6379> lrange recentcomments 0 3
# 1) "Comment 4"
# 2) "Comment 3"
# 3) "Comment 2"
# 4) "Comment 1"
# 127.0.0.1:6379> rpush recentcomments "Comment 5"
# (integer) 5
# 127.0.0.1:6379> rpush recentcomments "Comment 6"
# (integer) 6
# 127.0.0.1:6379> lrange recentcomments 3 0
# (empty list or set)
# 127.0.0.1:6379> lrange recentcomments 0 5
# 1) "Comment 4"
# 2) "Comment 3"
# 3) "Comment 2"
# 4) "Comment 1"
# 5) "Comment 5"
# 6) "Comment 6"
# ```
#
# ### 2.3.2 Trimming lists.
#
# ```redis
# 127.0.0.1:6379> ltrim recentcomments 0 4
# OK
# 127.0.0.1:6379> lrange recentcomments 0 5
# 1) "Comment 4"
# 2) "Comment 3"
# 3) "Comment 2"
# 4) "Comment 1"
# 5) "Comment 5"
# ```
#
# ### 2.3.3 Other list commands
#
# `LINDEX` can be used to get an item in the middle but it is not efficient in a huge linked list.
#
# ```redis
# 127.0.0.1:6379> lindex recentcomments 2
# "Comment 2"
# 127.0.0.1:6379> lrange recentcomments 0 5
# 1) "Comment 4"
# 2) "Comment 3"
# 3) "Comment 2"
# 4) "Comment 1"
# 5) "Comment 5"
# 127.0.0.1:6379> lpop recentcomments
# "Comment 4"
# 127.0.0.1:6379> lrange recentcomments 0 5
# 1) "Comment 3"
# 2) "Comment 2"
# 3) "Comment 1"
# 4) "Comment 5"
# 127.0.0.1:6379> lpop recentcomments
# "Comment 3"
# 127.0.0.1:6379> lrange recentcomments 0 5
# 1) "Comment 2"
# 2) "Comment 1"
# 3) "Comment 5"
# ```
#
# ## 2.4 Sets
#
# (1) A collection of unique strings.
#
# (2) Adding, removing or testing for the existence of a member can be done in constant time.
#
# (3) Sets don't keep the order of members as they are added.
#
# (4) Set basic commands:
#
# * SADD
# * SCARD
# * SDIFF, SINTER, and SUNION
# * SISMEMBER
# * SMEMBERS
# * SMOVE
# * SREM
#
# ### 2.4.1 Adding to sets
#
# ```redis
# 127.0.0.1:6379> sadd post:1:likes "joe" "bob" "mary"
# (integer) 3
# 127.0.0.1:6379> scard post:1:likes
# (integer) 3
# 127.0.0.1:6379> smembers post:1:likes
# 1) "joe"
# 2) "bob"
# 3) "mary"
# 127.0.0.1:6379> sadd post:2:likes "joe" "tim"
# (integer) 2
# 127.0.0.1:6379> sdiff post:1:likes post:2:likes
# 1) "bob"
# 2) "mary"
# 127.0.0.1:6379> sinter post:1:likes post:2:likes
# 1) "joe"
# 127.0.0.1:6379> sunion post:1:likes post:2:likes
# 1) "joe"
# 2) "tim"
# 3) "bob"
# 4) "mary"
# ```
#
# ## 2.5 Hashes
#
# (1) Maps between strings fields and string values.
#
# (2) If a hash contains no more than 100 fields, it is stored in a very efficient way.
#
# (3) Easier to get a particular field of the data, rather than getting the entire set of data.
#
# (4) Hash basic commands:
#
# * HSET
# * HMSET
# * HGET
# * HMGET
# * HGETALL
# * HDEL
# * HEXISTS
# * HINCRBY
# * HKEYS
# * HVALS
#
# ```redis
# 127.0.0.1:6379> hset user:1:h name "joe"
# (integer) 1
# 127.0.0.1:6379> hget user:1:h name
# "joe"
# 127.0.0.1:6379> hmset user:1:h email "<EMAIL>" id 1
# OK
# 127.0.0.1:6379> hmget user:1:h name email id
# 1) "joe"
# 2) "<EMAIL>"
# 3) "1"
# ```
#
# ### 2.5.1 Other hash commands
#
# ```redis
# 127.0.0.1:6379> hgetall user:1:h
# 1) "name"
# 2) "joe"
# 3) "email"
# 4) "<EMAIL>.com"
# 5) "id"
# 6) "1"
# 127.0.0.1:6379> hkeys user:1:h
# 1) "name"
# 2) "email"
# 3) "id"
# 127.0.0.1:6379> hvals user:1:h
# 1) "joe"
# 2) "<EMAIL>"
# 3) "1"
# ```
#
# ## 2.6 Sorted sets
#
# (1) Sets that are sorted.
#
# (2) Each member is associated with a score for sorting.
#
# (3) Adding, removing, and updating items are very fast, so is getting a subset of items within a range of scores.
#
# (4) Sorted set basic commands:
#
# * ZADD
# * ZCARD
# * ZCOUNT
# * ZINCRBY
# * ZRANGE
# * ZRANK
# * ZREM
# * ZSCORE
#
# ```redis
# 127.0.0.1:6379> zadd hs 120 "joe" 100 "bob" 150 "mary" 90 "tim"
# (integer) 4
# 127.0.0.1:6379> zrange hs 0 4
# 1) "tim"
# 2) "bob"
# 3) "joe"
# 4) "mary"
# 127.0.0.1:6379> zrange hs 0 3 WITHSCORES
# 1) "tim"
# 2) "90"
# 3) "bob"
# 4) "100"
# 5) "joe"
# 6) "120"
# 7) "mary"
# 8) "150"
# 127.0.0.1:6379> zadd hs 125 "joe"
# (integer) 0
# 127.0.0.1:6379> zrange hs 0 3 WITHSCORES
# 1) "tim"
# 2) "90"
# 3) "bob"
# 4) "100"
# 5) "joe"
# 6) "125"
# 7) "mary"
# 8) "150"
# ```
#
# ### 2.6.1 Other sorted set commands
#
# ```redis
# 127.0.0.1:6379> zrank hs bob
# (integer) 1
# 127.0.0.1:6379> zrank hs tim
# (integer) 0
# 127.0.0.1:6379> zscore hs tim
# "90"
# 127.0.0.1:6379> zcard hs
# (integer) 4
# 127.0.0.1:6379> zcount hs 90 120
# (integer) 2
# ```
#
# ## 2.7 Pub and sub
#
# Redis can be used as a message bus.
#
# ### 2.7.1 Using pub and sub
#
# (1) Subscribe to a message channel.
#
# ```redis
# 127.0.0.1:6379> subscribe greetings
# Reading messages... (press Ctrl-C to quit)
# 1) "subscribe"
# 2) "greetings"
# 3) (integer) 1
# ```
#
# (2) Subscribe to a message channel pattern.
#
# ```redis
# 127.0.0.1:6379> psubscribe greet*
# Reading messages... (press Ctrl-C to quit)
# 1) "psubscribe"
# 2) "greet*"
# 3) (integer) 1
# ```
#
# (3) Publish a message to a message channel.
#
# ```
# 127.0.0.1:6379> publish greetings "hello redis"
# (integer) 1
# ```
#
# ## 2.8 Transactions
#
# (1) Redis has limited support for transactions.
#
# (2) Group multiple Redis commands together and execute them as a single unit.
#
# (3) Redis transactions are atomic.
#
# (4) No rollback: if one command in a transaction fails during the execution of the transaction, the rest of the commands will still be executed.
#
# (5) But if an error occurs when you are queueing up the commands via multi for the transaction, then the transaction won't run.
#
# ### 2.8.1 Using transactions
#
# Launch two Redis clients RC1 and RC2. In RC1, we use Redis transactions to simulate a transfer from account a to account b. In RC2, we initialize the account balances and try interfering with the transfer.
#
# (1) Initialize the account balances.
#
# ```redis
# ## RC2
# 127.0.0.1:6379> set account-a 100
# OK
# 127.0.0.1:6379> set account-b 200
# OK
# ```
#
# (2) Start the transfer.
#
# ```redis
# ## RC1
# 127.0.0.1:6379> multi
# OK
# 127.0.0.1:6379> incrby account-a -50
# QUEUED
# 127.0.0.1:6379> incrby account-b 50
# QUEUED
# ```
#
# Note that the transaction isn't executed at this time.
#
# (3) Check the account balances and verify that the transfer hasn't occurred. Also deposit 300 into account a.
#
# ``` redis
# ## RC2
# 127.0.0.1:6379> get account-a
# "100"
# 127.0.0.1:6379> get account-b
# "200"
# 127.0.0.1:6379> incrby account-a 300
# (integer) 400
# ```
#
# (4) Execute the transfer transaction.
#
# ```redis
# ## RC1
# 127.0.0.1:6379> exec
# 1) (integer) 350
# 2) (integer) 250
# ```
#
# (5) Start another transfer but put a watch on account a's balance.
#
# ```
# ## RC1
# 127.0.0.1:6379> watch account-a
# OK
# 127.0.0.1:6379> multi
# OK
# 127.0.0.1:6379> incrby account-a -50
# QUEUED
# 127.0.0.1:6379> incrby account-b +50
# QUEUED
# ```
#
# (6) Modify the account a's balance during the transfer.
#
# ```
# ## RC2
# 127.0.0.1:6379> set account-a 0
# OK
# 127.0.0.1:6379> set account-a 25
# OK
# ```
#
# (7) Execute the transfer transaction, but the transaction will NOT be executed due to the modification in (6).
#
# ```
# ## RC1
# 127.0.0.1:6379> exec
# (nil)
# ```
|
pluralsight-building-nosql-apps-with-redis/ch2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sys
import time
import os.path
from glob import glob
from datetime import datetime, timedelta
import h5py
import numpy as np
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/utils/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/Analog_BC/')
sys.path.insert(0, '/glade/u/home/ksha/WORKSPACE/Analog_BC/utils/')
import data_utils as du
import graph_utils as gu
from namelist import *
# +
# graph tools
import cmaps
import cartopy.crs as ccrs
import cartopy.mpl.geoaxes
import cartopy.feature as cfeature
from cartopy.io.shapereader import Reader
from cartopy.feature import ShapelyFeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
import matplotlib.pyplot as plt
import matplotlib.lines as mlines
import matplotlib.colors as colors
import matplotlib.patches as patches
from matplotlib.collections import PatchCollection
from matplotlib import ticker
import matplotlib.ticker as mticker
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
# %matplotlib inline
# -
# +
# Toggle between publication-quality output and fast notebook previews.
need_publish = False
# True: publication quality figures
# False: low resolution figures in the notebook
# fig_keys (from the star-imported namelist) is only consulted when
# need_publish is True, so the notebook still runs without it otherwise.
dpi_ = fig_keys['dpi'] if need_publish else 75
# -
# # Data
# importing domain information
# Load the pre-computed BC domain description (grid coordinates, terrain,
# and land masks) from a single HDF5 file under save_dir.
with h5py.File(save_dir+'BC_domain_info.hdf', 'r') as h5io:
    base_lon = h5io['base_lon'][...]          # longitudes of the base (outer) grid
    base_lat = h5io['base_lat'][...]          # latitudes of the base (outer) grid
    bc_lon = h5io['bc_lon'][...]              # longitudes of the BC subdomain grid
    bc_lat = h5io['bc_lat'][...]              # latitudes of the BC subdomain grid
    etopo_bc = h5io['etopo_bc'][...]          # ETOPO terrain elevation over BC
    land_mask = h5io['land_mask_base'][...]   # land mask on the base grid
    land_mask_bc = h5io['land_mask_bc'][...]  # land mask on the BC grid
# ## Datetime info
# +
# Month indices (0-based, Jan=0) defining the two seasons used for
# verification: rainy season = Oct-Mar, dry season = Apr-Sep.
mon_rain = np.array([9, 10, 11, 0, 1, 2])
mon_dry = np.array([3, 4, 5, 6, 7, 8])

# Daily timestamps covering the 3-year verification period 2017-2019.
base = datetime(2017, 1, 1)
date_list = [base + timedelta(days=x) for x in range(365+365+365)]

# Boolean masks flagging each day as belonging to the rainy or dry season,
# plus the 0-based month index of every day (used for per-month averaging).
rain_inds = np.zeros((len(date_list),), dtype=bool)
dry_inds = np.zeros((len(date_list),), dtype=bool)
mon_inds = []
for d, date in enumerate(date_list):
    mon_inds.append(date.month-1)
    if date.month-1 in mon_dry:
        dry_inds[d] = True
    else:
        rain_inds[d] = True
mon_inds = np.array(mon_inds)

# Forecast lead times in hours: 3-hourly steps from 3 h out to 216 h (72 leads).
# BUG FIX: dtype=np.float is a deprecated alias removed in NumPy >= 1.24;
# the builtin float gives the same float64 dtype.
#fcst_leads = np.arange(9, 72*3+3, 3, dtype=float)
fcst_leads = np.arange(3, 72*3+3, 3, dtype=float)
# -
# ## CRPS results
#
# *Bad value found on: lead=3, day=39, year=2019, all grid points*
# *(just a single day should be fine)*
# +
# def CRPS_per_mon(H15_base_CRPS, mon_inds):
# base_rain_mon = np.empty((12, 70))
# for mon in range(12):
# flag_pick = mon_inds == mon
# base_rain_mon[mon, :] = np.nanmean(H15_base_CRPS[flag_pick, ...], axis=(0, 2, 3))
# return base_rain_mon
# +
# with h5py.File(save_dir+'BASE_final_CRPS_2017.hdf', 'r') as h5io:
# BASE_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'BASE_final_CRPS_2018.hdf', 'r') as h5io:
# BASE_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'BASE_final_CRPS_2019.hdf', 'r') as h5io:
# BASE_CRPS_19 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2017.hdf', 'r') as h5io:
# SL_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2018.hdf', 'r') as h5io:
# SL_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2019.hdf', 'r') as h5io:
# SL_CRPS_19 = h5io['CRPS'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2017.hdf', 'r') as h5io:
# BCNN_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2018.hdf', 'r') as h5io:
# BCNN_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2019.hdf', 'r') as h5io:
# BCNN_CRPS_19 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2017.hdf', 'r') as h5io:
# SCNN_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2018.hdf', 'r') as h5io:
# SCNN_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2019.hdf', 'r') as h5io:
# SCNN_CRPS_19 = h5io['CRPS'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2017.hdf', 'r') as h5io:
# GEFS_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2018.hdf', 'r') as h5io:
# GEFS_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2019.hdf', 'r') as h5io:
# GEFS_CRPS_19 = h5io['CRPS'][...]
# with h5py.File(save_dir+'CLIM_CRPS_2017.hdf', 'r') as h5io:
# CLIM_CRPS_17 = h5io['CRPS'][...]
# with h5py.File(save_dir+'CLIM_CRPS_2018.hdf', 'r') as h5io:
# CLIM_CRPS_18 = h5io['CRPS'][...]
# with h5py.File(save_dir+'CLIM_CRPS_2019.hdf', 'r') as h5io:
# CLIM_CRPS_19 = h5io['CRPS'][...]
# CLIM_CRPS = np.concatenate((CLIM_CRPS_17, CLIM_CRPS_18, CLIM_CRPS_19), axis=0)
# GEFS_CRPS = np.concatenate((GEFS_CRPS_17, GEFS_CRPS_18, GEFS_CRPS_19), axis=0)
# BASE_CRPS = np.concatenate((BASE_CRPS_17, BASE_CRPS_18, BASE_CRPS_19), axis=0)
# SL_CRPS = np.concatenate((SL_CRPS_17, SL_CRPS_18, SL_CRPS_19), axis=0)
# BCNN_CRPS = np.concatenate((BCNN_CRPS_17, BCNN_CRPS_18, BCNN_CRPS_19), axis=0)
# SCNN_CRPS = np.concatenate((SCNN_CRPS_17, SCNN_CRPS_18, SCNN_CRPS_19), axis=0)
# l0 = 6; l1 = 14
# clim_rain_plot = np.nanmean(CLIM_CRPS[rain_inds, l0:l1, ...], axis=(0, 1))
# base_rain_plot = np.nanmean(BASE_CRPS[rain_inds, l0:l1, ...], axis=(0, 1))
# bcnn_rain_plot = np.nanmean(BCNN_CRPS[rain_inds, l0:l1, ...], axis=(0, 1))
# sl_rain_plot = np.nanmean(SL_CRPS[rain_inds, l0:l1, ...], axis=(0, 1))
# scnn_rain_plot = np.nanmean(SCNN_CRPS[rain_inds, l0:l1, ...], axis=(0, 1))
# clim_dry_plot = np.nanmean(CLIM_CRPS[dry_inds, l0:l1, ...], axis=(0, 1))
# base_dry_plot = np.nanmean(BASE_CRPS[dry_inds, l0:l1, ...], axis=(0, 1))
# bcnn_dry_plot = np.nanmean(BCNN_CRPS[dry_inds, l0:l1, ...], axis=(0, 1))
# sl_dry_plot = np.nanmean(SL_CRPS[dry_inds, l0:l1, ...], axis=(0, 1))
# scnn_dry_plot = np.nanmean(SCNN_CRPS[dry_inds, l0:l1, ...], axis=(0, 1))
# CRPS_GRID = {}
# CRPS_GRID['base_rain'] = 1-base_rain_plot/clim_rain_plot
# CRPS_GRID['sl_rain'] = 1-sl_rain_plot/clim_rain_plot
# CRPS_GRID['bcnn_rain'] = 1-bcnn_rain_plot/clim_rain_plot
# CRPS_GRID['scnn_rain'] = 1-scnn_rain_plot/clim_rain_plot
# CRPS_GRID['base_dry'] = 1-base_dry_plot/clim_dry_plot
# CRPS_GRID['sl_dry'] = 1-sl_dry_plot/clim_dry_plot
# CRPS_GRID['bcnn_dry'] = 1-bcnn_dry_plot/clim_dry_plot
# CRPS_GRID['scnn_dry'] = 1-scnn_dry_plot/clim_dry_plot
# from scipy.stats import wilcoxon
# def wilcoxon_by_grids(FCST1, FCST2, CLIM):
# N_days, Nx, Ny = FCST1.shape
# w_stat = np.empty((Nx, Ny)); w_stat[...] = np.nan
# p_vals = np.empty((Nx, Ny)); p_vals[...] = np.nan
# grids1 = 1-FCST1/CLIM
# grids2 = 1-FCST2/CLIM
# for i in range(Nx):
# for j in range(Ny):
# w_, p_ = wilcoxon(grids1[:, i, j], grids2[:, i, j], alternative='greater')
# w_stat[i, j] = w_
# p_vals[i, j] = p_
# return w_stat, p_vals
# clim_rain_grids = np.nanmean(CLIM_CRPS[rain_inds, l0:l1, ...], axis=1)
# base_rain_grids = np.nanmean(BASE_CRPS[rain_inds, l0:l1, ...], axis=1)
# bcnn_rain_grids = np.nanmean(BCNN_CRPS[rain_inds, l0:l1, ...], axis=1)
# sl_rain_grids = np.nanmean(SL_CRPS[rain_inds, l0:l1, ...], axis=1)
# scnn_rain_grids = np.nanmean(SCNN_CRPS[rain_inds, l0:l1, ...], axis=1)
# clim_dry_grids = np.nanmean(CLIM_CRPS[dry_inds, l0:l1, ...], axis=1)
# base_dry_grids = np.nanmean(BASE_CRPS[dry_inds, l0:l1, ...], axis=1)
# bcnn_dry_grids = np.nanmean(BCNN_CRPS[dry_inds, l0:l1, ...], axis=1)
# sl_dry_grids = np.nanmean(SL_CRPS[dry_inds, l0:l1, ...], axis=1)
# scnn_dry_grids = np.nanmean(SCNN_CRPS[dry_inds, l0:l1, ...], axis=1)
# w_, p_ = wilcoxon_by_grids(bcnn_rain_grids, base_rain_grids, clim_rain_grids)
# CRPS_GRID['bcnn_base_rain_p'] = p_
# w_, p_ = wilcoxon_by_grids(scnn_rain_grids, sl_rain_grids, clim_rain_grids)
# CRPS_GRID['scnn_sl_rain_p'] = p_
# w_, p_ = wilcoxon_by_grids(sl_rain_grids, base_rain_grids, clim_rain_grids)
# CRPS_GRID['sl_base_rain_p'] = p_
# w_, p_ = wilcoxon_by_grids(scnn_rain_grids, bcnn_rain_grids, clim_rain_grids)
# CRPS_GRID['scnn_bcnn_rain_p'] = p_
# w_, p_ = wilcoxon_by_grids(bcnn_dry_grids, base_dry_grids, clim_dry_grids)
# CRPS_GRID['bcnn_base_dry_p'] = p_
# w_, p_ = wilcoxon_by_grids(scnn_dry_grids, sl_dry_grids, clim_dry_grids)
# CRPS_GRID['scnn_sl_dry_p'] = p_
# w_, p_ = wilcoxon_by_grids(sl_dry_grids, base_dry_grids, clim_dry_grids)
# CRPS_GRID['sl_base_dry_p'] = p_
# w_, p_ = wilcoxon_by_grids(scnn_dry_grids, bcnn_dry_grids, clim_dry_grids)
# CRPS_GRID['scnn_bcnn_dry_p'] = p_
# CLIM_CRPS_rain_ave = np.nanmean(CLIM_CRPS[rain_inds, ...], axis=(2, 3))
# BASE_CRPS_rain_ave = np.nanmean(BASE_CRPS[rain_inds, ...], axis=(2, 3))
# SL_CRPS_rain_ave = np.nanmean(SL_CRPS[rain_inds, ...], axis=(2, 3))
# BCNN_CRPS_rain_ave = np.nanmean(BCNN_CRPS[rain_inds, ...], axis=(2, 3))
# SCNN_CRPS_rain_ave = np.nanmean(SCNN_CRPS[rain_inds, ...], axis=(2, 3))
# CLIM_CRPS_dry_ave = np.nanmean(CLIM_CRPS[dry_inds, ...], axis=(2, 3))
# BASE_CRPS_dry_ave = np.nanmean(BASE_CRPS[dry_inds, ...], axis=(2, 3))
# SL_CRPS_dry_ave = np.nanmean(SL_CRPS[dry_inds, ...], axis=(2, 3))
# BCNN_CRPS_dry_ave = np.nanmean(BCNN_CRPS[dry_inds, ...], axis=(2, 3))
# SCNN_CRPS_dry_ave = np.nanmean(SCNN_CRPS[dry_inds, ...], axis=(2, 3))
# def wilcoxon_by_leads(FCST1, FCST2, CLIM):
# N, L = FCST1.shape
# w_stat = np.empty((L,))
# p_vals = np.empty((L,))
# series1 = 1-FCST1/CLIM
# series2 = 1-FCST2/CLIM
# for i in range(L):
# w_, p_ = wilcoxon(series1[:, i], series2[:, i], alternative='greater')
# w_stat[i] = w_
# p_vals[i] = p_
# return w_stat, p_vals
# pad_nan = np.array([np.nan, np.nan])
# CRPS_wilcox = {}
# w_stat, p_vals = wilcoxon_by_leads(SL_CRPS_rain_ave, BASE_CRPS_rain_ave, CLIM_CRPS_rain_ave)
# CRPS_wilcox['SL_BASE_rain'] = np.concatenate((pad_nan, p_vals), axis=0)
# w_stat, p_vals = wilcoxon_by_leads(SCNN_CRPS_rain_ave, BCNN_CRPS_rain_ave, CLIM_CRPS_rain_ave)
# CRPS_wilcox['SCNN_BCNN_rain'] = np.concatenate((pad_nan, p_vals), axis=0)
# w_stat, p_vals = wilcoxon_by_leads(SL_CRPS_dry_ave, BASE_CRPS_dry_ave, CLIM_CRPS_dry_ave)
# CRPS_wilcox['SL_BASE_dry'] = np.concatenate((pad_nan, p_vals), axis=0)
# w_stat, p_vals = wilcoxon_by_leads(SCNN_CRPS_dry_ave, BCNN_CRPS_dry_ave, CLIM_CRPS_dry_ave)
# CRPS_wilcox['SCNN_BCNN_dry'] = np.concatenate((pad_nan, p_vals), axis=0)
# CLIM_CRPS_rain = np.nanmean(CLIM_CRPS[rain_inds, :], axis=(0, 2, 3))
# GEFS_CRPS_rain = np.nanmean(GEFS_CRPS[rain_inds, :], axis=(0, 2, 3))
# BASE_CRPS_rain = np.nanmean(BASE_CRPS[rain_inds, :], axis=(0, 2, 3))
# SL_CRPS_rain = np.nanmean(SL_CRPS[rain_inds, :], axis=(0, 2, 3))
# BCNN_CRPS_rain = np.nanmean(BCNN_CRPS[rain_inds, :], axis=(0, 2, 3))
# SCNN_CRPS_rain = np.nanmean(SCNN_CRPS[rain_inds, :], axis=(0, 2, 3))
# CLIM_CRPS_dry = np.nanmean(CLIM_CRPS[dry_inds, :], axis=(0, 2, 3))
# GEFS_CRPS_dry = np.nanmean(GEFS_CRPS[dry_inds, :], axis=(0, 2, 3))
# BASE_CRPS_dry = np.nanmean(BASE_CRPS[dry_inds, :], axis=(0, 2, 3))
# SL_CRPS_dry = np.nanmean(SL_CRPS[dry_inds, :], axis=(0, 2, 3))
# BCNN_CRPS_dry = np.nanmean(BCNN_CRPS[dry_inds, :], axis=(0, 2, 3))
# SCNN_CRPS_dry = np.nanmean(SCNN_CRPS[dry_inds, :], axis=(0, 2, 3))
# CRPS_MEAN = {}
# CRPS_MEAN['clim_rain'] = np.concatenate((pad_nan, CLIM_CRPS_rain), axis=0)
# CRPS_MEAN['clim_dry'] = np.concatenate((pad_nan, CLIM_CRPS_dry), axis=0)
# CRPS_MEAN['gfs_rain'] = 1-np.concatenate((pad_nan, GEFS_CRPS_rain), axis=0)/CRPS_MEAN['clim_rain']
# CRPS_MEAN['base_rain'] = 1-np.concatenate((pad_nan, BASE_CRPS_rain), axis=0)/CRPS_MEAN['clim_rain']
# CRPS_MEAN['sl_rain'] = 1-np.concatenate((pad_nan, SL_CRPS_rain), axis=0)/CRPS_MEAN['clim_rain']
# CRPS_MEAN['bcnn_rain'] = 1-np.concatenate((pad_nan, BCNN_CRPS_rain), axis=0)/CRPS_MEAN['clim_rain']
# CRPS_MEAN['scnn_rain'] = 1-np.concatenate((pad_nan, SCNN_CRPS_rain), axis=0)/CRPS_MEAN['clim_rain']
# CRPS_MEAN['gfs_dry'] = 1-np.concatenate((pad_nan, GEFS_CRPS_dry), axis=0)/CRPS_MEAN['clim_dry']
# CRPS_MEAN['base_dry'] = 1-np.concatenate((pad_nan, BASE_CRPS_dry), axis=0)/CRPS_MEAN['clim_dry']
# CRPS_MEAN['sl_dry'] = 1-np.concatenate((pad_nan, SL_CRPS_dry), axis=0)/CRPS_MEAN['clim_dry']
# CRPS_MEAN['bcnn_dry'] = 1-np.concatenate((pad_nan, BCNN_CRPS_dry), axis=0)/CRPS_MEAN['clim_dry']
# CRPS_MEAN['scnn_dry'] = 1-np.concatenate((pad_nan, SCNN_CRPS_dry), axis=0)/CRPS_MEAN['clim_dry']
# np.save(save_dir+'CRPS_GRID.npy', CRPS_GRID)
# np.save(save_dir+'CRPS_wilcox.npy', CRPS_wilcox)
# np.save(save_dir+'CRPS_rain_dry.npy', CRPS_MEAN)
# -
# Load the cached CRPS summaries produced by the (commented-out) cells above.
# np.save stored plain dicts inside 0-d object arrays; allow_pickle=True plus
# the trailing [()] unwraps each back into the original dict.
CRPS_wilcox = np.load(save_dir+'CRPS_wilcox.npy', allow_pickle=True)[()]
CRPS_MEAN = np.load(save_dir+'CRPS_rain_dry.npy', allow_pickle=True)[()]
CRPS_GRID = np.load(save_dir+'CRPS_GRID.npy', allow_pickle=True)[()]
# ### MAE (replaced by CRPS)
# +
# with h5py.File(save_dir+'BASE_final_CRPS_2017.hdf', 'r') as h5io:
# BASE_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'BASE_final_CRPS_2018.hdf', 'r') as h5io:
# BASE_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'BASE_final_CRPS_2019.hdf', 'r') as h5io:
# BASE_MAE_19 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2017.hdf', 'r') as h5io:
# SL_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2018.hdf', 'r') as h5io:
# SL_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_final_CRPS_2019.hdf', 'r') as h5io:
# SL_MAE_19 = h5io['MAE'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2017.hdf', 'r') as h5io:
# BCNN_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2018.hdf', 'r') as h5io:
# BCNN_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'BASE_CNN_CRPS_2019.hdf', 'r') as h5io:
# BCNN_MAE_19 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2017.hdf', 'r') as h5io:
# SCNN_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2018.hdf', 'r') as h5io:
# SCNN_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'SL_CNN_CRPS_2019.hdf', 'r') as h5io:
# SCNN_MAE_19 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2017.hdf', 'r') as h5io:
# GEFS_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2018.hdf', 'r') as h5io:
# GEFS_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_CRPS_2019.hdf', 'r') as h5io:
# GEFS_MAE_19 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_RAW_CRPS_2017.hdf', 'r') as h5io:
# GEFS_RAW_MAE_17 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_RAW_CRPS_2018.hdf', 'r') as h5io:
# GEFS_RAW_MAE_18 = h5io['MAE'][...]
# with h5py.File(save_dir+'GEFS_RAW_CRPS_2019.hdf', 'r') as h5io:
# GEFS_RAW_MAE_19 = h5io['MAE'][...]
# GEFS_RAW_MAE = np.concatenate((GEFS_RAW_MAE_17, GEFS_RAW_MAE_18, GEFS_RAW_MAE_19), axis=0)
# GEFS_MAE = np.concatenate((GEFS_MAE_17, GEFS_MAE_18, GEFS_MAE_19), axis=0)
# BASE_MAE = np.concatenate((BASE_MAE_17, BASE_MAE_18, BASE_MAE_19), axis=0)
# SL_MAE = np.concatenate((SL_MAE_17, SL_MAE_18, SL_MAE_19), axis=0)
# BCNN_MAE = np.concatenate((BCNN_MAE_17, BCNN_MAE_18, BCNN_MAE_19), axis=0)
# SCNN_MAE = np.concatenate((SCNN_MAE_17, SCNN_MAE_18, SCNN_MAE_19), axis=0)
# GEFS_RAW_MAE_rain = np.nanmean(GEFS_RAW_MAE[rain_inds, :], axis=(0, 2, 3))
# GEFS_MAE_rain = np.nanmean(GEFS_MAE[rain_inds, :], axis=(0, 2, 3))
# BASE_MAE_rain = np.nanmean(BASE_MAE[rain_inds, :], axis=(0, 2, 3))
# SL_MAE_rain = np.nanmean(SL_MAE[rain_inds, :], axis=(0, 2, 3))
# BCNN_MAE_rain = np.nanmean(BCNN_MAE[rain_inds, :], axis=(0, 2, 3))
# SCNN_MAE_rain = np.nanmean(SCNN_MAE[rain_inds, :], axis=(0, 2, 3))
# GEFS_RAW_MAE_dry = np.nanmean(GEFS_RAW_MAE[dry_inds, :], axis=(0, 2, 3))
# GEFS_MAE_dry = np.nanmean(GEFS_MAE[dry_inds, :], axis=(0, 2, 3))
# BASE_MAE_dry = np.nanmean(BASE_MAE[dry_inds, :], axis=(0, 2, 3))
# SL_MAE_dry = np.nanmean(SL_MAE[dry_inds, :], axis=(0, 2, 3))
# BCNN_MAE_dry = np.nanmean(BCNN_MAE[dry_inds, :], axis=(0, 2, 3))
# SCNN_MAE_dry = np.nanmean(SCNN_MAE[dry_inds, :], axis=(0, 2, 3))
# pad_nan = np.array([np.nan, np.nan])
# MAE_MEAN = {}
# MAE_MEAN['gfs_rain'] = np.concatenate((pad_nan, GEFS_MAE_rain), axis=0)
# MAE_MEAN['gfs_raw_rain'] = np.concatenate((pad_nan, GEFS_RAW_MAE_rain), axis=0)
# MAE_MEAN['base_rain'] = np.concatenate((pad_nan, BASE_MAE_rain), axis=0)
# MAE_MEAN['sl_rain'] = np.concatenate((pad_nan, SL_MAE_rain), axis=0)
# MAE_MEAN['bcnn_rain'] = np.concatenate((pad_nan, BCNN_MAE_rain), axis=0)
# MAE_MEAN['scnn_rain'] = np.concatenate((pad_nan, SCNN_MAE_rain), axis=0)
# MAE_MEAN['gfs_dry'] = np.concatenate((pad_nan, GEFS_MAE_dry), axis=0)
# MAE_MEAN['gfs_raw_dry'] = np.concatenate((pad_nan, GEFS_RAW_MAE_dry), axis=0)
# MAE_MEAN['base_dry'] = np.concatenate((pad_nan, BASE_MAE_dry), axis=0)
# MAE_MEAN['sl_dry'] = np.concatenate((pad_nan, SL_MAE_dry), axis=0)
# MAE_MEAN['bcnn_dry'] = np.concatenate((pad_nan, BCNN_MAE_dry), axis=0)
# MAE_MEAN['scnn_dry'] = np.concatenate((pad_nan, SCNN_MAE_dry), axis=0)
# np.save(save_dir+'MAE_rain_dry.npy', MAE_MEAN)
# MAE_MEAN = np.load(save_dir+'MAE_rain_dry.npy', allow_pickle=True)[()]
# -
# # Figure
# +
# Map extent of the BC plotting domain: [lon_min, lon_max, lat_min, lat_max].
edge_bc = [-141, -113.25, 48.25, 60]

def aspc_cal(edge):
    """Return the height/width aspect ratio of a map extent.

    `edge` is a 4-element sequence [lon_min, lon_max, lat_min, lat_max];
    the ratio is (lat span) / (lon span), in degrees.
    """
    lon_min, lon_max, lat_min, lat_max = edge
    return (lat_max - lat_min) / (lon_max - lon_min)

r_bc = aspc_cal(edge_bc)
# -
cmap_pct, A = gu.precip_cmap()  # precipitation colormap and its color table
# +
gray = [0.75, 0.75, 0.75]
# Matplotlib line-style kwargs for each post-processing method.
# NOTE(review): `orange`, `red`, `cyan`, `blue` are presumably defined in an
# earlier cell (possibly drawn from the color table `A`) -- confirm upstream.
KW = {}
KW['base'] = {'linestyle': '-', 'color': orange, 'linewidth':2.5}
KW['bcnn'] = {'linestyle': '--', 'color': red, 'linewidth':2.5}
KW['sl'] = {'linestyle': '-', 'color': cyan, 'linewidth':2.5}
KW['scnn'] = {'linestyle': '--', 'color': blue, 'linewidth':2.5}
KW['gfs'] = {'linestyle': '-', 'color': gray, 'linewidth':2.5}
KW['gfs_raw'] = {'linestyle': ':', 'color': gray, 'linewidth':1.5}
# Shared kwargs for the bar/step renderings of the CRPSS-difference panels.
kw_bar = {'bottom':0.0, 'width': 3.0, 'color': gray, 'edgecolor': 'k', 'linestyle': '-', 'linewidth': 0}
kw_step = {'color': 'k', 'linestyle': '-', 'linewidth': 1.5, 'where':'mid'}
# -
fontsize = 13
# ## Line graph
# +
# ---------------------------------------------------------------------------
# Figure: domain-averaged CRPSS line graphs (Oct-Mar vs. Apr-Sept columns)
# with bar/step panels of pairwise CRPSS differences below.
# Depends on CRPS_MEAN, CRPS_wilcox, fcst_leads, dpi_, gu, KW, kw_bar,
# kw_step, need_publish, fig_dir, fig_keys from earlier cells.
# ---------------------------------------------------------------------------
cates = ['rain', 'dry']
method_crps = ['base', 'bcnn', 'sl', 'scnn', 'gfs']
method_mae = ['base', 'bcnn', 'sl', 'scnn', 'gfs', 'gfs_raw']
fig = plt.figure(figsize=(13, 8), dpi=dpi_)
# 3x5 grid: the thin middle column (width 0.1) is a spacer between seasons.
gs = gridspec.GridSpec(3, 5, height_ratios=[1, 0.6, 0.6], width_ratios=[1, 1, 0.1, 1, 1])
ax11 = plt.subplot(gs[0, 0])
ax12 = plt.subplot(gs[0, 1])
ax13 = plt.subplot(gs[1, 0])
ax14 = plt.subplot(gs[1, 1])
ax15 = plt.subplot(gs[2, 0])
ax16 = plt.subplot(gs[2, 1])
ax21 = plt.subplot(gs[0, 3])
ax22 = plt.subplot(gs[0, 4])
ax23 = plt.subplot(gs[1, 3])
ax24 = plt.subplot(gs[1, 4])
ax25 = plt.subplot(gs[2, 3])
ax26 = plt.subplot(gs[2, 4])
plt.subplots_adjust(0, 0, 1, 1, hspace=0, wspace=0)
# Axis groupings: all panels, CRPSS panels, and difference sub-panels.
AX_all = [ax11, ax12, ax13, ax14, ax15, ax16, ax21, ax22, ax23, ax24, ax25, ax26]
AX_crps = [ax11, ax12, ax21, ax22]
AX_diff_all = [ax13, ax14, ax15, ax16, ax23, ax24, ax25, ax26]
AX_diff_sub1 = [ax13, ax23]
AX_diff_sub2 = [ax14, ax24]
AX_diff_sub3 = [ax15, ax25]
AX_diff_sub4 = [ax16, ax26]
# Relative (axes-coordinate) anchors for the panel labels.
x_start1, y_start1 = 0.025, 0.975
x_start2, y_start2 = 0.025, 0.95
x_start3, y_start3 = 0.025, 0.95
handles = []
# Column titles drawn on dedicated invisible axes above the panels.
ax_t1 = fig.add_axes([0, 1.0, (2/4.1), 0.03])
ax_t1.set_axis_off()
handles.append(ax_t1.text(0.5, 1, 'Domain-averaged CRPSS in Oct-Mar, 2017-2019', ha='center', va='top',
                          fontsize=fontsize, transform=ax_t1.transAxes))
handles.append(ax_t1.text(0.5, 1, '[*]', ha='left', va='top', fontsize=9, transform=ax_t1.transAxes))
ax_t2 = fig.add_axes([2.1/4.1, 1.0, (2/4.1), 0.03])
ax_t2.set_axis_off()
handles.append(ax_t2.text(0.5, 1, 'Domain-averaged CRPSS in Apr-Sept, 2017-2019', ha='center', va='top',
                          fontsize=fontsize, transform=ax_t2.transAxes))
# Panel labels with per-segment colors that match the plotted line styles.
handles += gu.string_partial_format(fig, ax11, x_start1, y_start1, 'left', 'top',
                                    ['(a) CRPSS of ', 'noSL-SG', ','],
                                    ['k', orange, 'k'], [fontsize,]*3, ['normal', 'bold', 'normal'])
handles += gu.string_partial_format(fig, ax11, x_start1+0.1, y_start1-0.055, 'left', 'top',
                                    ['noSL-CNN', ', and quantile'], [red, 'k'], [fontsize,]*2, ['bold', 'normal'])
handles += gu.string_partial_format(fig, ax11, x_start1+0.1, y_start1-0.11, 'left', 'top',
                                    ['mapped ', 'GEFS', ' reforecast'], ['k', gray, 'k'], [fontsize,]*3, ['normal', 'bold', 'normal'])
handles += gu.string_partial_format(fig, ax12, x_start1, y_start1, 'left', 'top',
                                    ['(b) CRPSS of ', 'SL-SG', ', ', 'SL-CNN', ','],
                                    ['k', cyan, 'k', blue, 'k'], [fontsize,]*5, ['normal', 'bold', 'normal', 'bold', 'normal'])
handles += gu.string_partial_format(fig, ax12, x_start1+0.1, y_start1-0.055, 'left', 'top',
                                    ['and quantile mapped ', 'GEFS'], ['k', gray], [fontsize,]*2, ['normal', 'bold'])
handles += gu.string_partial_format(fig, ax12, x_start1+0.1, y_start1-0.11, 'left', 'top',
                                    ['reforecast'], ['k',], [fontsize,], ['normal',])
handles.append(ax21.text(x_start1, y_start1, '(c) Same as (a), but for Apr-Sept.', ha='left', va='top',
                         fontsize=fontsize, transform=ax21.transAxes, zorder=6))
handles.append(ax22.text(x_start1, y_start1, '(d) Same as (b), but for Apr-Sept.', ha='left', va='top',
                         fontsize=fontsize, transform=ax22.transAxes, zorder=6))
fontsize_list = [fontsize,]*5
fontweight_list = ['normal', 'bold', 'normal', 'bold', 'normal']
handles += gu.string_partial_format(fig, ax13, x_start2, y_start2, 'left', 'top',
                                    ['(e) ', 'noSL-CNN', ' minus ', 'noSL-SG'],
                                    ['k', red, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax14, x_start2, y_start2, 'left', 'top',
                                    ['(f) ', 'SL-CNN', ' minus ', 'SL-SG'],
                                    ['k', blue, 'k', cyan, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax23, x_start2, y_start2, 'left', 'top',
                                    ['(g) ', 'noSL-CNN', ' minus ', 'noSL-SG'],
                                    ['k', red, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax24, x_start2, y_start2, 'left', 'top',
                                    ['(h) ', 'SL-CNN', ' minus ', 'SL-SG'],
                                    ['k', blue, 'k', cyan, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax15, x_start3, y_start3, 'left', 'top',
                                    ['(i) ', 'SL-SG', ' minus ', 'noSL-SG'],
                                    ['k', cyan, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax16, x_start3, y_start3, 'left', 'top',
                                    ['(j) ', 'SL-CNN', ' minus ', 'noSL-CNN'],
                                    ['k', blue, 'k', red, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax25, x_start3, y_start3, 'left', 'top',
                                    ['(k) ', 'SL-SG', ' minus ', 'noSL-SG'],
                                    ['k', cyan, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles += gu.string_partial_format(fig, ax26, x_start3, y_start3, 'left', 'top',
                                    ['(l) ', 'SL-CNN', ' minus ', 'noSL-CNN'],
                                    ['k', blue, 'k', red, 'k'], fontsize_list, fontweight_list)
# Shared axis decoration: x axis in forecast hours (0-168 = 7 days).
for ax in AX_all:
    ax = gu.ax_decorate_box(ax)
    ax.xaxis.set_tick_params(labelsize=fontsize)
    ax.yaxis.set_tick_params(labelsize=fontsize)
    ax.set_xlim([0, 168])
    ax.set_xticks(np.arange(12, 168, 24))
    ax.axhline(0, xmin=0, xmax=1.0, linewidth=1.5, linestyle='-', color='k', zorder=3)
    # Day labels d-0 .. d-6 along the bottom of each panel.
    for d in range(1, 8):
        ax.text(d/7.0-1/14, 0.02, 'd-{}'.format(int(d-1)), ha='center', va='bottom',
                fontsize=fontsize, transform=ax.transAxes, zorder=6)
for i, ax in enumerate(AX_crps):
    ax.set_ylim([-0.4, 0.6])
    ax.set_yticks([-0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5])
    # Hand-drawn dotted grid; the vertical extent differs per column so the
    # grid lines do not run through the panel labels.
    if i > 1:
        for y in np.arange(-0.3, 0.6, 0.1):
            if np.abs(y-0)>0.01:
                ax.axhline(y, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5', zorder=2)
        for day in np.arange(24, 168+24, 24):
            ax.axvline(day, ymin=0, ymax=0.9, linewidth=1.5, linestyle=':', color='0.5', zorder=2)
    else:
        for y in np.arange(-0.3, 0.5, 0.1):
            if np.abs(y-0)>0.01:
                ax.axhline(y, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5', zorder=2)
        for day in np.arange(24, 168+24, 24):
            ax.axvline(day, ymin=0, ymax=0.8, linewidth=1.5, linestyle=':', color='0.5', zorder=2)
for ax in AX_diff_sub1 + AX_diff_sub2 + AX_diff_sub3 + AX_diff_sub4:
    ax.set_ylim([-0.02, 0.1])
    ax.set_yticks([0.0, 0.02, 0.04, 0.06, 0.08,])
    for y in np.arange(0.02, 0.1, 0.02):
        ax.axhline(y, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
    for day in np.arange(24, 168+24, 24):
        ax.axvline(day, ymin=0, ymax=5/6, linewidth=1.5, linestyle=':', color='0.5', zorder=5)
# for ax in [ax15, ax16, ax25, ax26]:
#     ax.tick_params(labelbottom=True)
#     ax.set_xticklabels(['d-0', 'd-1', 'd-2', 'd-3', 'd-4', 'd-5', 'd-6',])
ax11.tick_params(labelleft=True)
ax13.tick_params(labelleft=True)
ax15.tick_params(labelleft=True)
# CRPSS curves: left panel of each season compares the no-SL methods,
# right panel the SL methods; quantile-mapped GEFS appears in both.
for i, key in enumerate(['base', 'bcnn', 'gfs']):
    AX_crps[0].plot(fcst_leads[:56], CRPS_MEAN['{}_rain'.format(key)], **KW[key], zorder=4)
    AX_crps[2].plot(fcst_leads[:56], CRPS_MEAN['{}_dry'.format(key)], **KW[key], zorder=4)
for i, key in enumerate(['sl', 'scnn', 'gfs']):
    AX_crps[1].plot(fcst_leads[:56], CRPS_MEAN['{}_rain'.format(key)], **KW[key], zorder=4)
    AX_crps[3].plot(fcst_leads[:56], CRPS_MEAN['{}_dry'.format(key)], **KW[key], zorder=4)
# Pairwise CRPSS differences; in sub3/sub4 the bars are blanked (set to NaN)
# where the Wilcoxon signed-rank p-value exceeds 0.01.
for i, key in enumerate(['rain', 'dry']):
    diff_ = CRPS_MEAN['bcnn_{}'.format(key)]-CRPS_MEAN['base_{}'.format(key)]
    AX_diff_sub1[i].bar(fcst_leads[:56], diff_, **kw_bar, zorder=2)
    AX_diff_sub1[i].step(fcst_leads[:56], diff_, **kw_step, zorder=4)
    AX_diff_sub1[i].vlines(fcst_leads[2]-1.5, ymin=0.0, ymax=diff_[2], color='k', linestyle='-', linewidth=1.5, zorder=5)
    diff_ = CRPS_MEAN['scnn_{}'.format(key)]-CRPS_MEAN['sl_{}'.format(key)]
    AX_diff_sub2[i].bar(fcst_leads[:56], diff_, **kw_bar, zorder=2)
    AX_diff_sub2[i].step(fcst_leads[:56], diff_, **kw_step, zorder=4)
    AX_diff_sub2[i].vlines(fcst_leads[2]-1.5, ymin=0.0, ymax=diff_[2], color='k', linestyle='-', linewidth=1.5, zorder=5)
    diff_ = np.copy(CRPS_MEAN['sl_{}'.format(key)]-CRPS_MEAN['base_{}'.format(key)])
    AX_diff_sub3[i].step(fcst_leads[:56], diff_, **kw_step, zorder=4)
    AX_diff_sub3[i].vlines(fcst_leads[2]-1.5, ymin=0.0, ymax=diff_[2], color='k', linestyle='-', linewidth=1.5, zorder=5)
    diff_[CRPS_wilcox['SL_BASE_{}'.format(key)] > 0.01] = np.nan
    AX_diff_sub3[i].bar(fcst_leads[:56], diff_, **kw_bar, zorder=2)
    diff_ = np.copy(CRPS_MEAN['scnn_{}'.format(key)]-CRPS_MEAN['bcnn_{}'.format(key)])
    AX_diff_sub4[i].step(fcst_leads[:56], diff_, **kw_step, zorder=4)
    AX_diff_sub4[i].vlines(fcst_leads[2]-1.5, ymin=0.0, ymax=diff_[2], color='k', linestyle='-', linewidth=1.5, zorder=5)
    diff_[CRPS_wilcox['SCNN_BCNN_{}'.format(key)] > 0.01] = np.nan
    AX_diff_sub4[i].bar(fcst_leads[:56], diff_, **kw_bar, zorder=2)
# Mark the +9 hr lead time on panel (a).
ax11.axvline(fcst_leads[2]-0.75, ymin=0.5, ymax=0.65, color='k', linestyle='-', linewidth=1.5, zorder=5)
handles.append(ax11.text(0.025, 0.475, '+9 hr', ha='left', va='top', fontsize=fontsize, transform=ax11.transAxes))
# White background boxes keep the labels legible over the grid lines.
for handle in handles:
    handle.set_bbox(dict(facecolor='w', pad=0, edgecolor='none', zorder=6))
# Footnote explaining the [*] marker next to the left column title.
ax_w = fig.add_axes([0.0, -0.075, 0.275, 0.05])
ax_w.set_axis_off()
ax_w.text(0, 1, '* CRP Skill Score (CRPSS) is calculated\n based on the 2000-2014 ERA5.',
          ha='left', va='top', fontsize=fontsize, transform=ax_w.transAxes);
# Legend line handles get blank labels; the colored method names are drawn
# separately (below) so each name can carry its method's color.
label_ = ['          ',
          '         ',
          '        ',
          '       ',
          '      ']
handle_lines = []
handle_lines.append(mlines.Line2D([], [], label=label_[0], **KW['bcnn']))
handle_lines.append(mlines.Line2D([], [], label=label_[1], **KW['base']))
handle_lines.append(mlines.Line2D([], [], label=label_[2], **KW['scnn']))
handle_lines.append(mlines.Line2D([], [], label=label_[3], **KW['sl']))
handle_lines.append(mlines.Line2D([], [], label=label_[4], **KW['gfs']))
ax_lg1 = fig.add_axes([1.2/4.1, -0.075, 2.9/4.1, 0.05])
ax_lg1.set_axis_off()
LG1 = ax_lg1.legend(handles=handle_lines, bbox_to_anchor=(1, 0.75), ncol=5, loc=7, prop={'size':fontsize}, fancybox=False);
LG1.get_frame().set_facecolor('none')
LG1.get_frame().set_linewidth(0)
LG1.get_frame().set_alpha(1.0)
ax_lg1.text(0.04, 1, 'noSL-CNN', ha='left', va='top', fontsize=fontsize, fontweight='bold',
            color=KW['bcnn']['color'], transform=ax_lg1.transAxes)
ax_lg1.text(0.04+0.175, 1, 'noSL-SG', ha='left', va='top', fontsize=fontsize, fontweight='bold',
            color=KW['base']['color'], transform=ax_lg1.transAxes)
ax_lg1.text(0.04+0.325, 1, 'SL-CNN', ha='left', va='top', fontsize=fontsize, fontweight='bold',
            color=KW['scnn']['color'], transform=ax_lg1.transAxes)
ax_lg1.text(0.04+0.485, 1, 'SL-SG', ha='left', va='top', fontsize=fontsize, fontweight='bold',
            color=KW['sl']['color'], transform=ax_lg1.transAxes)
gu.string_partial_format(fig, ax_lg1, 0.06+0.6, 1, 'left', 'top', ['quantile mapped ', 'GEFS', ' reforecast'],
                         ['k', gray, 'k'], [fontsize,]*3, ['normal', 'bold', 'normal']);
# Second legend: bar outline vs. significance shading.
handle_legneds = []
handle_legneds.append(patches.Patch(facecolor='none', edgecolor='k', linewidth=1.5,
                                    label='CRPSS difference'))
handle_legneds.append(patches.Patch(facecolor=gray, edgecolor='none',
                                    label='One-sided Wilcoxon signed-rank test, p < 0.01'))
ax_lg2 = fig.add_axes([1.8/4.1, -0.075-0.01, 2.3/4.1, 0.03])
ax_lg2.set_axis_off()
LG2 = ax_lg2.legend(handles=handle_legneds, bbox_to_anchor=(0.0, 0.5), ncol=2, loc=6,
                    prop={'size':fontsize}, fancybox=False);
LG2.get_frame().set_facecolor('none')
LG2.get_frame().set_linewidth(0)
LG2.get_frame().set_alpha(1.0)
if need_publish:
    # Save figure
    fig.savefig(fig_dir+'AnEn_ERA_CRPS.png', format='png', **fig_keys)
# -
# ## Maps
# +
def aspc_cal(edge):
    """Aspect ratio (Δlat / Δlon) of a map extent [x0, x1, y0, y1]."""
    delta_y = edge[3] - edge[2]
    delta_x = edge[1] - edge[0]
    return delta_y / delta_x


# Extent widened 2 degrees to the west relative to the line-graph figure.
edge_bc = [-141-2, -113.25, 48.25, 60]
r_bc = aspc_cal(edge_bc)
# -
# Cartopy map settings
scale_param = '50m'  # Natural Earth feature resolution; 10m for publication quality
# US states and CAN-US boundary
PROVINCE = cfeature.NaturalEarthFeature(
    category='cultural',
    name='admin_1_states_provinces_lines',
    scale=scale_param,
    facecolor='none')
# Reuse the precipitation colormap for the CRPSS-difference maps.
cmap_diff = cmap_pct
# Hatching (insignificant areas) is drawn in black.
plt.rcParams['hatch.color'] = 'k'
fontsize = 14
# +
# ---------------------------------------------------------------------------
# Figure: grid-point CRPSS-difference maps, 4 comparisons x 2 seasons.
# Left column (ax1-ax4): Oct-Mar; right column (ax5-ax8): Apr-Sept.
# ---------------------------------------------------------------------------
VLIM = [0.0, 0.1]
fig = plt.figure(figsize=(13, 4.0*(1/2.05)*13*r_bc), dpi=dpi_)
gs = gridspec.GridSpec(4, 3, height_ratios=[1, 1, 1, 1], width_ratios=[1, 0.05, 1])
ax1 = plt.subplot(gs[0, 0], projection=ccrs.PlateCarree())
ax2 = plt.subplot(gs[1, 0], projection=ccrs.PlateCarree())
ax3 = plt.subplot(gs[2, 0], projection=ccrs.PlateCarree())
ax4 = plt.subplot(gs[3, 0], projection=ccrs.PlateCarree())
ax5 = plt.subplot(gs[0, 2], projection=ccrs.PlateCarree())
ax6 = plt.subplot(gs[1, 2], projection=ccrs.PlateCarree())
ax7 = plt.subplot(gs[2, 2], projection=ccrs.PlateCarree())
ax8 = plt.subplot(gs[3, 2], projection=ccrs.PlateCarree())
plt.subplots_adjust(0, 0, 1, 1, hspace=0, wspace=0)
AX_maps = [ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8]
handles = []
fontsize_list = [fontsize,]*5
fontweight_list = ['normal', 'bold', 'normal', 'bold', 'normal']
x_start2, y_start2 = 0.02, 0.175
x_start3, y_start3 = 0.02, 0.175
# Panel labels (a)-(h) with method names colored to match the line figure.
handles += gu.string_partial_format(fig, ax1, x_start2, y_start2, 'left', 'top',
                                    ['(a) ', 'noSL-CNN', ' minus ', 'noSL-SG'],
                                    ['k', red, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles.append(ax1.text(x_start2, x_start2, ' Oct-Mar, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax1.transAxes))
handles += gu.string_partial_format(fig, ax2, x_start2, y_start2, 'left', 'top',
                                    ['(b) ', 'SL-CNN', ' minus ', 'SL-SG'],
                                    ['k', blue, 'k', cyan, 'k'], fontsize_list, fontweight_list)
handles.append(ax2.text(x_start2, x_start2, ' Oct-Mar, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax2.transAxes))
handles += gu.string_partial_format(fig, ax3, x_start3, y_start3, 'left', 'top',
                                    ['(c) ', 'SL-SG', ' minus ', 'noSL-SG'],
                                    ['k', cyan, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles.append(ax3.text(x_start2, x_start2, ' Oct-Mar, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax3.transAxes))
handles += gu.string_partial_format(fig, ax4, x_start3, y_start3, 'left', 'top',
                                    ['(d) ', 'SL-CNN', ' minus ', 'noSL-CNN'],
                                    ['k', blue, 'k', red, 'k'], fontsize_list, fontweight_list)
handles.append(ax4.text(x_start2, x_start2, ' Oct-Mar, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax4.transAxes))
handles += gu.string_partial_format(fig, ax5, x_start2, y_start2, 'left', 'top',
                                    ['(e) ', 'noSL-CNN', ' minus ', 'noSL-SG'],
                                    ['k', red, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles.append(ax5.text(x_start2, x_start2, ' Apr-Sept, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax5.transAxes))
handles += gu.string_partial_format(fig, ax6, x_start2, y_start2, 'left', 'top',
                                    ['(f) ', 'SL-CNN', ' minus ', 'SL-SG'],
                                    ['k', blue, 'k', cyan, 'k'], fontsize_list, fontweight_list)
handles.append(ax6.text(x_start2, x_start2, ' Apr-Sept, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax6.transAxes))
handles += gu.string_partial_format(fig, ax7, x_start3, y_start3, 'left', 'top',
                                    ['(g) ', 'SL-SG', ' minus ', 'noSL-SG'],
                                    ['k', cyan, 'k', orange, 'k'], fontsize_list, fontweight_list)
handles.append(ax7.text(x_start2, x_start2, ' Apr-Sept, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax7.transAxes))
handles += gu.string_partial_format(fig, ax8, x_start3, y_start3, 'left', 'top',
                                    ['(h) ', 'SL-CNN', ' minus ', 'noSL-CNN'],
                                    ['k', blue, 'k', red, 'k'], fontsize_list, fontweight_list)
handles.append(ax8.text(x_start2, x_start2, ' Apr-Sept, 2017-2019',
                        ha='left', va='bottom', fontsize=fontsize, transform=ax8.transAxes))
# ax_t1 = fig.add_axes([0, 1.0, (1/2.1), 0.045])
# ax_t1.set_axis_off()
# handles.append(ax_t1.text(0.5, 1, 'The CRPSS differences in day-1 forecasts', ha='center', va='top',
#                           fontsize=fontsize, transform=ax_t1.transAxes))
# ax_t2 = fig.add_axes([1.1/2.1, 1.0, (1/2.1), 0.045])
# ax_t2.set_axis_off()
# handles.append(ax_t2.text(0.5, 1, 'The CRPSS differences in day-1 forecasts', ha='center', va='top',
#                           fontsize=fontsize, transform=ax_t2.transAxes))
# Shared cartographic decoration for all eight map panels.
for n, ax in enumerate(AX_maps):
    ax.set_extent(edge_bc, ccrs.PlateCarree())
    ax.add_feature(cfeature.COASTLINE.with_scale(scale_param), edgecolor='k', linewidth=1.0)
    ax.add_feature(cfeature.BORDERS.with_scale(scale_param), linestyle='--', linewidth=2.5)
    ax.add_feature(PROVINCE, edgecolor='k', linestyle=':', linewidth=2.5)
    ax.spines['geo'].set_linewidth(2.5)
# Keys into CRPS_GRID for the eight panels (a)-(h): comparisons
# (bcnn-base, scnn-sl, sl-base, scnn-bcnn) for the rain season, then the
# same four comparisons for the dry season.
# FIX: the source carried '<KEY>' redaction placeholders in three slots,
# which would raise KeyError at runtime. The names are reconstructed from
# the parallel dry-season entries and the panel titles above
# (panel (d) is 'SL-CNN minus noSL-CNN', i.e. scnn minus bcnn).
keys_p = ['bcnn_base_rain_p', 'scnn_sl_rain_p', 'sl_base_rain_p', 'scnn_bcnn_rain_p',
          'bcnn_base_dry_p', 'scnn_sl_dry_p', 'sl_base_dry_p', 'scnn_bcnn_dry_p']
keys1 = ['bcnn_rain', 'scnn_rain', 'sl_rain', 'scnn_rain',
         'bcnn_dry', 'scnn_dry', 'sl_dry', 'scnn_dry']
keys2 = ['base_rain', 'sl_rain', 'base_rain', 'bcnn_rain',
         'base_dry', 'sl_dry', 'base_dry', 'bcnn_dry']
for i in range(8):
    # Per-grid-point CRPSS difference; insignificant points (p > 0.01) are
    # zeroed, and points outside the land mask are blanked out.
    diff_ = np.copy(CRPS_GRID[keys1[i]] - CRPS_GRID[keys2[i]])
    flag_ = np.copy(CRPS_GRID[keys_p[i]] > 0.01)
    diff_[flag_] = 0.0
    diff_[land_mask_bc] = np.nan
    flag_[land_mask_bc] = False
    # CS is reused after this loop as the mappable for the shared colorbar.
    CS = AX_maps[i].pcolormesh(bc_lon, bc_lat, diff_, vmin=VLIM[0], vmax=VLIM[1], cmap=cmap_diff)
    # Hatch the statistically insignificant points.
    AX_maps[i].contourf(bc_lon, bc_lat, flag_, np.array([0.5, 1.5]),
                        hatches=['///', 'none'], colors=('none',))
# Named-region annotations (label text + pointer arrow) on selected panels.
for ax in [ax1, ax2, ax5, ax6]:
    # Alaska
    ax.text(0.085, 0.8, 'Southeast\nAlaska', fontsize=14, ha='center', va='center', transform=ax.transAxes)
    ax.arrow(0.18, 0.8, 0.02, 0.0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
    # N & C coast
    ax.text(0.13, 0.52, 'North & Central\nBC Coast', fontsize=14, ha='center', va='center', transform=ax.transAxes)
    ax.arrow(0.27, 0.52, 0.2, 0.0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
    # Fraser
    ax.text(0.9, 0.5, 'Fraser River\nplateau', fontsize=14, ha='center', va='center', transform=ax.transAxes)
    ax.arrow(0.79, 0.5, -0.12, 0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
for ax in [ax1, ax2, ax3, ax4]:
    # S-C
    ax.text(0.4, 0.28, 'South Coast', fontsize=14, ha='right', va='center', transform=ax.transAxes)
    ax.arrow(0.41, 0.26, 0.18, 0.0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
for ax in [ax3, ax4]:
    # Peace
    ax.text(0.81, 0.73, 'Peace River', fontsize=14, ha='left', va='center', transform=ax.transAxes)
    ax.arrow(0.79, 0.73, -0.16, 0.0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
for ax in [ax5, ax6]:
    # Northeast
    ax.text(0.89, 0.8, 'Northeast BC', fontsize=14, ha='center', va='center', transform=ax.transAxes)
    ax.arrow(0.82, 0.88, -0.07, 0, head_width=0.05, head_length=0.025,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
for ax in [ax3, ax4]:
    # C-K
    ax.text(0.9, 0.45, 'Columbia-\nKootenay', fontsize=14, ha='center', va='bottom', transform=ax.transAxes)
    ax.arrow(0.89, 0.43, -0.04, -0.12, head_width=0.03, head_length=0.04,
             linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
# Legend explaining the hatching plus its second-line note.
handle_legneds = []
handle_legneds.append(patches.Patch(facecolor='none', edgecolor='k', hatch='///',
                                    label='One-sided Wilcoxon signed-rank test, p > 0.01'))
ax_lg1 = fig.add_axes([0, -0.04, 1/2.1-0.065, 0.025])
ax_lg1.set_axis_off()
LG2 = ax_lg1.legend(handles=handle_legneds, bbox_to_anchor=(1.0, 0.5), ncol=1, loc=7, prop={'size':fontsize}, fancybox=False);
LG2.get_frame().set_facecolor('none')
LG2.get_frame().set_linewidth(0)
LG2.get_frame().set_alpha(1.0)
ax_lg2 = fig.add_axes([0, -0.065, 1/2.1-0.065, 0.025])
ax_lg2.set_axis_off()
handles.append(ax_lg2.text(0.1175, 1.0, '(i.e., statistically insignificant CRPSS difference)', ha='left', va='top',
                           fontsize=fontsize, transform=ax_lg2.transAxes))
# Horizontal colorbar hosted on an invisible base axis below the maps.
# Relies on CS from the plotting loop above.
ax_base = fig.add_axes([1.1/2.1, -0.065, 1.0/2.1, 0.05])
[j.set_linewidth(0) for j in ax_base.spines.values()]
ax_base.tick_params(axis='both', left=False, top=False, right=False, bottom=False, \
                    labelleft=False, labeltop=False, labelright=False, labelbottom=False)
cax = inset_axes(ax_base, height='27.5%', width='100%', borderpad=0, loc=2)
CBar = plt.colorbar(CS, orientation='horizontal', extend='both', ticks=[0.0, 0.02, 0.04, 0.06, 0.08, 0.10], cax=cax) #, ,
CBar.ax.tick_params(axis='x', labelsize=14, direction='in', length=0)
CBar.set_label('CRPSS difference, 0.0067 per color scale', fontsize=14)
CBar.outline.set_linewidth(2.5)
if need_publish:
    # Save figure
    fig.savefig(fig_dir+'AnEn_ERA_CRPS_GRID.png', format='png', **fig_keys)
# -
# # Figure (old)
# +
# edge_bc = [-141, -113.25, 48.25, 60]
# r_bc = aspc_cal(edge_bc)
# blue = A[3, :]; cyan = A[2, :]; green = A[7, :]; red = A[-3, :]; gray = [0.5, 0.5, 0.5]
# k = 'k'
# C = [cyan, blue, green]
# S = ['-', '--', '-']
# VLIM = [0.1, 0.4]
# fig = plt.figure(figsize=(13, (3.1/2)*13*(1/2)*r_bc*2))
# gs = gridspec.GridSpec(4, 20, height_ratios=[1, 0.1, 1, 1], width_ratios=[0.5]+[1]*18+[0.5])
# ax5 = plt.subplot(gs[0, 1:10])
# ax6 = plt.subplot(gs[0, 10:-1])
# ax1 = plt.subplot(gs[2, :10], projection=ccrs.PlateCarree())
# ax2 = plt.subplot(gs[2, 10:], projection=ccrs.PlateCarree())
# ax3 = plt.subplot(gs[3, :10], projection=ccrs.PlateCarree())
# ax4 = plt.subplot(gs[3, 10:], projection=ccrs.PlateCarree())
# plt.subplots_adjust(0, 0, 1, 1, hspace=0, wspace=0)
# AX_lines = [ax5, ax6]
# AX_maps = [ax1, ax2, ax3, ax4]
# handle_text = []
# title_lines = ['(a) CRPSS comparisons\n Averaged in Oct-Mar, 2017-2019',
# '(b) Same as in (a), but for\n Apr-Sept, 2017-2019']
# title_maps = ['(c) SL-SG, averaged from\n day-1 fcst, Oct-Mar, 2017-2019',
# '(d) Same as in (c),\n but for Apr-Sept',
# '(e) SL-CNN, averaged from\n day-1 fcst, Oct-Mar, 2017-2019',
# '(f) Same as in (e),\n but for Apr-Sept']
# labels = ['noSL-SG', 'SL-SG', 'SL-CNN (ours)']
# for n, ax in enumerate(AX_maps):
# ax.set_extent(edge_bc, ccrs.PlateCarree())
# ax.add_feature(cfeature.COASTLINE.with_scale(scale_param), edgecolor='k', linewidth=1.5)
# ax.add_feature(cfeature.BORDERS.with_scale(scale_param), linestyle='--', linewidth=2.5)
# ax.add_feature(PROVINCE, edgecolor='k', linestyle=':', linewidth=2.5)
# ax.spines['geo'].set_linewidth(2.5)
# ax.text(0.01, 0.025, title_maps[n], ha='left', va='bottom', fontsize=14, transform=ax.transAxes, zorder=5)
# xlocs = [0.4, 0.5]
# ylocs = [0.97, 0.97]
# for i, ax in enumerate(AX_lines):
# ax = gu.ax_decorate(ax, left_flag=True, bottom_flag=True)
# ax.grid(False)
# ax.xaxis.set_tick_params(labelsize=14)
# ax.yaxis.set_tick_params(labelsize=14)
# [j.set_linewidth(2.5) for j in ax.spines.values()]
# ax.tick_params(axis="both", which="both",
# bottom=False, top=False, left=False, right=False,
# labelbottom=False, labelleft=True)
# # ------------------- #
# # ticks and day-1 to day-7 labels
# ax.set_ylim([0.025, 0.425])
# ax.set_yticks([0.1, 0.2, 0.3, 0.4])
# ax.set_xlim([0, 168])
# for d in range(1, 8):
# ax.text(d/7.0-1/14, 0, 'day-{}'.format(int(d-1)), ha='center', va='bottom',
# fontsize=14, transform=ax.transAxes )
# # ------------------- #
# # titles
# handle_text.append(ax.text(xlocs[i], ylocs[i], title_lines[i], ha='left', va='top',
# fontsize=14, transform=ax.transAxes, zorder=2))
# # ------------------- #
# # grid lines by hand
# ax5.axhline(0.1, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax5.axhline(0.2, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax5.axhline(0.3, xmin=0, xmax=3/7, linewidth=1.5, linestyle=':', color='0.5')
# ax5.axhline(0.4, xmin=0, xmax=3/7, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axhline(0.1, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axhline(0.2, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axhline(0.3, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axhline(0.4, xmin=0, xmax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# for day in np.arange(24, 168+24, 24):
# if day == 72:
# ax5.axvline(day, ymin=0, ymax=0.755, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axvline(day, ymin=0, ymax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# elif day >= 96:
# ax5.axvline(day, ymin=0, ymax=0.5, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axvline(day, ymin=0, ymax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# else:
# ax5.axvline(day, ymin=0, ymax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# ax6.axvline(day, ymin=0, ymax=1.0, linewidth=1.5, linestyle=':', color='0.5')
# # ------------------- #
# handle_sym = ax5.text(0.562, 1, '[*]', ha='left', va='top', fontsize=12, transform=ax5.transAxes, zorder=5)
# for handle in handle_text:
# handle.set_bbox(dict(facecolor='w', edgecolor='none', zorder=2))
# ax6.spines["right"].set_visible(True)
# ax6.tick_params(labelleft=False)
# ax6.tick_params(axis="both", which="both", labelleft=False)
# CS = ax1.pcolormesh(bc_lon, bc_lat, 1-sl_rain_plot/gfs_rain_plot, vmin=VLIM[0], vmax=VLIM[1], cmap=cmap_pct)
# ax3.pcolormesh(bc_lon, bc_lat, 1-cnn_rain_plot/gfs_rain_plot, vmin=VLIM[0], vmax=VLIM[1], cmap=cmap_pct)
# ax2.pcolormesh(bc_lon, bc_lat, 1-sl_dry_plot/gfs_dry_plot, vmin=VLIM[0], vmax=VLIM[1], cmap=cmap_pct)
# ax4.pcolormesh(bc_lon, bc_lat, 1-cnn_dry_plot/gfs_dry_plot, vmin=VLIM[0], vmax=VLIM[1], cmap=cmap_pct)
# for ax in [ax2, ax4]:
# ax.text(0.81, 0.7, 'Peace River', fontsize=14, ha='left', va='center', transform=ax.transAxes)
# ax.arrow(0.8, 0.7, -0.18, 0.0, head_width=0.05, head_length=0.025,
# linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
# ax.text(0.9, 0.45, 'Southern\nInterior', fontsize=14, ha='center', va='bottom', transform=ax.transAxes)
# ax.arrow(0.83, 0.49, -0.04, -0.12, head_width=0.03, head_length=0.04,
# linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
# for ax in [ax1, ax3]:
# ax.text(0.13, 0.53, 'North & Central\nBC Coast', fontsize=14, ha='center', va='center', transform=ax.transAxes)
# ax.arrow(0.27, 0.53, 0.17, 0.0, head_width=0.05, head_length=0.025,
# linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
# ax.text(0.4, 0.28, 'South Coast', fontsize=14, ha='right', va='center', transform=ax.transAxes)
# ax.arrow(0.41, 0.26, 0.16, 0.0, head_width=0.05, head_length=0.025,
# linewidth=2.5, fc='k', ec='k', transform=ax.transAxes)
# LINES = []
# methods = ['base', 'sl', 'cnn']
# AX_cate = [ax5, ax6]
# for i, cate in enumerate(cates):
# for j, method in enumerate(methods):
# LINES += AX_cate[i].plot(fcst_leads[:56],
# (1-CRPS_MEAN['{}_{}'.format(method, cate)]/CRPS_MEAN['gfs_{}'.format(cate)])[:56],
# color=C[j], linewidth=3.5, linestyle=S[j], label=labels[j], zorder=3)
# ax5.axvline(fcst_leads[2]-0.75, ymin=0.475, ymax=0.625, color='k', linestyle='-', linewidth=2.5)
# ax5.text(0.025, 0.425, '+9 hr', ha='left', va='top', fontsize=14, transform=ax5.transAxes)
# ax_lg1 = fig.add_axes([0.0225+0.22, 0.835+0.04, 0.175, 0.05])
# ax_lg1.set_axis_off()
# LG1 = ax_lg1.legend(handles=[LINES[2]], bbox_to_anchor=(1, 1), ncol=1, prop={'size':14}, fancybox=False);
# LG1.get_frame().set_facecolor('w')
# LG1.get_frame().set_linewidth(0)
# LG1.get_frame().set_alpha(1.0)
# ax_lg2 = fig.add_axes([0.02+0.22, 0.805+0.04, 0.25, 0.05])
# ax_lg2.set_axis_off()
# LG2 = ax_lg2.legend(handles=[LINES[1], LINES[0]], bbox_to_anchor=(1, 1), ncol=2, prop={'size':14}, fancybox=False);
# LG2.get_frame().set_facecolor('w')
# LG2.get_frame().set_linewidth(0)
# LG2.get_frame().set_alpha(1.0)
# ax_w = fig.add_axes([0.0, -0.06, 0.41, 0.05])
# ax_w.set_axis_off()
# ax_w.text(0, 1,
# '* CRP Skill Score (CRPSS) is calculated relative to\n the quantile mapped and enlarged GEFS reforecast (75 members).',
# ha='left', va='top', fontsize=14, transform=ax_w.transAxes);
# ax_base = fig.add_axes([0.6, -0.065, 0.3-0.015, 0.05])
# [j.set_linewidth(0) for j in ax_base.spines.values()]
# ax_base.tick_params(axis='both', left=False, top=False, right=False, bottom=False, \
# labelleft=False, labeltop=False, labelright=False, labelbottom=False)
# cax = inset_axes(ax_base, height='27.5%', width='100%', borderpad=0, loc=2)
# CBar = plt.colorbar(CS, orientation='horizontal', ticks=[0.05, 0.15, 0.25, 0.35,], extend='min', cax=cax) #,
# CBar.ax.tick_params(axis='x', labelsize=14, direction='in', length=0)
# CBar.set_label('CRPSS, 0.02 per color scale', fontsize=14)
# CBar.outline.set_linewidth(2.5)
# # # Save figure
# # fig.savefig(fig_dir+'AnEn_ERA_CRPS.png', format='png', **fig_keys)
# -
|
publish/ERA5_verif_old/PLOT_CRPS_ERA5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Demonstrate basic Python set behavior: insertion and deduplication.
conj = {2, 5, 6, 3}
conj
conj.add(10)
conj
# Insert several integers at once; a set silently ignores duplicates.
conj.update((18, 20))
conj
# +
# Sets may mix hashable types (strings alongside integers).
conj.update(("z", "y", "a"))
conj
# -
# ### Note: the display order of a set is arbitrary -- sets are unordered.
#
# Repeated literals collapse into a single element.
repetido = {"a", "a", "a", "a"}
repetido
# Building a set from another set yields a deduplicated shallow copy.
con = {2, 2, 2, 5, 6, 8, 4}
lista = set(con)
lista
|
Conjuntos.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Using `matplotlib` to display inline images
#
# In this notebook we will explore using `matplotlib` to display images in our notebooks, and work towards developing a reusable function to display 2D,3D, color, and label overlays for SimpleITK images.
#
# We will also look at the subtleties of working with image filters that require the input images' to be overlapping.
# %matplotlib inline
import matplotlib.pyplot as plt
import SimpleITK as sitk
# Download data to work on
# %run update_path_to_download_script
from downloaddata import fetch_data as fdata
# SimpleITK has a built in `Show` method which saves the image to disk and launches a user configurable program ( defaults to ImageJ ), to display the image.
# + simpleitk_error_allowed="Exception thrown in SimpleITK Show:"
# Show() writes the image to disk and launches an external viewer
# (ImageJ by default), so it may fail on headless systems.
img1 = sitk.ReadImage(fdata("cthead1.png"))
sitk.Show(img1, title="cthead1")
# + simpleitk_error_allowed="Exception thrown in SimpleITK Show:"
img2 = sitk.ReadImage(fdata("VM1111Shrink-RGB.png"))
sitk.Show(img2, title="Visible Human Head")
# -
# Inline alternative: convert to a numpy array view and use matplotlib.
nda = sitk.GetArrayViewFromImage(img1)
plt.imshow(nda)
nda = sitk.GetArrayViewFromImage(img2)
ax = plt.imshow(nda)
def myshow(img):
    """Render a SimpleITK image inline using matplotlib's default settings."""
    plt.imshow(sitk.GetArrayViewFromImage(img))
myshow(img2)
# NOTE(review): [10]*5 supplies 5 expand factors for what appears to be a
# 2D RGB image -- presumably sitk.Expand ignores the extras; confirm.
myshow(sitk.Expand(img2, [10]*5))
# This image does not appear bigger.
#
# There are numerous improvements that we can make:
#
# - support 3d images
# - include a title
# - use physical pixel size for axis labels
# - show the image as gray values
def myshow(img, title=None, margin=0.05, dpi=80):
    """Display a SimpleITK image inline with matplotlib.

    Handles 2D grayscale, 2D RGB(A), and 3D images (the middle z-slice is
    shown for scalar 3D / vector 4D input). Axis extents are scaled by the
    image's physical spacing.

    Parameters
    ----------
    img : SimpleITK.Image
        Image to display.
    title : str, optional
        Figure title.
    margin : float, optional
        Fractional margin around the axes.
    dpi : int, optional
        Figure resolution used to derive the figure size from the pixel grid.

    Raises
    ------
    RuntimeError
        If a 4D array is not a vector (RGB/RGBA) volume.
    """
    nda = sitk.GetArrayViewFromImage(img)
    spacing = img.GetSpacing()
    if nda.ndim == 3:
        # fastest dim, either component or x
        c = nda.shape[-1]
        # if the number of components is 3 or 4, consider it an RGB(A) image
        if c not in (3, 4):
            # scalar volume: keep only the middle z-slice
            nda = nda[nda.shape[0]//2, :, :]
    elif nda.ndim == 4:
        c = nda.shape[-1]
        if c not in (3, 4):
            # FIX: was `raise Runtime(...)` -- `Runtime` is undefined and
            # would itself raise NameError; RuntimeError is the intent.
            raise RuntimeError("Unable to show 3D-vector Image")
        # take a z-slice
        nda = nda[nda.shape[0]//2, :, :, :]
    ysize = nda.shape[0]
    xsize = nda.shape[1]
    # Make a figure big enough to accommodate an axis of xpixels by ypixels
    # as well as the ticklabels, etc...
    figsize = (1 + margin) * ysize / dpi, (1 + margin) * xsize / dpi
    fig = plt.figure(figsize=figsize, dpi=dpi)
    # Make the axis the right size...
    ax = fig.add_axes([margin, margin, 1 - 2*margin, 1 - 2*margin])
    # NOTE(review): the spacing[1]/spacing[0] pairing against xsize/ysize
    # looks swapped relative to SimpleITK's (x, y, z) spacing order and
    # numpy's (row, col) axes -- confirm against anisotropic images.
    extent = (0, xsize*spacing[1], ysize*spacing[0], 0)
    t = ax.imshow(nda, extent=extent, interpolation=None)
    if nda.ndim == 2:
        t.set_cmap("gray")
    if title:
        plt.title(title)
myshow(sitk.Expand(img2,[2,2]), title="Big Visibile Human Head")
# ## Tips and Tricks for Visualizing Segmentations
#
# We start by loading a segmented image. As the segmentation is just an image with integral data, we can display the labels as we would any other image.
img1_seg = sitk.ReadImage(fdata("2th_cthead1.png"))
myshow(img1_seg, "Label Image as Grayscale")
# We can also map the scalar label image to a color image as shown below.
myshow(sitk.LabelToRGB(img1_seg), title="Label Image as RGB")
# Most filters which take multiple images as arguments require that the images occupy the same physical space. That is, the pixel you are operating on must refer to the same location. Luckily for us, our image and labels do occupy the same physical space, allowing us to overlay the segmentation onto the original image.
myshow(sitk.LabelOverlay(img1, img1_seg), title="Label Overlayed")
# We can also overlay the labels as contours.
myshow(sitk.LabelOverlay(img1, sitk.LabelContour(img1_seg), 1.0))
# ## Tips and Tricks for 3D Image Visualization
#
# Now lets move on to visualizing real MRI images with segmentations. The Surgical Planning Laboratory at Brigham and Women's Hospital provides a wonderful Multi-modality MRI-based Atlas of the Brain that we can use.
#
# Please note, what is done here is for convenience and is not the common way images are displayed for radiological work.
# Load the T1, T2, and label volumes from the SPL brain atlas.
img_T1 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT1.nrrd"))
img_T2 = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/A1_grayT2.nrrd"))
img_labels = sitk.ReadImage(fdata("nac-hncma-atlas2013-Slicer4Version/Data/hncma-atlas.nrrd"))
myshow(img_T1)
myshow(img_T2)
myshow(sitk.LabelToRGB(img_labels))
size = img_T1.GetSize()
# Middle coronal slice (SimpleITK indexing is (x, y, z)).
myshow(img_T1[:,size[1]//2,:])
# One mid-volume slice per axis, tiled side by side.
slices =[img_T1[size[0]//2,:,:], img_T1[:,size[1]//2,:], img_T1[:,:,size[2]//2]]
myshow(sitk.Tile(slices, [3,1]), dpi=20)
nslices = 5
# NOTE(review): the step uses size[0] although the range runs over the z
# axis (size[2]) -- possibly intended to be size[2]//(nslices+1); confirm.
slices = [ img_T1[:,:,s] for s in range(0, size[2], size[0]//(nslices+1))]
myshow(sitk.Tile(slices, [1,0]))
# Let's create a version of the show methods which allows the selection of slices to be displayed.
def myshow3d(img, xslices=(), yslices=(), zslices=(), title=None, margin=0.05, dpi=80):
    """Tile selected x/y/z slices of a 3D image into a grid and display it.

    Each of xslices/yslices/zslices is an iterable of slice indices along the
    corresponding axis; every non-empty axis contributes one row to the tiled
    grid, padded with empty images to a common width. If all three are empty
    the original image is passed straight to myshow.

    Fixes vs. the original: the unused local ``size`` was removed, and the
    mutable-list default arguments were replaced with immutable tuples
    (behavior-identical; the defaults were never mutated).
    """
    img_xslices = [img[s, :, :] for s in xslices]
    img_yslices = [img[:, s, :] for s in yslices]
    img_zslices = [img[:, :, s] for s in zslices]
    # Width of the tiled grid = the longest of the three slice lists.
    maxlen = max(len(img_xslices), len(img_yslices), len(img_zslices))
    # Empty placeholder image used to pad shorter rows out to maxlen columns.
    img_null = sitk.Image([0, 0], img.GetPixelID(), img.GetNumberOfComponentsPerPixel())
    img_slices = []
    d = 0  # number of non-empty rows in the grid
    if len(img_xslices):
        img_slices += img_xslices + [img_null] * (maxlen - len(img_xslices))
        d += 1
    if len(img_yslices):
        img_slices += img_yslices + [img_null] * (maxlen - len(img_yslices))
        d += 1
    if len(img_zslices):
        img_slices += img_zslices + [img_null] * (maxlen - len(img_zslices))
        d += 1
    if maxlen != 0:
        if img.GetNumberOfComponentsPerPixel() == 1:
            img = sitk.Tile(img_slices, [maxlen, d])
        # TO DO check in code to get Tile Filter working with vector images
        else:
            # Vector images: tile each channel separately, then recompose.
            img_comps = []
            for i in range(0, img.GetNumberOfComponentsPerPixel()):
                img_slices_c = [sitk.VectorIndexSelectionCast(s, i) for s in img_slices]
                img_comps.append(sitk.Tile(img_slices_c, [maxlen, d]))
            img = sitk.Compose(img_comps)
    myshow(img, title, margin, dpi)
# Display selected y/z slices of each volume; low dpi keeps the montages small.
myshow3d(img_T1,yslices=range(50,size[1]-50,20), zslices=range(50,size[2]-50,20), dpi=30)
myshow3d(img_T2,yslices=range(50,size[1]-50,30), zslices=range(50,size[2]-50,20), dpi=30)
myshow3d(sitk.LabelToRGB(img_labels),yslices=range(50,size[1]-50,20), zslices=range(50,size[2]-50,20), dpi=30)
# We next visualize the T1 image with an overlay of the labels.
# + simpleitk_error_expected="Both images for LabelOverlayImageFilter don't match type or dimension!"
# Why doesn't this work? The images do overlap in physical space.
# This call is expected to raise: the images overlap physically but live on
# different voxel grids, so LabelOverlay rejects them (see the cell tag above).
myshow3d(sitk.LabelOverlay(img_T1,img_labels),yslices=range(50,size[1]-50,20), zslices=range(50,size[2]-50,20), dpi=30)
# -
# Two ways to solve our problem: (1) resample the labels onto the image grid (2) resample the image onto the label grid. The difference between the two from a computation standpoint depends on the grid sizes and on the interpolator used to estimate values at non-grid locations.
#
# Note interpolating a label image with an interpolator that can generate non-label values is problematic as you may end up with an image that has more classes/labels than your original. This is why we only use the nearest neighbor interpolator when working with label images.
# Option 1: Resample the label image using the identity transformation
# Nearest-neighbor interpolation keeps label values integral (no invented labels).
resampled_img_labels = sitk.Resample(img_labels, img_T1, sitk.Transform(), sitk.sitkNearestNeighbor,
                                     0.0, img_labels.GetPixelID())
# Overlay onto the T1 image, requires us to rescale the intensity of the T1 image to [0,255] and cast it so that it can
# be combined with the color overlay (we use an alpha blending of 0.5).
myshow3d(sitk.LabelOverlay(sitk.Cast(sitk.RescaleIntensity(img_T1), sitk.sitkUInt8),resampled_img_labels, 0.5),
         yslices=range(50,size[1]-50,20), zslices=range(50,size[2]-50,20), dpi=30)
# Option 2: Resample the T1 image using the identity transformation
# Linear interpolation is fine here because the grayscale image is continuous.
resampled_T1 = sitk.Resample(img_T1, img_labels, sitk.Transform(), sitk.sitkLinear,
                             0.0, img_T1.GetPixelID())
# Overlay onto the T1 image, requires us to rescale the intensity of the T1 image to [0,255] and cast it so that it can
# be combined with the color overlay (we use an alpha blending of 0.5).
myshow3d(sitk.LabelOverlay(sitk.Cast(sitk.RescaleIntensity(resampled_T1), sitk.sitkUInt8),img_labels, 0.5),
         yslices=range(50,size[1]-50,20), zslices=range(50,size[2]-50,20), dpi=30)
# Why are the two displays above different? (hint: in the calls to the "myshow3d" function the indexes of the y and z slices are the same).
# The ``myshow`` and ``myshow3d`` functions are really useful. They have been copied into a "myshow.py" file so that they can be imported into other notebooks.
|
Python/10_matplotlib's_imshow.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Image Captioning with LSTMs
# In the previous exercise you implemented a vanilla RNN and applied it to image captioning. In this notebook you will implement the LSTM update rule and use it for image captioning.
# +
# As usual, a bit of setup
import time, os, json
import numpy as np
import matplotlib.pyplot as plt
from cs231n.gradient_check import eval_numerical_gradient, eval_numerical_gradient_array
from cs231n.rnn_layers import *
from cs231n.captioning_solver import CaptioningSolver
from cs231n.classifiers.rnn import CaptioningRNN
from cs231n.coco_utils import load_coco_data, sample_coco_minibatch, decode_captions
from cs231n.image_utils import image_from_url
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading external modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
def rel_error(x, y):
    """Return the maximum elementwise relative error between arrays x and y.

    The denominator is clamped at 1e-8 so identical zero entries compare as 0
    instead of dividing by zero.
    """
    denom = np.maximum(1e-8, np.abs(x) + np.abs(y))
    return np.max(np.abs(x - y) / denom)
# -
# # Load MS-COCO data
# As in the previous notebook, we will use the Microsoft COCO dataset for captioning.
# +
# Load COCO data from disk; this returns a dictionary
# We'll work with dimensionality-reduced features for this notebook, but feel
# free to experiment with the original features by changing the flag below.
# Load the COCO captioning data dict; pca_features=True selects the
# dimensionality-reduced image features.
data = load_coco_data(pca_features=True)
# Print out all the keys and values from the data dictionary
# NOTE(review): `type(v) == np.ndarray` would normally be `isinstance(...)`;
# kept as-is since subclass behavior here is only cosmetic (print formatting).
for k, v in data.items():
    if type(v) == np.ndarray:
        print(k, type(v), v.shape, v.dtype)
    else:
        print(k, type(v), len(v))
# -
# # LSTM
# If you read recent papers, you'll see that many people use a variant on the vanilla RNN called Long Short-Term Memory (LSTM) RNNs. Vanilla RNNs can be tough to train on long sequences due to vanishing and exploding gradients caused by repeated matrix multiplication. LSTMs solve this problem by replacing the simple update rule of the vanilla RNN with a gating mechanism as follows.
#
# Similar to the vanilla RNN, at each timestep we receive an input $x_t\in\mathbb{R}^D$ and the previous hidden state $h_{t-1}\in\mathbb{R}^H$; the LSTM also maintains an $H$-dimensional *cell state*, so we also receive the previous cell state $c_{t-1}\in\mathbb{R}^H$. The learnable parameters of the LSTM are an *input-to-hidden* matrix $W_x\in\mathbb{R}^{4H\times D}$, a *hidden-to-hidden* matrix $W_h\in\mathbb{R}^{4H\times H}$ and a *bias vector* $b\in\mathbb{R}^{4H}$.
#
# At each timestep we first compute an *activation vector* $a\in\mathbb{R}^{4H}$ as $a=W_xx_t + W_hh_{t-1}+b$. We then divide this into four vectors $a_i,a_f,a_o,a_g\in\mathbb{R}^H$ where $a_i$ consists of the first $H$ elements of $a$, $a_f$ is the next $H$ elements of $a$, etc. We then compute the *input gate* $i\in\mathbb{R}^H$, *forget gate* $f\in\mathbb{R}^H$, *output gate* $o\in\mathbb{R}^H$ and *block input* $g\in\mathbb{R}^H$ as
#
# $$
# \begin{align*}
# i = \sigma(a_i) \hspace{2pc}
# f = \sigma(a_f) \hspace{2pc}
# o = \sigma(a_o) \hspace{2pc}
# g = \tanh(a_g)
# \end{align*}
# $$
#
# where $\sigma$ is the sigmoid function and $\tanh$ is the hyperbolic tangent, both applied elementwise.
#
# Finally we compute the next cell state $c_t$ and next hidden state $h_t$ as
#
# $$
# c_{t} = f\odot c_{t-1} + i\odot g \hspace{4pc}
# h_t = o\odot\tanh(c_t)
# $$
#
# where $\odot$ is the elementwise product of vectors.
#
# In the rest of the notebook we will implement the LSTM update rule and apply it to the image captioning task.
#
# In the code, we assume that data is stored in batches so that $X_t \in \mathbb{R}^{N\times D}$, and will work with *transposed* versions of the parameters: $W_x \in \mathbb{R}^{D \times 4H}$, $W_h \in \mathbb{R}^{H\times 4H}$ so that activations $A \in \mathbb{R}^{N\times 4H}$ can be computed efficiently as $A = X_t W_x + H_{t-1} W_h$
# # LSTM: step forward
# Implement the forward pass for a single timestep of an LSTM in the `lstm_step_forward` function in the file `cs231n/rnn_layers.py`. This should be similar to the `rnn_step_forward` function that you implemented above, but using the LSTM update rule instead.
#
# Once you are done, run the following to perform a simple test of your implementation. You should see errors on the order of `e-8` or less.
# +
# Sanity-check lstm_step_forward on small deterministic inputs:
# N examples, D input dims, H hidden dims; linspace gives reproducible values.
N, D, H = 3, 4, 5
x = np.linspace(-0.4, 1.2, num=N*D).reshape(N, D)
prev_h = np.linspace(-0.3, 0.7, num=N*H).reshape(N, H)
prev_c = np.linspace(-0.4, 0.9, num=N*H).reshape(N, H)
# LSTM packs the i/f/o/g gate weights into single 4H-wide matrices.
Wx = np.linspace(-2.1, 1.3, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.7, 2.2, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.3, 0.7, num=4*H)
next_h, next_c, cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
# Reference values; errors should be on the order of 1e-8 or smaller.
expected_next_h = np.asarray([
    [ 0.24635157, 0.28610883, 0.32240467, 0.35525807, 0.38474904],
    [ 0.49223563, 0.55611431, 0.61507696, 0.66844003, 0.7159181 ],
    [ 0.56735664, 0.66310127, 0.74419266, 0.80889665, 0.858299 ]])
expected_next_c = np.asarray([
    [ 0.32986176, 0.39145139, 0.451556, 0.51014116, 0.56717407],
    [ 0.66382255, 0.76674007, 0.87195994, 0.97902709, 1.08751345],
    [ 0.74192008, 0.90592151, 1.07717006, 1.25120233, 1.42395676]])
print('next_h error: ', rel_error(expected_next_h, next_h))
print('next_c error: ', rel_error(expected_next_c, next_c))
# -
# # LSTM: step backward
# Implement the backward pass for a single LSTM timestep in the function `lstm_step_backward` in the file `cs231n/rnn_layers.py`. Once you are done, run the following to perform numeric gradient checking on your implementation. You should see errors on the order of `e-7` or less.
# +
# Numeric gradient check for lstm_step_backward. Fixed seed for reproducibility.
np.random.seed(231)
N, D, H = 4, 5, 6
x = np.random.randn(N, D)
prev_h = np.random.randn(N, H)
prev_c = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
next_h, next_c, cache = lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)
# Random upstream gradients for both outputs (hidden state and cell state).
dnext_h = np.random.randn(*next_h.shape)
dnext_c = np.random.randn(*next_c.shape)
# Forward wrappers: [0] selects next_h, [1] selects next_c. The lambda
# parameters shadow but otherwise ignore the outer arrays — presumably
# eval_numerical_gradient_array perturbs the passed array in place, so closing
# over the originals is intentional; verify against cs231n's gradient_check.
fx_h = lambda x: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fh_h = lambda h: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fc_h = lambda c: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fWx_h = lambda Wx: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fWh_h = lambda Wh: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fb_h = lambda b: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[0]
fx_c = lambda x: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fh_c = lambda h: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fc_c = lambda c: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fWx_c = lambda Wx: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fWh_c = lambda Wh: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
fb_c = lambda b: lstm_step_forward(x, prev_h, prev_c, Wx, Wh, b)[1]
num_grad = eval_numerical_gradient_array
# The upstream gradient flows through both next_h and next_c, so the numeric
# gradient for each input is the sum of both contributions.
dx_num = num_grad(fx_h, x, dnext_h) + num_grad(fx_c, x, dnext_c)
dh_num = num_grad(fh_h, prev_h, dnext_h) + num_grad(fh_c, prev_h, dnext_c)
dc_num = num_grad(fc_h, prev_c, dnext_h) + num_grad(fc_c, prev_c, dnext_c)
dWx_num = num_grad(fWx_h, Wx, dnext_h) + num_grad(fWx_c, Wx, dnext_c)
dWh_num = num_grad(fWh_h, Wh, dnext_h) + num_grad(fWh_c, Wh, dnext_c)
db_num = num_grad(fb_h, b, dnext_h) + num_grad(fb_c, b, dnext_c)
dx, dh, dc, dWx, dWh, db = lstm_step_backward(dnext_h, dnext_c, cache)
# Errors should be on the order of 1e-7 or smaller.
print('dx error: ', rel_error(dx_num, dx))
print('dh error: ', rel_error(dh_num, dh))
print('dc error: ', rel_error(dc_num, dc))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # LSTM: forward
# In the function `lstm_forward` in the file `cs231n/rnn_layers.py`, implement the `lstm_forward` function to run an LSTM forward on an entire timeseries of data.
#
# When you are done, run the following to check your implementation. You should see an error on the order of `e-7` or less.
# +
# Sanity-check lstm_forward over a full length-T sequence with fixed inputs.
N, D, H, T = 2, 5, 4, 3
x = np.linspace(-0.4, 0.6, num=N*T*D).reshape(N, T, D)
h0 = np.linspace(-0.4, 0.8, num=N*H).reshape(N, H)
Wx = np.linspace(-0.2, 0.9, num=4*D*H).reshape(D, 4 * H)
Wh = np.linspace(-0.3, 0.6, num=4*H*H).reshape(H, 4 * H)
b = np.linspace(0.2, 0.7, num=4*H)
h, cache = lstm_forward(x, h0, Wx, Wh, b)
# Reference hidden states; error should be on the order of 1e-7 or smaller.
expected_h = np.asarray([
    [[ 0.01764008, 0.01823233, 0.01882671, 0.0194232 ],
     [ 0.11287491, 0.12146228, 0.13018446, 0.13902939],
     [ 0.31358768, 0.33338627, 0.35304453, 0.37250975]],
    [[ 0.45767879, 0.4761092, 0.4936887, 0.51041945],
     [ 0.6704845, 0.69350089, 0.71486014, 0.7346449 ],
     [ 0.81733511, 0.83677871, 0.85403753, 0.86935314]]])
print('h error: ', rel_error(expected_h, h))
# -
# # LSTM: backward
# Implement the backward pass for an LSTM over an entire timeseries of data in the function `lstm_backward` in the file `cs231n/rnn_layers.py`. When you are done, run the following to perform numeric gradient checking on your implementation. You should see errors on the order of `e-8` or less. (For `dWh`, it's fine if your error is on the order of `e-6` or less).
# +
from cs231n.rnn_layers import lstm_forward, lstm_backward
# Numeric gradient check for lstm_backward over an entire timeseries.
np.random.seed(231)
N, D, T, H = 2, 3, 10, 6
x = np.random.randn(N, T, D)
h0 = np.random.randn(N, H)
Wx = np.random.randn(D, 4 * H)
Wh = np.random.randn(H, 4 * H)
b = np.random.randn(4 * H)
out, cache = lstm_forward(x, h0, Wx, Wh, b)
dout = np.random.randn(*out.shape)
dx, dh0, dWx, dWh, db = lstm_backward(dout, cache)
# Forward wrappers for the numeric gradient checker; the lambdas ignore their
# argument because the checker perturbs the passed array in place.
fx = lambda x: lstm_forward(x, h0, Wx, Wh, b)[0]
fh0 = lambda h0: lstm_forward(x, h0, Wx, Wh, b)[0]
fWx = lambda Wx: lstm_forward(x, h0, Wx, Wh, b)[0]
fWh = lambda Wh: lstm_forward(x, h0, Wx, Wh, b)[0]
fb = lambda b: lstm_forward(x, h0, Wx, Wh, b)[0]
dx_num = eval_numerical_gradient_array(fx, x, dout)
dh0_num = eval_numerical_gradient_array(fh0, h0, dout)
dWx_num = eval_numerical_gradient_array(fWx, Wx, dout)
dWh_num = eval_numerical_gradient_array(fWh, Wh, dout)
db_num = eval_numerical_gradient_array(fb, b, dout)
# Errors should be on the order of 1e-8 (1e-6 is acceptable for dWh).
print('dx error: ', rel_error(dx_num, dx))
print('dh0 error: ', rel_error(dh0_num, dh0))
print('dWx error: ', rel_error(dWx_num, dWx))
print('dWh error: ', rel_error(dWh_num, dWh))
print('db error: ', rel_error(db_num, db))
# -
# # INLINE QUESTION
# Recall that in an LSTM the input gate $i$, forget gate $f$, and output gate $o$ are all outputs of a sigmoid function. Why don't we use the ReLU activation function instead of sigmoid to compute these values? Explain.
# # LSTM captioning model
#
# Now that you have implemented an LSTM, update the implementation of the `loss` method of the `CaptioningRNN` class in the file `cs231n/classifiers/rnn.py` to handle the case where `self.cell_type` is `lstm`. This should require adding less than 10 lines of code.
#
# Once you have done so, run the following to check your implementation. You should see a difference on the order of `e-10` or less.
# +
# Check the LSTM branch of CaptioningRNN.loss against a precomputed reference
# loss using fully deterministic parameters.
N, D, W, H = 10, 20, 30, 40
word_to_idx = {'<NULL>': 0, 'cat': 2, 'dog': 3}
V = len(word_to_idx)
T = 13
model = CaptioningRNN(word_to_idx,
                      input_dim=D,
                      wordvec_dim=W,
                      hidden_dim=H,
                      cell_type='lstm',
                      dtype=np.float64)
# Set all model parameters to fixed values
for k, v in model.params.items():
    model.params[k] = np.linspace(-1.4, 1.3, num=v.size).reshape(*v.shape)
features = np.linspace(-0.5, 1.7, num=N*D).reshape(N, D)
# Caption tokens cycle through the vocabulary indices.
captions = (np.arange(N * T) % V).reshape(N, T)
loss, grads = model.loss(features, captions)
expected_loss = 9.82445935443
# Difference should be on the order of 1e-10 or smaller.
print('loss: ', loss)
print('expected loss: ', expected_loss)
print('difference: ', abs(loss - expected_loss))
# -
# # Overfit LSTM captioning model
# Run the following to overfit an LSTM captioning model on the same small dataset as we used for the RNN previously. You should see a final loss less than 0.5.
# +
# Deliberately overfit an LSTM captioning model on 50 training examples; the
# final training loss should drop below 0.5 if the implementation is correct.
np.random.seed(231)
small_data = load_coco_data(max_train=50)
small_lstm_model = CaptioningRNN(
          cell_type='lstm',
          word_to_idx=data['word_to_idx'],
          input_dim=data['train_features'].shape[1],
          hidden_dim=512,
          wordvec_dim=256,
          dtype=np.float32,
        )
small_lstm_solver = CaptioningSolver(small_lstm_model, small_data,
           update_rule='adam',
           num_epochs=50,
           batch_size=25,
           optim_config={
             'learning_rate': 5e-3,
           },
           lr_decay=0.995,
           verbose=True, print_every=10,
         )
small_lstm_solver.train()
# Plot the training losses
plt.plot(small_lstm_solver.loss_history)
plt.xlabel('Iteration')
plt.ylabel('Loss')
plt.title('Training loss history')
plt.show()
# -
# # LSTM test-time sampling
# Modify the `sample` method of the `CaptioningRNN` class to handle the case where `self.cell_type` is `lstm`. This should take fewer than 10 lines of code.
#
# When you are done run the following to sample from your overfit LSTM model on some training and validation set samples. As with the RNN, training results should be very good, and validation results probably won't make a lot of sense (because we're overfitting).
# Sample captions from the overfit model on both splits. Training samples
# should look good; validation samples will be nonsense (we overfit on purpose).
for split in ['train', 'val']:
    minibatch = sample_coco_minibatch(small_data, split=split, batch_size=2)
    gt_captions, features, urls = minibatch
    # Decode integer caption tokens back to word strings for display.
    gt_captions = decode_captions(gt_captions, data['idx_to_word'])
    sample_captions = small_lstm_model.sample(features)
    sample_captions = decode_captions(sample_captions, data['idx_to_word'])
    for gt_caption, sample_caption, url in zip(gt_captions, sample_captions, urls):
        plt.imshow(image_from_url(url))
        plt.title('%s\n%s\nGT:%s' % (split, sample_caption, gt_caption))
        plt.axis('off')
        plt.show()
|
assignment3/LSTM_Captioning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # IEEE Fraud Feature Engineering and EDA
# _By <NAME>_
#
# - V1 - 20/08/2019 - First Commit <br>
#
# **Aim:** <br>
# Build Features for Credit Card Fraud Project
# + _kg_hide-output=true
# https://www.kaggle.com/arnocandel/python-datatable
# more information: http://github.com/h2oai/datatable
# !pip install --upgrade https://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pydatatable/0.8.0.dev115/x86_64-centos7/datatable-0.8.0.dev115-cp36-cp36m-linux_x86_64.whl
# Latest Pandas version
# !pip install -q 'pandas==0.25' --force-reinstall
# -
import datatable as dt
import pandas as pd
print("DataTable version:", dt.__version__)
print("Pandas version:", pd.__version__)
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load in
import numpy as np # linear algebra
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk('/kaggle/input'):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import os
from contextlib import contextmanager
import gc; gc.enable()
import pprint
import time
import datetime
import csv
import random
from sklearn import preprocessing
# Viz
import matplotlib.pyplot as plt
import seaborn as sns
# + _kg_hide-input=true
print("Define DF Schema..")
# Name of the binary target column in the transaction table.
target_var = 'isFraud'
# Map raw email domains to their owning provider; used later to bin the
# P_emaildomain / R_emaildomain columns into coarser categories.
emails = {'gmail': 'google',
          'att.net': 'att',
          'twc.com': 'spectrum',
          'scranton.edu': 'other',
          'optonline.net': 'other',
          'hotmail.co.uk': 'microsoft',
          'comcast.net': 'other',
          'yahoo.com.mx': 'yahoo',
          'yahoo.fr': 'yahoo',
          'yahoo.es': 'yahoo',
          'charter.net': 'spectrum',
          'live.com': 'microsoft',
          'aim.com': 'aol',
          'hotmail.de': 'microsoft',
          'centurylink.net': 'centurylink',
          'gmail.com': 'google',
          'me.com': 'apple',
          'earthlink.net': 'other',
          'gmx.de': 'other',
          'web.de': 'other',
          'cfl.rr.com': 'other',
          'hotmail.com': 'microsoft',
          'protonmail.com': 'other',
          'hotmail.fr': 'microsoft',
          'windstream.net': 'other',
          'outlook.es': 'microsoft',
          'yahoo.co.jp': 'yahoo',
          'yahoo.de': 'yahoo',
          'servicios-ta.com': 'other',
          'netzero.net': 'other',
          'suddenlink.net': 'other',
          'roadrunner.com': 'other',
          'sc.rr.com': 'other',
          'live.fr': 'microsoft',
          'verizon.net': 'yahoo',
          'msn.com': 'microsoft',
          'q.com': 'centurylink',
          'prodigy.net.mx': 'att',
          'frontier.com': 'yahoo',
          'anonymous.com': 'other',
          'rocketmail.com': 'yahoo',
          'sbcglobal.net': 'att',
          'frontiernet.net': 'yahoo',
          'ymail.com': 'yahoo',
          'outlook.com': 'microsoft',
          'mail.com': 'other',
          'bellsouth.net': 'other',
          'embarqmail.com': 'centurylink',
          'cableone.net': 'other',
          'hotmail.es': 'microsoft',
          'mac.com': 'apple',
          'yahoo.co.uk': 'yahoo',
          'netzero.com': 'other',
          'yahoo.com': 'yahoo',
          'live.com.mx': 'microsoft',
          'ptd.net': 'other',
          'cox.net': 'other',
          'aol.com': 'aol',
          'juno.com': 'other',
          'icloud.com': 'apple'}
# Domain suffixes treated as US when deriving the *_suffix email features.
us_emails = ['gmail', 'net', 'edu']
# +
def reduce_mem_usage(df, verbose=True):
    """Downcast numeric columns of ``df`` to the smallest dtype holding their range.

    For each numeric column, inspects min/max and casts to the narrowest
    int/float subtype that can represent every value (int8..int64,
    float16..float64). Mutates and returns ``df``.

    Fix vs. the original: the ``verbose`` parameter was accepted but ignored
    (memory reports printed unconditionally); it now gates the prints, and the
    "Memeory" typo in the report headers was corrected.

    Note: float16 has ~3 significant decimal digits — acceptable here for
    memory reduction, but lossy for high-precision columns.
    """
    if verbose:
        print("\nMemory Usage Before:")
        print(df.info(memory_usage = 'deep'))
    numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
    for col in df.columns:
        col_type = df[col].dtypes
        if col_type in numerics:
            c_min = df[col].min()
            c_max = df[col].max()
            if str(col_type)[:3] == 'int':
                # Pick the narrowest signed integer type that holds [c_min, c_max].
                if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
                    df[col] = df[col].astype(np.int8)
                elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
                    df[col] = df[col].astype(np.int16)
                elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
                    df[col] = df[col].astype(np.int32)
                elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
                    df[col] = df[col].astype(np.int64)
            else:
                # Pick the narrowest float type whose representable range fits.
                if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
                    df[col] = df[col].astype(np.float16)
                elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
                    df[col] = df[col].astype(np.float32)
                else:
                    df[col] = df[col].astype(np.float64)
    if verbose:
        print("Memory Usage After:")
        print(df.info(memory_usage = 'deep'))
    return df
@contextmanager
def timer(name):
    """
    Time Each Process: print how long the wrapped block took, in minutes.
    """
    start = time.time()
    yield
    elapsed_minutes = round((time.time() - start) / 60, 2)
    print('\n[{}] done in {} Minutes'.format(name, elapsed_minutes))
# + _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a" _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0"
def fraud_aggregate_function(dataframe):
    """Add card/address-normalized ratio features and email-domain bins.

    Each *_to_mean_* / *_to_std_* column divides a raw value by the mean/std of
    that value within its card1/card4/addr1 group, so amounts are expressed
    relative to the cardholder's or region's typical behavior. Also adds a log
    transaction amount and binned email-domain columns. Returns the dataframe.

    NOTE(review): np.log will produce -inf/NaN for non-positive
    TransactionAmt values — assumed strictly positive in this dataset; confirm.
    """
    # Credit https://www.kaggle.com/davidcairuz/feature-engineering-lightgbm-w-gpu
    dataframe = (dataframe.assign(
        TransactionAmt_to_mean_card1 = (dataframe['TransactionAmt'] / dataframe.groupby(['card1'])['TransactionAmt'].transform('mean')),
        TransactionAmt_to_std_card1 = (dataframe['TransactionAmt'] / dataframe.groupby(['card1'])['TransactionAmt'].transform('std')),
        TransactionAmt_to_std_card4 = (dataframe['TransactionAmt'] / dataframe.groupby(['card4'])['TransactionAmt'].transform('std')),
        TransactionAmt_to_mean_card4 = (dataframe['TransactionAmt'] / dataframe.groupby(['card4'])['TransactionAmt'].transform('mean')),
        id_02_to_mean_card1 = (dataframe['id_02'] / dataframe.groupby(['card1'])['id_02'].transform('mean')),
        id_02_to_std_card1 = (dataframe['id_02'] / dataframe.groupby(['card1'])['id_02'].transform('std')),
        id_02_to_std_card4 = (dataframe['id_02'] / dataframe.groupby(['card4'])['id_02'].transform('std')),
        id_02_to_mean_card4 = (dataframe['id_02'] / dataframe.groupby(['card4'])['id_02'].transform('mean')),
        D15_to_mean_card1 = (dataframe['D15'] / dataframe.groupby(['card1'])['D15'].transform('mean')),
        D15_to_std_card1 = (dataframe['D15'] / dataframe.groupby(['card1'])['D15'].transform('std')),
        D15_to_mean_card4 = (dataframe['D15'] / dataframe.groupby(['card4'])['D15'].transform('mean')),
        D15_to_std_card4 = (dataframe['D15'] / dataframe.groupby(['card4'])['D15'].transform('std')),
        D15_to_mean_addr1 = (dataframe['D15'] / dataframe.groupby(['addr1'])['D15'].transform('mean')),
        D15_to_std_addr1 = (dataframe['D15'] / dataframe.groupby(['addr1'])['D15'].transform('std')),
        TransactionAmt_Log = np.log(dataframe['TransactionAmt'])
    ))
    # https://www.kaggle.com/c/ieee-fraud-detection/discussion/100499
    # Bin raw domains via the module-level `emails` map and derive the TLD
    # suffix, collapsing common US suffixes (see `us_emails`) to 'us'.
    for c in ['P_emaildomain', 'R_emaildomain']:
        dataframe[c + '_bin'] = dataframe[c].map(emails)
        dataframe[c + '_suffix'] = dataframe[c].map(lambda x: str(x).split('.')[-1])
        dataframe[c + '_suffix'] = dataframe[c + '_suffix'].map(lambda x: x if str(x) not in us_emails else 'us')
    return dataframe
# Device Features
def id_split(dataframe):
    """Split raw device/OS/browser identity strings into structured columns.

    Parses DeviceInfo, id_30/31/33/34 and id_23 into name/version components,
    then normalizes device_name to a vendor via substring matching. The
    .str.contains chain is order-sensitive: later matches overwrite earlier
    ones (e.g. '-L' re-buckets some rows already labeled). Rare device names
    (<200 occurrences) collapse to "Others". Returns the dataframe.
    """
    # https://www.kaggle.com/davidcairuz/feature-engineering-lightgbm-w-gpu
    dataframe['device_name'] = dataframe['DeviceInfo'].str.split('/', expand=True)[0]
    dataframe['device_version'] = dataframe['DeviceInfo'].str.split('/', expand=True)[1]
    dataframe['OS_id_30'] = dataframe['id_30'].str.split(' ', expand=True)[0]
    dataframe['version_id_30'] = dataframe['id_30'].str.split(' ', expand=True)[1]
    dataframe['browser_id_31'] = dataframe['id_31'].str.split(' ', expand=True)[0]
    dataframe['version_id_31'] = dataframe['id_31'].str.split(' ', expand=True)[1]
    dataframe['screen_width'] = dataframe['id_33'].str.split('x', expand=True)[0]
    dataframe['screen_height'] = dataframe['id_33'].str.split('x', expand=True)[1]
    dataframe['id_34'] = dataframe['id_34'].str.split(':', expand=True)[1]
    dataframe['id_23'] = dataframe['id_23'].str.split(':', expand=True)[1]
    # Vendor normalization by substring; na=False keeps NaN rows unmatched.
    dataframe.loc[dataframe['device_name'].str.contains('SM', na=False), 'device_name'] = 'Samsung'
    dataframe.loc[dataframe['device_name'].str.contains('SAMSUNG', na=False), 'device_name'] = 'Samsung'
    dataframe.loc[dataframe['device_name'].str.contains('GT-', na=False), 'device_name'] = 'Samsung'
    dataframe.loc[dataframe['device_name'].str.contains('Moto G', na=False), 'device_name'] = 'Motorola'
    dataframe.loc[dataframe['device_name'].str.contains('Moto', na=False), 'device_name'] = 'Motorola'
    dataframe.loc[dataframe['device_name'].str.contains('moto', na=False), 'device_name'] = 'Motorola'
    dataframe.loc[dataframe['device_name'].str.contains('LG-', na=False), 'device_name'] = 'LG'
    dataframe.loc[dataframe['device_name'].str.contains('rv:', na=False), 'device_name'] = 'RV'
    dataframe.loc[dataframe['device_name'].str.contains('HUAWEI', na=False), 'device_name'] = 'Huawei'
    dataframe.loc[dataframe['device_name'].str.contains('ALE-', na=False), 'device_name'] = 'Huawei'
    dataframe.loc[dataframe['device_name'].str.contains('-L', na=False), 'device_name'] = 'Huawei'
    dataframe.loc[dataframe['device_name'].str.contains('Blade', na=False), 'device_name'] = 'ZTE'
    dataframe.loc[dataframe['device_name'].str.contains('BLADE', na=False), 'device_name'] = 'ZTE'
    dataframe.loc[dataframe['device_name'].str.contains('Linux', na=False), 'device_name'] = 'Linux'
    # NOTE(review): 'XT' codes are mapped to 'Sony' here although XT model
    # numbers are typically Motorola; kept as-is from the source kernel.
    dataframe.loc[dataframe['device_name'].str.contains('XT', na=False), 'device_name'] = 'Sony'
    dataframe.loc[dataframe['device_name'].str.contains('HTC', na=False), 'device_name'] = 'HTC'
    dataframe.loc[dataframe['device_name'].str.contains('ASUS', na=False), 'device_name'] = 'Asus'
    # Bucket device names seen fewer than 200 times into a catch-all category.
    dataframe.loc[dataframe.device_name.isin(dataframe.device_name.value_counts()[dataframe.device_name.value_counts() < 200].index), 'device_name'] = "Others"
    # Flag that this row carried identity info (set before the identity join is consumed).
    dataframe['had_id'] = 1
    gc.collect()
    return dataframe
def fraud_preprocessing(debug = None):
    """End-to-end loading and feature engineering for the IEEE fraud data.

    Reads the four competition CSVs with datatable, left-joins identity onto
    transactions, engineers count/time/rolling features on the combined
    train+test frame, and label-encodes all object columns.

    Parameters
    ----------
    debug : int or None
        If given, keep only the first `debug` rows of train and test.

    Returns
    -------
    df : pandas.DataFrame
        Combined train+test frame, indexed by TransactionID.
    y : pandas.Series
        Target values for the train rows (column name from global target_var).
    original_cols : pandas.Index
        Raw column names before any engineering (used later to select
        only the new columns for writing).
    """
    print("Starting Pre-Processing..")
    with timer("Load Tables"):
        folder_path = '../input/ieee-fraud-detection/'
        # parse all files
        with timer("Read Tables"):
            # Read with DataTables (H20.ML) — faster CSV parsing than pandas
            train_identity = dt.fread(f'{folder_path}train_identity.csv')
            test_identity = dt.fread(f'{folder_path}test_identity.csv')
            train_transaction = dt.fread(f'{folder_path}train_transaction.csv')
            test_transaction = dt.fread(f'{folder_path}test_transaction.csv')
        # join frames
        with timer("Join Identity Tables"):
            # datatable keyed join: attach identity columns by TransactionID
            train_identity.key = 'TransactionID'
            test_identity.key = 'TransactionID'
            train = train_transaction[:, :, dt.join(train_identity)]
            test = test_transaction[:, :, dt.join(test_identity)]
        with timer("To Pandas"):
            if debug:
                train = reduce_mem_usage(train.to_pandas().head(debug))
                test = reduce_mem_usage(test.to_pandas().head(debug))
            else:
                train = reduce_mem_usage(train.to_pandas())
                test = reduce_mem_usage(test.to_pandas())
            traindex = train.index
            testdex = test.index
            original_cols = train.columns
    # Get column groups: map each anonymised prefix to its column names.
    # NOTE(review): startswith('D') also matches the 'Device*' columns and
    # 'C' matches nothing else, but group overlap may be unintended — confirm.
    prefix_cols = {}
    prefix = ['C','D','Device','M','Transaction','V','addr','card','dist','id']
    for i,p in enumerate(prefix):
        prefix_cols[p] = [x for x in train.columns.tolist() if x.startswith(prefix[i])]
    with timer("Train/Test Split Feature Engineering"):
        # Encoding - count encoding over train and test combined
        for feature in ['card1', 'card2', 'card3', 'card4', 'card5', 'card6', 'id_36']:
            train[feature + '_count_full'] = train[feature].map(pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
            test[feature + '_count_full'] = test[feature].map(pd.concat([train[feature], test[feature]], ignore_index=True).value_counts(dropna=False))
        # Encoding - count encoding separately for train and test
        for feature in ['id_01', 'id_31', 'id_33', 'id_36']:
            train[feature + '_count_dist'] = train[feature].map(train[feature].value_counts(dropna=False))
            test[feature + '_count_dist'] = test[feature].map(test[feature].value_counts(dropna=False))
        # Aggregated Features
        train = fraud_aggregate_function(train)
        test = fraud_aggregate_function(test)
        # Combine train and test into a single frame for shared engineering
        y = train[target_var].copy()
        df = pd.concat([train,test],axis = 0).reset_index()
        del train, test
    with timer("Whole Feature Engineering"):
        START_DATE = '2017-12-01'
        startdate = datetime.datetime.strptime(START_DATE, '%Y-%m-%d')
        df = df.assign(
            # New feature - decimal part of the transaction amount
            TransactionAmt_decimal = ((df['TransactionAmt'] - df['TransactionAmt'].astype(int)) * 1000).astype(int),
            # Count encoding for card1 feature.
            # Explained in this kernel: https://www.kaggle.com/nroman/eda-for-cis-fraud-detection
            card1_count_full = df['card1'].map(df['card1'].value_counts(dropna=False)),
            # https://www.kaggle.com/fchmiel/day-and-time-powerful-predictive-feature
            Transaction_day_of_week = np.floor((df['TransactionDT'] / (3600 * 24) - 1) % 7),
            Transaction_hour = np.floor(df['TransactionDT'] / 3600) % 24,
            # convert the seconds-offset to an actual timestamp
            TransactionDT = df['TransactionDT'].apply(lambda x: (startdate + datetime.timedelta(seconds = x))),
        )
        df = df.assign(
            # Time of Day (from the timestamp created just above)
            dow = df['TransactionDT'].dt.dayofweek,
            year = df['TransactionDT'].dt.year,
            month = df['TransactionDT'].dt.month,
            hour = df['TransactionDT'].dt.hour,
            day = df['TransactionDT'].dt.day,
            # per-row share of missing values / of zeros among non-missing
            all_group_nan_sum = df.isnull().sum(axis=1) / df.shape[1],
            all_group_0_count = (df == 0).astype(int).sum(axis=1) / (df.shape[1] - df.isnull().sum(axis=1))
        )
        # Create Features based on anonymised prefix groups
        for p in prefix_cols:
            column_set = prefix_cols[p]
            # Take NA count (as a fraction of the group's columns)
            df[p + "group_nan_sum"] = df[column_set].isnull().sum(axis=1) / df[column_set].shape[1]
            # Take SUM/Mean if numeric
            numeric_cols = [x for x in column_set if df[x].dtype != object]
            if numeric_cols:
                df[p + "group_sum"] = df[column_set].sum(axis=1)
                df[p + "group_mean"] = df[column_set].mean(axis=1)
            # Zero Count
            df[p + "group_0_count"] = (df[column_set] == 0).astype(int).sum(axis=1) / (df[column_set].shape[1] - df[p + "group_nan_sum"])
    with timer("Rolling Features"):
        # per-group rolling sum/mean/count of the amount over time windows
        prefix = ['C', 'card']
        value = "TransactionAmt"
        timevar = "TransactionDT"
        for window in ['12h', '5d']:
            with timer("TimeFrame: {}".format(window)):
                for i, p in enumerate(prefix):
                    for var in prefix_cols[p]:
                        gb_var = [var]
                        df = pd.merge(df, (df.set_index(timevar)
                                           .sort_values(timevar)
                                           .groupby(gb_var)
                                           .rolling(window)[value].sum()
                                           .rename(gb_var[0] + "_AMT_" + window + "_sum")
                                           .reset_index()
                                           .drop_duplicates([timevar] + gb_var)),
                                      on= [timevar] + gb_var, how= 'left')
                        df = pd.merge(df, (df.set_index(timevar)
                                           .sort_values(timevar)
                                           .groupby(gb_var)
                                           .rolling(window)[value].mean()
                                           .rename(gb_var[0] + "_AMT_" + window + "_mean")
                                           .reset_index()
                                           .drop_duplicates([timevar] + gb_var)),
                                      on= [timevar] + gb_var, how= 'left')
                        df = pd.merge(df, (df.set_index(timevar)
                                           .sort_values(timevar)
                                           .groupby(gb_var)
                                           .rolling(window)[value].count()
                                           .rename(gb_var[0] + "_AMT_" + window + "_count")
                                           .reset_index()
                                           .drop_duplicates([timevar] + gb_var)),
                                      on= [timevar] + gb_var, how= 'left')
    with timer("Label Encode"):
        categorical_cols = []
        # Label Encoding for every remaining object column
        for f in df.columns:
            if df[f].dtype=='object':
                categorical_cols += [f]
                lbl = preprocessing.LabelEncoder()
                df[f] = lbl.fit_transform(df[f].astype(str))
        # sentinel for missing values after encoding
        df.fillna(-9,inplace=True)
    df.set_index("TransactionID",inplace=True)
    # One more memory reduction
    df = reduce_mem_usage(df)
    print("Total Shape: {} Rows, {} Columns".format(*df.shape))
    return df, y, original_cols
# -
DEBUG = None # None for no debug, else number of rows
df, y, original_cols = fraud_preprocessing(debug = DEBUG)
# NOTE(review): isFraud == 2 presumably marks test rows after the join —
# confirm; they are turned into NaN so notnull() identifies train rows.
df.loc[df.isFraud == 2, 'isFraud'] = np.nan
# + _kg_hide-output=true
with timer("Write Table"):
    # persist only the engineered columns (plus the target) for reuse
    write_cols = [x for x in df.columns if x not in original_cols] + [target_var]
    print("Writing {} Column".format(len(write_cols)))
    df.loc[df[target_var].notnull(),write_cols].to_csv("train_fraud_fe_nb.csv", index = True)
    df.loc[df[target_var].isnull(),write_cols].to_csv("test_fraud_fe_nb.csv", index = True)
# -
# Features for EDA
df['yrmth'] = df.year.astype(str) + df.month.map("{:02}".format)
# repeated from above (harmless if already applied)
df.loc[df.isFraud == 2, 'isFraud'] = np.nan
# tag rows by whether the target is known (train) or not (test)
df['traintest'] = 'Test'
df.loc[df.isFraud.notnull(),'traintest'] = 'Train'
print("Are there redundant Transaction IDs?")
print(df.index.value_counts().value_counts())
# ### Train / Submission Time Split
# +
# Daily-resampled comparisons of the train vs test (submission) periods:
# row counts, mean missing-value share, mean zero share, mean amount.
f, ax = plt.subplots(2,2, figsize = [12,10])
for tt in ['Train','Test']:
    df.loc[df.traintest == tt,['all_group_nan_sum','TransactionDT']].set_index('TransactionDT')\
        .resample('1d').count().plot(label = tt, ax = ax[0,0])
    df.loc[df.traintest == tt,['all_group_nan_sum','TransactionDT']].set_index('TransactionDT')\
        .resample('1d').mean().plot(label = tt, ax = ax[1,0])
    df.loc[df.traintest == tt,['all_group_0_count','TransactionDT']].set_index('TransactionDT')\
        .resample('1d').mean().plot(label = tt, ax = ax[1,1])
    df.loc[df.traintest == tt,['TransactionAmt','TransactionDT']].set_index('TransactionDT')\
        .resample('1d').mean().plot(label = tt, ax = ax[0,1])
ax[0,0].set_title("Observation Count: Train/ Test")
ax[0,0].set_ylabel("Count")
ax[1,0].set_title("Average Number of Missing Values in Rows: Train/ Test")
ax[1,0].set_ylabel("Percent of Rows Is Null")
# BUG FIX: ax[0,1] plots the daily mean TransactionAmt, but its title and
# ylabel were copy-pasted from the missing-values panel.
ax[0,1].set_title("Average Transaction Amount: Train/ Test")
ax[0,1].set_ylabel("Mean TransactionAmt")
ax[1,1].set_title("Average Number of Zero Values in Rows: Train/ Test")
ax[1,1].set_ylabel("Percent of Rows Is Zero")
plt.tight_layout(pad=0)
plt.show()
# -
# ### Missing Values and Zeroes
# +
f, ax = plt.subplots(1,2,figsize = [12,5])
prefix = ['C','D','Device','M','Transaction','V','addr','card','dist','id']
# NOTE(review): `tt` here is left over from the previous cell (its last
# value is 'Test'), so only test-period rows are plotted; the loop variable
# is `p` — confirm whether filtering by `tt` is intended.
for i, p in enumerate(prefix):
    # daily mean share of missing values / zeros within each column group
    df.loc[df.traintest == tt,[p + "group_nan_sum",'TransactionDT']].set_index('TransactionDT')\
        .resample('1d').mean().plot(label = "Missing", ax = ax[0])
    df.loc[df.traintest == tt,[p + "group_0_count",'TransactionDT']].set_index('TransactionDT')\
        .resample('1d').mean().plot(label = "Zero", ax = ax[1])
ax[0].get_legend().remove()
ax[1].legend(prefix,fontsize='large', loc='center left',bbox_to_anchor=(1, 0.5))
ax[0].set_title("Proportion of Data Missing by Column Group")
ax[1].set_title("Proportion of Data Equal Zero by Column Group")
ax[0].set_ylabel("Proportion Missing")
ax[1].set_ylabel("Proportion Zero")
plt.tight_layout(pad=1)
plt.show()
# -
# ### Rolling Averages
# ### Univariate Exploration
# +
cols = ['card1_count_full', 'card1','card2','card2_count_full']
plot_df = df[cols + ['isFraud']]
# 2x2 grid of KDEs: fraud vs non-fraud distribution for each feature
t_r,t_c = 2, 2
f, axes = plt.subplots(t_r,t_c, figsize = [12,8],sharex=False, sharey=False)
row,col = 0,0
for c in cols:
    # wrap to the next grid row after filling t_c columns
    if col == t_c:
        col = 0
        row += 1
    sns.kdeplot(plot_df.loc[plot_df.isFraud == 0, c], shade = True, alpha = 0.6, color = 'black', ax = axes[row,col], label = 'Not Fraud')
    sns.kdeplot(plot_df.loc[plot_df.isFraud == 1, c], shade = True, alpha = 0.6, color = 'lime', ax = axes[row,col], label = 'Fraud')
    axes[row,col].set_title('{} and Fraud Distribution'.format(c.title()))
    col+=1
plt.tight_layout(pad=0)
plt.show()
del plot_df
# +
# Missing Values Pattern
# Hourly Pattern
# Individual's susceptibility to fraud (explore ID)
# Is there a way to see how close various fraud claims are?
# Create a CPU kernel where I can experiment with features and LOFO..
# Smash all data together.
|
ieee-fraud-feature-engineering-and-eda.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: morpholingo
# language: python
# name: morpholingo
# ---
# # Exploratory Data Analysis
#
# ## English language Wikipedia, diversity by content size
#
# This notebook is used for exploratory data analysis of token count, vocabulary size, and lexical diversity by content length in UTF-8 encoded bytes. Token count is defined as the response from the NLTK word_tokenize function. Vocabulary size is defined as the case-sensitive count of unique alphabetic word tokens with different casing defined as different words. Lexical diversity is defined as the ratio of case-sensitive unique tokens relative to the total content token count.
#
# The data were prepared by pulling 1000 random English language Wikipedia articles as serialized JSON data with the Wikipedia API, culling to a total of 645 articles by removing all stub articles (defined as an exclusion criterion in this study), and cleaning the content text with the `clean` function in this notebook.
#
# Execution requires unpacking of the `data` archive in this directory.
# +
import json
from pathlib import Path
import re
from bs4 import BeautifulSoup
from matplotlib import pyplot as plt
from nltk import FreqDist
from nltk.tokenize import sent_tokenize, word_tokenize
from nltk.util import bigrams
import numpy as np
import pandas as pd
from scipy import stats
jsondir = Path("data")
json_list = jsondir.glob("*.json")
# matches bracketed numeric reference markers such as "[12]" (1-3 digits)
REF_RE = re.compile(r"\[\d{1,3}\]")
def utf8len(s):
    """Return the length of *s* in bytes when UTF-8 encoded."""
    encoded = s.encode("utf-8")
    return len(encoded)
def clean(text):
    """Strip Wikipedia editing artifacts from article text."""
    # drop "[edit]" section-editing links
    result = text.replace("[edit]", "")
    # drop "[\d]"-style numeric reference markers
    result = REF_RE.sub("", result)
    # drop "[citation needed]" editorial flags
    result = result.replace("[citation needed]", "")
    return result
def get_vocab_size(content):
    """Count distinct purely-alphabetic tokens in *content* (case-sensitive)."""
    return len({tok for tok in content if tok.isalpha()})
def get_lexical_diversity(content):
    """Return a case-sensitive lexical diversity measure. We want to keep case forms
    of the same word as these are considered different tokens in this corpus.

    Diversity is the ratio of unique tokens to total tokens. Returns 0.0 for
    empty input instead of raising ZeroDivisionError (bug fix).
    """
    if not content:
        return 0.0
    return len(set(content)) / len(content)
# Per-article accumulators, filled in parallel (same index = same article)
lexical_diversity_list = []
vocab_size_list = []
content_size_list = []
tokens_count_list = []
file_list = []
for json_file in json_list:
    content_list = []
    # NOTE(review): is_a_stub is never read in this cell — likely leftover
    is_a_stub = False
    with open(json_file, "r") as f:
        json_obj = json.loads(f.read())
    # rendered article HTML from the Wikipedia parse API response
    html_text = json_obj["parse"]["text"]
    soup = BeautifulSoup(html_text, "lxml")
    # get text from paragraph elements only
    contents = soup.find_all("p")
    for content in contents:
        cleaned_content = clean(content.text)
        content_list.append(cleaned_content)
    full_str = "\n".join(content_list)
    content_size = utf8len(full_str)
    tokens = word_tokenize(full_str)
    lex_div = get_lexical_diversity(tokens)
    vocab_size = get_vocab_size(tokens)
    file_list.append(f"{json_file}")
    content_size_list.append(content_size)
    lexical_diversity_list.append(lex_div)
    vocab_size_list.append(vocab_size)
    tokens_count_list.append(len(tokens))
# sanity check: all accumulators must have the same length
print(len(content_size_list))
print(len(tokens_count_list))
print(len(lexical_diversity_list))
print(len(vocab_size_list))
# +
# assemble per-article measures into one frame
ldf = pd.DataFrame(
    {'title': file_list,
     'tokens': tokens_count_list,
     'lexical_diversity': lexical_diversity_list,
     'vocabulary_size': vocab_size_list,
     'content_size': content_size_list
    })
# largest articles by token count
ldf.sort_values(by='tokens', ascending=False).head(25)
# +
print("Summary statistics")
print(ldf.describe())
print("\n\nMedian values")
# NOTE(review): ldf contains the string column 'title'; recent pandas
# requires numeric_only=True for DataFrame.median — confirm pandas version
print(ldf.median())
# -
# pairwise Pearson correlations of the numeric measures
ldf.corr(method="pearson")
# +
plt.rcParams['figure.figsize'] = [12, 8]
# token count vs content size in bytes
ax = ldf.plot(kind="scatter", x="content_size", y="tokens", alpha=0.1)
ax.set_xlabel("Content Size (B)")
ax.set_ylabel("Tokens")
# -
# vocabulary size vs content size
ax = ldf.plot(kind="scatter", x="content_size", y="vocabulary_size", alpha=0.1)
ax.set_xlabel("Content Size (B)")
ax.set_ylabel("Vocabulary Size")
# lexical diversity vs content size
ax = ldf.plot(kind="scatter", x="content_size", y="lexical_diversity", alpha=0.1)
ax.set_xlabel("Content Size (B)")
ax.set_ylabel("Lexical Diversity")
|
src/wikipedia/en/wikipedia-lexdiversity/EDA_Wikipedia_Diversity_Size.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# # Langages de script - Python
# ## Cours 5 - exceptions, logs, modules timeit et argparse
# ### M2 Ingénierie Multilingue - INaLCO
# #### <NAME> - <EMAIL>
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Exceptions
#
# * Les exceptions sont utilisées pour signaler que quelque chose d'anormal se produit, une erreur généralement
#
# * Les messages d'erreur générés par les exceptions donnent des détails précieux sur les erreurs (type, emplacement, ...)
#
# * Si elles sont levées par Python vous pouvez capturer les exceptions (`try`, `except`) et modifier le comportement de votre script en fonction
#
# * Vous pouvez également lever des exceptions avec `raise`
#
# + slideshow={"slide_type": "-"}
chifoumi = ("pierre", "feuille", "ciseaux")
# prompt the user and normalise the answer to lower case
user_val = input("pierre, feuille, ciseaux ? ").lower()
if user_val not in chifoumi:
    # deliberately raise to demonstrate the `raise` statement
    raise ValueError("pierre, feuille ou ciseaux et puis c'est tout")
# + [markdown] slideshow={"slide_type": "slide"}
# # Capturer les exceptions
#
# * Trop facile
#
# + slideshow={"slide_type": "-"}
val = [1, 2 ,3]
try:
    # index 1984 is out of range: raises IndexError
    print(val[1984])
except IndexError:
    print("Erreur d'indice. Mais le roman est bien")
# + [markdown] slideshow={"slide_type": "subslide"}
# * À condition de sélectionner la bonne classe d'exception
#
# + slideshow={"slide_type": "-"}
dico = {'michel': 'H', 'michèle': 'F'}
try:
    # missing key raises KeyError, which IndexError does NOT catch
    print(dico[1984])
except IndexError:
    print("Erreur d'indice. Mais le roman est bien")
# + [markdown] slideshow={"slide_type": "fragment"}
# * Les exceptions Python sont organisées en hiérarchie (voir [ici](https://docs.python.org/3/library/exceptions.html#bltin-exceptions)), on finit par s'y retrouver
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Capturer les exceptions
#
# * Utiliser la classe mère `Exception` réduit la précision de l'information sur l'erreur produite
#
# + slideshow={"slide_type": "-"}
l = []
try:
    # l[5] raises IndexError before the dict lookup happens; the broad
    # `except Exception` cannot tell which error occurred
    print(dico[l[5]])
except Exception:
    print("Soit la clé n'existe pas, soit l'indice n'existe pas")
# + [markdown] slideshow={"slide_type": "subslide"}
# * Il est possible de capturer plusieurs types d'exceptions différentes
#
# + slideshow={"slide_type": "-"}
l = [1,2,3,4,5]
try:
    # l[5] is out of range here, so the IndexError branch runs
    print(dico[l[5]])
except IndexError:
    print("L'indice n'existe pas")
except KeyError:
    print("La clé n'existe pas")
# + [markdown] slideshow={"slide_type": "slide"}
# # Pour finir sur les exceptions
#
# * Le bloc `else` sera exécuté si aucune exception n'est levée
#
# * Le bloc `finally` sera exécuté dans tous les cas de figure
#
# + slideshow={"slide_type": "-"}
l = [1,2,3,4,5,0]
try:
    # l[5] is 0 — presumably not a key of dico, so KeyError is expected
    print(dico[l[5]])
except IndexError:
    print("L'indice n'existe pas")
except KeyError:
    print("La clé n'existe pas")
else:
    # runs only when no exception was raised
    print("OK tout roule")
finally:
    # always runs, exception or not
    print("C'est fini, vous pouvez rentrer")
# + [markdown] slideshow={"slide_type": "slide"}
# # Logs
#
# On peut toujours générer des logs manuellement à coups de `if` et de `print` mais ça devient vite fastidieux
#
# Python embarque un module qui est fait pour ça : [`logging`](https://docs.python.org/3/howto/logging.html)
#
# + slideshow={"slide_type": "-"}
import logging

# INFO threshold: DEBUG messages are filtered out of the log file
logging.basicConfig(filename='monscript.log',level=logging.INFO)
logging.debug('Celui-là ne sera pas dans les logs')
logging.info('Celui-là oui')
# + [markdown] slideshow={"slide_type": "fragment"}
# On peut formatter les log pour une sortie différente, souvent plus adaptée :
# + slideshow={"slide_type": "-"}
import logging

# same as above but with a tab-separated level/timestamp/message format
logging.basicConfig(filename='monscript.log', level=logging.INFO, format="%(levelname)s\t%(asctime)s\t%(message)s")
logging.debug('Celui-là ne sera pas dans les logs')
logging.info('Celui-là oui')
# + [markdown] slideshow={"slide_type": "fragment"}
# Voir [ici](https://docs.python.org/3/library/logging.html#logging.basicConfig) pour configurer `logging.basicConfig` et [là](https://docs.python.org/3/library/logging.html#logrecord-attributes) pour voir ce que l'on peut mettre pour l'argument `format`
# + [markdown] slideshow={"slide_type": "slide"}
# # Module timeit
#
# * `timeit` permet de mesurer le temps d'éxécution de portions de code ou de fonctions
#
# * La mesure du temps d'éxécution se fait en microseconde (*usec*), en milliseconde (*msec*) ou en secondes (*sec*)
#
# * Ce module s'utilise en ligne de commande, dans une console (i)python ou via des appels dans un script
#
# * Lors de l'appel en ligne de commande, le module détermine automatiquement le nombre de répétitions à réaliser
#
# ```bash
# $ python3 -m timeit "[x**2 for x in range(100)]"
# 10000 loops, best of 3: 38.7 usec per loop
# ```
#
# + [markdown] slideshow={"slide_type": "slide"}
# # Module timeit
#
# * La fonction la plus souvent utilisée est `timeit.timeit`
#
# * Ce n'est pas la seule, voir la [doc](https://docs.python.org/3.6/library/timeit.html)
#
# + slideshow={"slide_type": "-"}
import timeit

def main():
    """Compare the cost of `in` versus str.find for a one-character lookup."""
    time_char_in_str = timeit.timeit('str="le chat"; char="a"; char in str', number=10000)
    print("Time char in str : {}".format(time_char_in_str))
    # BUG FIX: the original timed char.find(str), i.e. searched the whole
    # sentence inside "a" — not comparable to `char in str`. Search the
    # character inside the sentence instead.
    time_find = timeit.timeit("""\
str = "le chat"
char="a"
str.find(char)
""", number=10000)
    print("Time find : {}".format(time_find))

main()
# + [markdown] slideshow={"slide_type": "slide"}
# # Module timeit
#
# * Vous pouvez donner accès aux fonctions individuellement via le paramètre 'setup'
#
# * Le plus pratique est d'utiliser le paramètre `global=globals()` pour lui passer l'espace de nom du script
#
# + slideshow={"slide_type": "-"}
import timeit

def f(n):
    # loop-and-append version, for timing comparison with g
    res = list()
    for x in range(n):
        res.append(x**2)
    return res

def g(n):
    # list-comprehension version
    return [x**2 for x in range(n)]

def main():
    try:
        # no setup/globals: the timed snippet cannot see f → NameError
        print(timeit.timeit('f(10)', number=100000))
    except NameError:
        print("f n'est pas trouvée!") # remove the try/except to see for yourself
    # give the snippet access to f explicitly, or via the module namespace
    print(timeit.timeit('f(10)', number=100000, setup="from __main__ import f"))
    print(timeit.timeit('g(10)', number=100000, globals=globals()))

main()
# + [markdown] slideshow={"slide_type": "subslide"}
# * Dans une console ipython vous pouvez utiliser les mots clés magiques `%timeit` et `%%timeit`
#
# + slideshow={"slide_type": "-"}
def f(n):
    """Return the squares of 0..n-1 as a list."""
    squares = []
    for value in range(n):
        squares.append(value * value)
    return squares
# + slideshow={"slide_type": "-"}
# %timeit -n 100000 "f(10)"
# -
# %%timeit
import random
# build a large random list, then time max() over it (cell-level %%timeit)
l = [random.random() for xxx in range(100000)]
max(l)
# %%timeit import random; l = [random.random() for xxx in range(100000)]
max(l)
# + [markdown] slideshow={"slide_type": "slide"}
# # Module argparse
#
# * [`sys.argv`](https://docs.python.org/3.6/library/sys.html#sys.argv) permet de récupérer la liste des arguments passés à un script Python
#
# * Le module [`argparse`](https://docs.python.org/3.6/library/argparse.html) est un parseur qui vous permettra de construire des interfaces en ligne de commande plus riches et plus simples à utiliser.
# Commencer avec le [tutorial](https://docs.python.org/3.6/howto/argparse.html#id1)
#
# + slideshow={"slide_type": "-"}
import argparse

# minimal CLI: one boolean flag and one required positional argument
parser = argparse.ArgumentParser(description="A useless script")
parser.add_argument("-v", "--verbose", help="verbose mode", action="store_true")
parser.add_argument("file", help="input file")
# vars() turns the parsed Namespace into a plain dict
args = vars(parser.parse_args())
print(args['file'])
# + slideshow={"slide_type": "subslide"}
bash
> python3 args.py
usage: args.py [-h] [-v] file
args.py: error: the following arguments are required: file
# + [markdown] slideshow={"slide_type": "fragment"}
# On peut également utiliser `--help` ou `-h` pour afficher l'aide
# +
bash
> python3 args.py -h
usage: args.py [-h] [-v] file
A useless script
positional arguments:
file input file
optional arguments:
-h, --help show this help message and exit
-v, --verbose verbose mode
# + [markdown] slideshow={"slide_type": "slide"}
# # Pas de *main* en Python ?
#
# Vous trouverez fréquemment le test suivant dans les scripts Python :
#
# + slideshow={"slide_type": "-"}
if __name__ == '__main__':
    # slide placeholder: instruction1/instruction2 are not real names
    instruction1
    instruction2
# + [markdown] slideshow={"slide_type": "-"}
# ou
#
# + slideshow={"slide_type": "-"}
def main():
    # slide placeholder body
    instruction

if __name__ == '__main__':
    main()
# + [markdown] slideshow={"slide_type": "subslide"}
# Cela évite que le code sous le test ne soit exécuté lors de l'import du script :
# `__name__` est une variable créée automatiquement qui vaut `__main__` si le script a été appelé en ligne de commande, le nom du script s'il a été importé.
# + [markdown] slideshow={"slide_type": "fragment"}
# Accessoirement cela permet d'organiser son code et de le rendre plus lisible
# Désormais je vous ~~recommande vivement~~ demande de l'inclure dans tous vos scripts
# + [markdown] slideshow={"slide_type": "slide"}
# # Exos
#
# 3. Résoudre [Scrabble](https://www.codingame.com/training/medium/scrabble)
#
|
python-5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Labelling Analysis (DLA) Dataset E
#import libraries
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import os
print('Libraries imported!!')
# +
#define directory of functions and actual directory
HOME_PATH = '' #home path of the project
FUNCTIONS_DIR = 'EVALUATION FUNCTIONS/RESEMBLANCE'
ACTUAL_DIR = os.getcwd()
#change directory to functions directory
os.chdir(HOME_PATH + FUNCTIONS_DIR)
#import functions for data labelling analisys
from data_labelling import mix_data
from data_labelling import split_data
from data_labelling import DataPreProcessor
from data_labelling import ClassificationModels
#change directory to actual directory
os.chdir(ACTUAL_DIR)
print('Functions imported!!')
# -
# ## 1. Read real and synthetic datasets
# In this part real and synthetic datasets are read.
#Define global variables
DATA_TYPES = ['Real','GM','SDV','CTGAN','WGANGP']
SYNTHESIZERS = ['GM','SDV','CTGAN','WGANGP']
FILEPATHS = {'Real' : HOME_PATH + 'REAL DATASETS/TRAIN DATASETS/E_PimaIndiansDiabetes_Real_Train.csv',
'GM' : HOME_PATH + 'SYNTHETIC DATASETS/GM/E_PimaIndiansDiabetes_Synthetic_GM.csv',
'SDV' : HOME_PATH + 'SYNTHETIC DATASETS/SDV/E_PimaIndiansDiabetes_Synthetic_SDV.csv',
'CTGAN' : HOME_PATH + 'SYNTHETIC DATASETS/CTGAN/E_PimaIndiansDiabetes_Synthetic_CTGAN.csv',
'WGANGP' : HOME_PATH + 'SYNTHETIC DATASETS/WGANGP/E_PimaIndiansDiabetes_Synthetic_WGANGP.csv'}
categorical_columns = ['Outcome']
data = dict()
#iterate over all datasets filepaths and read each dataset
for name, path in FILEPATHS.items() :
data[name] = pd.read_csv(path)
for col in categorical_columns :
data[name][col] = data[name][col].astype('category')
data
# ## 2. Mix real data with synthetic data
mixed_data = dict()
# combine real rows with each synthesizer's rows; per the note below,
# mix_data presumably labels real rows 0 and synthetic rows 1 — confirm
for name in SYNTHESIZERS :
    mixed_data[name] = mix_data(data['Real'], data[name])
mixed_data
# - 0 for real data
# - 1 for synthetic data
# ## 2. Split train and test data
train_len = 0.8  # fraction of each mixed dataset used for training
train_data = dict()
test_data = dict()
for name in SYNTHESIZERS :
    print(name)
    train_data[name], test_data[name] = split_data(mixed_data[name], train_len)
    print(train_data[name].shape, test_data[name].shape)
    # class balance of the real/synthetic label in each split
    print('Train data', train_data[name].groupby('Label').size())
    print('Test data', test_data[name].groupby('Label').size())
    print('##############################################')
# ## 3. Train Classifiers
# +
categorical_columns = ['Outcome']
numerical_columns = data['Real'].select_dtypes(include=['int64','float64']).columns.tolist()
# valid category values for the categorical column(s)
categories = [np.array([0, 1])]
#initialize classifiers
classifiers_all = dict()
data_preprocessors = dict()
target = 'Label'
# fit one preprocessor + classifier set per synthesizer on the
# real-vs-synthetic labelling task
for name in SYNTHESIZERS :
    print(name)
    classifiers_all[name] = ClassificationModels()
    data_preprocessors[name] = DataPreProcessor(categorical_columns, numerical_columns, categories)
    x_train = data_preprocessors[name].preprocess_train_data(train_data[name].iloc[:, train_data[name].columns != target])
    y_train = train_data[name].loc[:, target]
    classifiers_all[name].train_classifiers(x_train, y_train)
    print('####################################################')
# -
# ## 5. Evaluate Classifiers
# +
# NOTE(review): results_all is never written to in this chunk — confirm
results_all = dict()
# score each synthesizer's classifiers on its held-out split
for name in SYNTHESIZERS :
    print(name)
    x_test = data_preprocessors[name].preprocess_test_data(test_data[name].loc[:, test_data[name].columns != target])
    print(x_test.shape)
    y_test = test_data[name].loc[:, target]
    classifiers_all[name].evaluate_classifiers(x_test, y_test)
    print('####################################################')
# -
# ## 6. Analyse models results
# +
# One metrics panel per synthesizer on a 1x4 grid.
fig, axs = plt.subplots(nrows=1, ncols=4, figsize=(8, 2.5))
# BUG FIX: removed the dead 2x2 index list that was immediately
# overwritten by the flat 1x4 indices below.
axs_idxs = [0, 1, 2, 3]
idx = dict(zip(SYNTHESIZERS,axs_idxs))
for name in SYNTHESIZERS :
    ax_plot = axs[idx[name]]
    classifiers_all[name].plot_classification_metrics(ax_plot)
    ax_plot.set_title(name, fontsize=10)
plt.tight_layout()
fig.savefig('DATA LABELLING RESULTS/CLASSIFICATION_METRICS.svg', bbox_inches='tight')
# -
|
notebooks/Dataset E - Pima Indians Diabetes/Synthetic data evaluation/Resemblance/4_Data_Labelling_Resemblance_DatasetE.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Notebook for plotting Corona data from SSI
#
# This Notebook plots regional data for Copenhagen.
# It should be rather easy to change to a different region.
#
# Run SSI_get_data first to get a dataset of the right format.
#
# Data are read from subfolder : data
# Date of current dataset is read from: data_date.dat
# %matplotlib notebook
import matplotlib.pyplot as plt
import pandas as pd
from pathlib import Path
from datetime import date, timedelta
import pickle
# +
#Limit dates to plot
N_days=60 # number of days to include
#N_days=150 # number of days to include
bad_cutoff=4000 # minimum number of tests to consider good
#bad_cutoff=100
# define region — must match a column name in the SSI municipality files
region="Copenhagen"
# +
# Read date of last data-download.
# Use a context manager so the file is closed even if unpickling fails.
with open("data_date.dat", 'rb') as f:
    date_str = pickle.load(f)
print("Dataset is from: " + date_str)
# -
#define the file to read
datafolder=Path("data/")
# NOTE(review): this datafile is reassigned in the next cell before use —
# the line below looks like leftover code; confirm before removing
datafile=datafolder / "Test_pos_over_time.csv"
# +
# Read datafile
#define the file to read
datafolder=Path("data/")
datafile=datafolder / "Municipality_cases_time_series.csv"
# Parse the sample date as the index.
# Note the handling of Danish number formats (decimal comma, thousands dot).
# NOTE(review): error_bad_lines is deprecated/removed in recent pandas
# (replaced by on_bad_lines) — confirm the pandas version in use.
df_cases=pd.read_csv(datafile, sep=';', parse_dates=['date_sample'], index_col=['date_sample'],error_bad_lines=False, engine='python', decimal=',', thousands='.')
# keep only the selected region's column
df_cases_sel=df_cases[region]
# +
#define the file to read
datafolder=Path("data/")
datafile=datafolder / "Municipality_tested_persons_time_series.csv"
# Parse the adjusted test date as the index.
# NOTE(review): the original comment claimed the last two lines are skipped,
# but no skipfooter is passed — verify the file has no trailing junk rows.
# Note the handling of Danish number formats (decimal comma, thousands dot).
df_tests=pd.read_csv(datafile, sep=';', parse_dates=['PrDate_adjusted'], index_col=['PrDate_adjusted'],error_bad_lines=False, engine='python', decimal=',', thousands='.')
df_tests_sel=df_tests[region]
# -
# merge data based on dates (CHECK the result)
# inner-join cases and tests on the date index; the region column appears
# twice, so rename the _x/_y suffixed copies to meaningful names
df=df_cases_sel.to_frame().merge(df_tests_sel.to_frame(),left_index=True, right_index=True)
df=df.rename(columns={region+"_x": "NewPositive", region+"_y": "NotPrevPos"})
df
# +
# calculate some more numbers
# Positive emperical scaled by number of tests to power of 0.7
# This scaling is based on results in
# SSI "Ekspertrapport af d. 23. oktober 2020 Incidens og fremskrivning af COVID-19 tilfælde"
# https://www.ssi.dk/-/media/ssi-files/ekspertrapport-af-den-23-oktober-2020-incidens-og-fremskrivning-af-covid19-tilflde.pdf?la=da
def calcScaledNumber(row):
    """Positive percent normalised to 10,000 tests via the tests**0.7
    scaling from the SSI expert report (Oct 2020)."""
    tested = row.NotPrevPos
    if tested <= 0:
        return 0
    return row.NewPositive / (tested ** 0.7) * 10000 ** 0.7 / 10000 * 100
# apply the function directly — no need to wrap it in a lambda
df['ScaledNumber']=df.apply(calcScaledNumber, axis=1)
# Recalculate Positiv procent to get more decimals for plotting
def calcPosPct(row):
    """Return positive tests as a percent of all tests for one row (0 when no tests)."""
    tested = row.NotPrevPos
    if tested > 0:
        return row.NewPositive / tested * 100
    return 0
# Apply row-wise; passing the function directly avoids a redundant lambda wrapper.
df['PosPct'] = df.apply(calcPosPct, axis=1)
# +
# for easy plot make a sub data frame with selected number of days
# (N_days and bad_cutoff are defined earlier in the notebook)
df_sel=df[date.today()-timedelta(days=N_days):]
# and make index for "bad" datapoints
# Boolean mask: days with fewer tests than the cutoff are considered unreliable.
bad_idx=df_sel['NotPrevPos']<bad_cutoff
# -
# Define a common title including date from file
title_str='SSI COVID-19 data, tilfælde opgjort på prøvetagningsdato \n' + region + '\n'
title_str += date_str
# +
# Four stacked panels sharing the x (date) axis:
# [0] raw positive percent, [1] test-volume-normalised positive percent,
# [2] new positives, [3] number of persons tested.
# Low-test ("bad") days are overplotted in red on every panel.
axs=[None]*4 #define axs list as empty 4 entries
fig = plt.figure(figsize=(7, 15))
axs[0] = plt.subplot(411)
axs[1] = plt.subplot(412,sharex=axs[0])
axs[2] = plt.subplot(413,sharex=axs[0])
axs[3] = plt.subplot(414,sharex=axs[0])
df_sel.plot(ax=axs[0],y='PosPct',title=title_str,label='NewPositive / NotPrevPosTested * 100',style='.');
df_sel[bad_idx].plot(ax=axs[0],y='PosPct',style='.',color='red',label='NewPositive / NotPrevPosTested * 100 (Tested<'+ str(bad_cutoff) + ')');
axs[0].set_ylabel("%");
axs[0].set_ylim(0,5.5)
axs[0].tick_params(which='both', bottom=True, top=True, left=True, right=True, direction='in')
df_sel.plot(ax=axs[1], y='ScaledNumber',label='NewPositive/NotPrevPosTested^0.7 * 10.000^0.7 / 10.000 *100',style='.');
df_sel[bad_idx].plot(ax=axs[1],y='ScaledNumber',style='.',color='red', label=' (Tested<'+ str(bad_cutoff) + ')');
axs[1].set_ylabel("Positiv Procent [Estimated for 10.000 tests]");
axs[1].tick_params(which='both', bottom=True, top=True, left=True, right=True, direction='in')
axs[1].set_ylim(0,5.5)
df_sel.plot(ax=axs[2],y='NewPositive',style='.');
df_sel[bad_idx].plot(ax=axs[2],y='NewPositive',style='.',color='red',label='NewPositive (Tested<'+ str(bad_cutoff) + ')');
axs[2].tick_params(which='both', bottom=True, top=True, left=True, right=True, direction='in')
df_sel.plot(ax=axs[3],y='NotPrevPos',label='Tested (NotPrevPos)',style='.');
df_sel[bad_idx].plot(ax=axs[3],y='NotPrevPos',style='.',color='red',label='Tested<'+ str(bad_cutoff) + '');
axs[3].tick_params(which='both', bottom=True, top=True, left=True, right=True, direction='in')
# -
|
SSI_kommune_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:R]
# language: R
# name: conda-env-R-r
# ---
# +
suppressWarnings({
library(lmerTest)
library(lme4)
library(ggplot2)
library(dplyr)
library(tidyr)
library(sjPlot)
library(IRdisplay)
library(car)
library(ggeffects)
library(gridExtra)
library(repr)
})
options(repr.plot.width=4, repr.plot.height=3)
# -
# # Investigating MHWs + Phytoplankton Performance Response via LMMs
#
# <NAME>, May 2020
#
# **Purpose**: To experiment with linear mixed effects models to examine whether a significant relationship exists between latitude, yearly mean sea surface temperature, and performance ratio during marine heatwave events.
#
# **Method**: Build a linear mixed model of performance ratio with the following effects:
#
# *Fixed*:
# * Second-degree polynomial of Latitude *(perhaps also abs(lat))*
# * mean_sst_event_year_local
# * season
# * *interaction terms*
#
#
# *Random*:
# * phytoplankton isolate
#
#
# Perhaps season should be a fixed effect, especially since it likely does have control over the direction + shape of the curve.
#
# ## Data
#
# We compute individual isolate performance for all local heatwaves in [this notebook](../Individual_Isolate_performance.ipynb), and use these data below.
# Load per-isolate performance for all local heatwave events (computed upstream).
mhwPerformance <- read.csv("../tmax_only_isolate_performance_withnegative.csv")
mhwPerformance = mhwPerformance %>% mutate(isolate = factor(isolate))
head(mhwPerformance)
# We need to adjust for the effect of hemisphere to "align" seasons and convert to "season" factor variables from "peak_doy" column:
# Southern hemisphere (lat < 0): shift day-of-year by half a year so the
# season labels below mean the same thing in both hemispheres.
mhwPerformance[between(mhwPerformance$lat, -90, 0), 'peak_doy'] = (mhwPerformance[between(mhwPerformance$lat, -90, 0), 'peak_doy'] - 180) %% 365
mhwPerformance = mhwPerformance %>%
    mutate(season =
        case_when(
            (between(peak_doy, 0, 77) | between(peak_doy, 355, 366)) ~ "winter", # January 1 - March XX or December XX - December 31
            between(peak_doy, 78, 170) ~ "spring", # March XX - June XX
            between(peak_doy, 171, 295) ~ "summer", # June XX - September XX
            between(peak_doy, 296, 354) ~ "fall" # September XX - December XX
        )
    )
head(mhwPerformance %>% select(peak_doy, season))
# We'll clean up by dropping NAs and `Inf`s
# NOTE(review): no NA/Inf filtering actually happens in the code below --
# confirm whether it was removed intentionally or is done elsewhere.
# We'll also **scale the input variables** for later, but the initial models will use the raw values:
mhwPerformance = mhwPerformance %>%
    mutate(
        lat_scaled = scale(lat),
        sst_scaled = scale(current_year_sst_mean),
        abslat_scaled = scale(abs(lat)),
        abslat = abs(lat)
    )
# ## Models
#
# Let's define the standard model formulations here:
# +
# Fixed effects: degree-2 polynomial of latitude (or absolute latitude),
# event-year mean SST, season, and all pairwise interactions ("^2").
formula <- performance_diff_mean ~ (poly(lat_scaled, 2) + sst_scaled + season)^2
formula_unscaled <- performance_diff_mean ~ (poly(lat, 2) + current_year_sst_mean + season)^2
formula_abslat <- performance_diff_mean ~ (abslat_scaled + sst_scaled + season)^2
formula_abslat_unscaled <- performance_diff_mean ~ (abslat + current_year_sst_mean + season)^2
# -
# And the random effects formulations:
# +
# Same fixed effects plus a random intercept per phytoplankton isolate.
formula_re <- performance_diff_mean ~ (poly(lat_scaled, 2) + sst_scaled + season)^2 + (1|isolate)
formula_unscaled_re <- performance_diff_mean ~ (poly(lat, 2) + current_year_sst_mean + season)^2 + (1|isolate)
formula_abslat_re <- performance_diff_mean ~ (abslat_scaled + sst_scaled + season)^2 + (1|isolate)
formula_abslat_unscaled_re <- performance_diff_mean ~ (abslat + current_year_sst_mean + season)^2 + (1|isolate)
# -
# We'll start with a simple linear model with all terms.
simple_lm = lm(formula, data=mhwPerformance)
summary(simple_lm)
# Dashed horizontal reference line at y = 0, reused in all prediction plots below.
baseline_hline = geom_hline(yintercept=0, linetype='dashed', color='blue', size=0.6, alpha=0.6)
# ## Simple Model with Random Effects
#
# Only `isolate` as random effect for now:
simple_re = lmer(
    formula_re,
    data=mhwPerformance
)
summary(simple_re)
# Marginal-effect (prediction) plots for each fixed effect.
simple_re_plots = plot_model(simple_re, type='pred')
simple_re_plots$lat + baseline_hline
simple_re_plots$current_year_sst_mean + baseline_hline
simple_re_plots$season + baseline_hline
# Random-intercept estimates per isolate.
plot_model(simple_re, type='re') +
    xlab("Isolate ID") +
    ylab('random effect')+
    ylim(-0.2, 0.2)
# ## More Intense Heatwaves =? Stronger Signal?
#
# There's probably a lot of noise from shorter events here.
#
# Lets take a look at the distribution of heatwaves by intensity:
median(mhwPerformance$intensity_mean)
ggplot(mhwPerformance, aes(x=intensity_mean)) +
    geom_histogram() +
    geom_vline(xintercept=median(mhwPerformance$intensity_mean))
# We can use ~1.5 C as our threshold (median?)
# Subset to "intense" events only; refit the same models on this subset below.
intensity_threshold = 1.5
intenseMhwPerformance = mhwPerformance %>% filter(intensity_mean > intensity_threshold)
head(intenseMhwPerformance)
nrow(intenseMhwPerformance)
# **Simple Model**
simple_intense_lm = lm(
    formula,
    data=intenseMhwPerformance)
summary(simple_intense_lm)
# **Simple Random Effects**
simple_intense_re = lmer(
    formula_re,
    data=intenseMhwPerformance
)
summary(simple_intense_re)
# ## Absolute Latitude instead of Poly(2)
abslat_re = lmer(
    formula_abslat_re,
    data=mhwPerformance
)
summary(abslat_re)
abslat_intense_re = lmer(
    formula_abslat_re,
    data=intenseMhwPerformance,
)
summary(abslat_intense_re)
plot_model(abslat_re, type='pred')
plot_model(abslat_intense_re, type='pred')
# ## Produce Plots
# ### Absolute Latitude
#
# **Coefficient Plots, Simple and Intense**
# Side-by-side coefficient plots (all vs. intense MHWs) written to a PNG file.
png("./tmax_only_abslat_compare_coefs_diff.png", width=1440, height=950, res=180)
y_limit = ylim(-1, 1)
simple_coefs = plot_model(abslat_re, ci.lvl=NA, show.values = TRUE, value.offset=.4) + y_limit + ggtitle("A) All MHWs")
intense_coefs = plot_model(abslat_intense_re,show.values = TRUE, value.offset=.4, ci.lvl=NA)+ y_limit + theme(axis.text.y = element_blank()) + ggtitle("B) Intense MHWs")
grid.arrange(simple_coefs, intense_coefs, nrow=1, widths=c(1.7, 1) )
dev.off()
# **Coefficient Table**
tab_model(
    abslat_re, abslat_intense_re,
    show.stat=TRUE, use.viewer=FALSE,
    dv.labels=c("Performance Difference [all events]", "Performance Difference [intense events]"),
    file = "tmax_only_abslat_coef_compare_table.html"
)
# **Predictions, All MHW**
# +
# Prediction plots (season effect and latitude-by-season effect) for the
# all-events absolute-latitude model, written to a PNG file.
png("./tmax_only_abslat_preds_preddiff.png", width=1440, height=700, res=180)
y_limit = ylim(-1, 1)
seas = plot(ggeffect(abslat_re, terms='season')) +
    baseline_hline +
    xlab("Season") +
    ylab("Performance Difference") +
    ggtitle("A) Season") +
    y_limit
seaslat = plot(ggeffect(abslat_re, terms=c('abslat_scaled', 'season [summer, winter]'))) +
    baseline_hline +
    xlab("Absolute Latitude [scaled]") +
    ylab("Performance Difference") +
    ggtitle("B) Latitude") +
    y_limit
grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3))
dev.off()
# seaslat
# -
# **Predictions, Intense MHW**
# +
# Prediction plots for the intense-events absolute-latitude model,
# written to a PNG file.
png("./tmax_only_abslat_preds_intense_preddiff.png", width=1440, height=700, res=180)
y_limit = ylim(-1, 1)
seas = plot(ggeffect(abslat_intense_re, terms='season')) +
    baseline_hline +
    xlab("Season") +
    ylab("Performance Difference") +
    ggtitle("A) Season") +
    y_limit
seaslat = plot(ggeffect(abslat_intense_re, terms=c('abslat_scaled', 'season [summer, winter]'))) +
    baseline_hline +
    xlab("Absolute Latitude [scaled]") +
    ylab("Performance Difference") +
    ggtitle("B) Latitude") +
    y_limit
grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3))
# Fix: dev.off() was commented out, leaving the png device open -- the file was
# never finalized and later plots could be captured by the stale device.
dev.off()
# seaslat
# -
# ### Regular Latitude
# **Coefficients**
# Side-by-side coefficient plots for the poly(lat, 2) models, written to PNG.
png("./tmax_only_polylat_compare_coefs_diff.png", width=1440, height=950, res=180)
y_limit = ylim(-5.5,5.5)
simple_coefs = plot_model(simple_re, ci.lvl=NA, show.values = TRUE, value.offset=.4) + y_limit + ggtitle("A) All MHWs")
intense_coefs = plot_model(simple_intense_re,show.values = TRUE, value.offset=.4, ci.lvl=NA)+ y_limit + theme(axis.text.y = element_blank()) + ggtitle("B) Intense MHWs")
grid.arrange(simple_coefs, intense_coefs, nrow=1, widths=c(2, 1) )
dev.off()
# **Coefficients Table**
tab_model(
    simple_re, simple_intense_re,
    show.stat=TRUE, use.viewer=FALSE,
    dv.labels=c("Performance Difference [all events]", "Performance Difference [intense events]"),
    file = "tmax_only_polylat_coef_compare_table.html"
)
# **Predictions, all MHW**
# +
png("./tmax_only_polylat_preds_preddiff.png", width=1440, height=700, res=180)
y_limit = ylim(-1, 1)
seas = plot(ggeffect(simple_re, terms='season')) +
    baseline_hline +
    xlab("Season") +
    ylab("Performance Difference") +
    ggtitle("A) Season") +
    y_limit
seaslat = plot(ggeffect(simple_re, terms=c('lat_scaled', 'season [summer, winter]'))) +
    baseline_hline +
    xlab("Latitude [scaled]") +
    ylab("Performance Difference") +
    ggtitle("B) Latitude") +
    y_limit
grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3))
dev.off()
# seaslat
# -
# **Predictions, Intense MHW**
# +
png("./tmax_only_polylat_preds_intense_preddiff.png", width=1440, height=700, res=180)
seas = plot(ggeffect(simple_intense_re, terms='season')) +
    baseline_hline +
    xlab("Season") +
    ylab("Performance Difference") +
    ggtitle("A) Season") +
    ylim(-0.5, 0.5)
seaslat = plot(ggeffect(simple_intense_re, terms=c('lat_scaled', 'season [summer, winter]'))) +
    baseline_hline +
    xlab("Latitude [scaled]") +
    ylab("Performance Difference") +
    ggtitle("B) Latitude") +
    ylim(-0.5, 0.5)
grid.arrange(seas, seaslat, nrow=1, widths=c(1,1.3))
dev.off()
# seaslat
# -
# ## Individual Models per Season
# Fit the abslat + SST random-intercept model separately per season
# (season drops out of the formula because each subset has one season).
winter_only = mhwPerformance %>% filter(season == "winter")
winter_re = lmer(performance_diff_mean ~ (abslat + sst_scaled)^2 + (1 | isolate), data=winter_only, )
summary(winter_re)
summer_only = mhwPerformance %>% filter(season == "summer")
summer_re = lmer(performance_diff_mean ~ (abslat + sst_scaled)^2 + (1 | isolate), data=summer_only, )
summary(summer_re)
spring_only = mhwPerformance %>% filter(season == "spring")
spring_re = lmer(performance_diff_mean ~ (abslat + sst_scaled)^2 + (1 | isolate), data=spring_only, )
summary(spring_re)
fall_only = mhwPerformance %>% filter(season == "fall")
fall_re = lmer(performance_diff_mean ~ (abslat + sst_scaled)^2 + (1 | isolate), data=fall_only, )
summary(fall_re)
# Export a four-column comparison table of the per-season models.
tab_model(
    winter_re,
    spring_re,
    summer_re,
    fall_re,
    show.stat=TRUE,
    use.viewer=FALSE,
    dv.labels=c("Winter Only", "Spring Only", "Summer Only", "Autumn Only"),
    file = "compare_season_models.html"
)
# +
# Dark ggplot2 theme: white text/lines on a black background, built from
# theme_grey via %+replace% so unspecified elements keep their defaults.
theme_black = function(base_size = 12, base_family = "") {
    theme_grey(base_size = base_size, base_family = base_family) %+replace%
        theme(
            # Specify axis options
            axis.line = element_blank(),
            axis.text.x = element_text(size = base_size*0.8, color = "white", lineheight = 0.9),
            axis.text.y = element_text(size = base_size*0.8, color = "white", lineheight = 0.9),
            # NOTE(review): element_line(size=) is soft-deprecated in favour of
            # linewidth= since ggplot2 3.4; kept for backwards compatibility.
            axis.ticks = element_line(color = "white", size = 0.2),
            # NOTE(review): both title margins use margin(0, 10, 0, 0) -- for the
            # x title a top margin would be conventional; confirm intent.
            axis.title.x = element_text(size = base_size, color = "white", margin = margin(0, 10, 0, 0)),
            axis.title.y = element_text(size = base_size, color = "white", angle = 90, margin = margin(0, 10, 0, 0)),
            axis.ticks.length = unit(0.3, "lines"),
            # Specify legend options
            legend.background = element_rect(color = NA, fill = "black"),
            legend.key = element_rect(color = "white", fill = "black"),
            legend.key.size = unit(1.2, "lines"),
            legend.key.height = NULL,
            legend.key.width = NULL,
            legend.text = element_text(size = base_size*0.8, color = "white"),
            legend.title = element_text(size = base_size*0.8, face = "bold", hjust = 0, color = "white"),
            legend.position = "right",
            legend.text.align = NULL,
            legend.title.align = NULL,
            legend.direction = "vertical",
            legend.box = NULL,
            # Specify panel options
            panel.background = element_rect(fill = "black", color = NA),
            panel.border = element_rect(fill = NA, color = "white"),
            panel.grid.major = element_line(color = "grey35"),
            panel.grid.minor = element_line(color = "grey20"),
            # Fix: panel.margin was deprecated in ggplot2 2.0 and renamed to
            # panel.spacing; the old name warns (or errors) on modern ggplot2.
            panel.spacing = unit(0.5, "lines"),
            # Specify facetting options
            strip.background = element_rect(fill = "grey30", color = "grey10"),
            strip.text.x = element_text(size = base_size*0.8, color = "white"),
            strip.text.y = element_text(size = base_size*0.8, color = "white",angle = -90),
            # Specify plot options
            plot.background = element_rect(color = "black", fill = "black"),
            plot.title = element_text(size = base_size*1.2, color = "white"),
            plot.margin = unit(rep(1, 4), "lines")
        )
}
# -
|
analysis/modeling/Phytoplankton-LMM_difference.R.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Za8-Nr5k11fh"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="Eq10uEbw0E4l" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="Tt5S6SiPi7ze"
# # Forecasting with a CNN
# + [markdown] colab_type="text" id="W2iENc3Nh6g7"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/examples/blob/master/courses/udacity_intro_to_tensorflow_for_deep_learning/l08c09_forecasting_with_cnn.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="vidayERjaO5q"
# ## Setup
# + colab_type="code" id="sTkN9Uw4v6V8" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
# + colab_type="code" id="pKgUyVL5I1fR" colab={}
try:
    # Use the %tensorflow_version magic if in colab.
    # %tensorflow_version 2.x
    # Fix: jupytext converts the magic above into a comment, which left the
    # try body empty -- a SyntaxError when this file runs as plain Python.
    pass
except Exception:
    pass
# + colab_type="code" id="gqWabzlJ63nL" colab={}
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
keras = tf.keras
# + colab_type="code" id="cg1hfKCPldZG" colab={}
def plot_series(time, series, format="-", start=0, end=None, label=None):
    """Plot series[start:end] against time[start:end] on the current axes."""
    window = slice(start, end)
    plt.plot(time[window], series[window], format, label=label)
    plt.xlabel("Time")
    plt.ylabel("Value")
    if label:
        # Only draw a legend when a label was supplied.
        plt.legend(fontsize=14)
    plt.grid(True)
def trend(time, slope=0):
    """Linear trend: the value grows by `slope` per unit of time."""
    return time * slope
def seasonal_pattern(season_time):
    """Just an arbitrary pattern, you can change it if you wish"""
    # Cosine bump early in the season, exponential decay afterwards.
    early_phase = np.cos(season_time * 2 * np.pi)
    late_phase = 1 / np.exp(3 * season_time)
    return np.where(season_time < 0.4, early_phase, late_phase)
def seasonality(time, period, amplitude=1, phase=0):
    """Repeats the same pattern at each period"""
    # Fraction of the way through the current period, in [0, 1).
    position_in_period = ((time + phase) % period) / period
    return amplitude * seasonal_pattern(position_in_period)
def white_noise(time, noise_level=1, seed=None):
    """Gaussian noise with std `noise_level`, one sample per entry of `time`."""
    rng = np.random.RandomState(seed)
    return noise_level * rng.randn(len(time))
def seq2seq_window_dataset(series, window_size, batch_size=32,
                           shuffle_buffer=1000):
    """Build a shuffled tf.data pipeline of (window, window-shifted-by-one) pairs."""
    # Add a trailing feature axis so each timestep is a length-1 vector.
    expanded = tf.expand_dims(series, axis=-1)
    windows = (tf.data.Dataset.from_tensor_slices(expanded)
               .window(window_size + 1, shift=1, drop_remainder=True)
               .flat_map(lambda w: w.batch(window_size + 1))
               .shuffle(shuffle_buffer)
               # Input is the first `window_size` steps, target is the same
               # window shifted forward by one step.
               .map(lambda w: (w[:-1], w[1:])))
    return windows.batch(batch_size).prefetch(1)
def model_forecast(model, series, window_size):
    """Run `model` over every length-`window_size` sliding window of `series`."""
    windows = (tf.data.Dataset.from_tensor_slices(series)
               .window(window_size, shift=1, drop_remainder=True)
               .flat_map(lambda w: w.batch(window_size))
               .batch(32)
               .prefetch(1))
    return model.predict(windows)
# + colab_type="code" id="iL2DDjV3lel6" colab={}
time = np.arange(4 * 365 + 1)
slope = 0.05
baseline = 10
amplitude = 40
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
noise_level = 5
noise = white_noise(time, noise_level, seed=42)
series += noise
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# + colab_type="code" id="Zmp1JXKxk9Vb" colab={}
split_time = 1000
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
# + [markdown] colab_type="text" id="DI2GlupZ8OJW"
# ## Preprocessing With 1D-Convolutional Layers
# + colab_type="code" id="6GFE82ci8OJW" colab={}
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-8 * 10**(epoch / 20))
optimizer = keras.optimizers.SGD(lr=1e-8, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# + colab_type="code" id="Uungw0H58OJX" colab={}
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-8, 1e-4, 0, 30])
# + colab_type="code" id="UG7cO0yr8OJY" colab={}
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 30
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential([
keras.layers.Conv1D(filters=32, kernel_size=5,
strides=1, padding="causal",
activation="relu",
input_shape=[None, 1]),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.LSTM(32, return_sequences=True),
keras.layers.Dense(1),
keras.layers.Lambda(lambda x: x * 200)
])
optimizer = keras.optimizers.SGD(lr=1e-5, momentum=0.9)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
# + colab_type="code" id="BlqzLwfn8OJa" colab={}
model = keras.models.load_model("my_checkpoint.h5")
# + colab_type="code" id="Pj0rpT-48OJc" colab={}
rnn_forecast = model_forecast(model, series[:, np.newaxis], window_size)
rnn_forecast = rnn_forecast[split_time - window_size:-1, -1, 0]
# + colab_type="code" id="3vnDU8wm8OJd" colab={}
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, rnn_forecast)
# + colab_type="code" id="W6tWOoE88OJe" colab={}
keras.metrics.mean_absolute_error(x_valid, rnn_forecast).numpy()
# + [markdown] colab_type="text" id="kfPTmghd8OJe"
# ## Fully Convolutional Forecasting
# + colab_type="code" id="4-cPF5CX8OJf" colab={}
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
lr_schedule = keras.callbacks.LearningRateScheduler(
lambda epoch: 1e-4 * 10**(epoch / 30))
optimizer = keras.optimizers.Adam(lr=1e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
history = model.fit(train_set, epochs=100, callbacks=[lr_schedule])
# + colab_type="code" id="GfWVZ8k-8OJf" colab={}
plt.semilogx(history.history["lr"], history.history["loss"])
plt.axis([1e-4, 1e-1, 0, 30])
# + colab_type="code" id="WVrxlzbk8OJg" colab={}
keras.backend.clear_session()
tf.random.set_seed(42)
np.random.seed(42)
window_size = 64
train_set = seq2seq_window_dataset(x_train, window_size,
batch_size=128)
valid_set = seq2seq_window_dataset(x_valid, window_size,
batch_size=128)
model = keras.models.Sequential()
model.add(keras.layers.InputLayer(input_shape=[None, 1]))
for dilation_rate in (1, 2, 4, 8, 16, 32):
model.add(
keras.layers.Conv1D(filters=32,
kernel_size=2,
strides=1,
dilation_rate=dilation_rate,
padding="causal",
activation="relu")
)
model.add(keras.layers.Conv1D(filters=1, kernel_size=1))
optimizer = keras.optimizers.Adam(lr=3e-4)
model.compile(loss=keras.losses.Huber(),
optimizer=optimizer,
metrics=["mae"])
model_checkpoint = keras.callbacks.ModelCheckpoint(
"my_checkpoint.h5", save_best_only=True)
early_stopping = keras.callbacks.EarlyStopping(patience=50)
history = model.fit(train_set, epochs=500,
validation_data=valid_set,
callbacks=[early_stopping, model_checkpoint])
# + colab_type="code" id="eNwWZB0d8OJh" colab={}
model = keras.models.load_model("my_checkpoint.h5")
# + colab_type="code" id="PgYwn9VM8OJi" colab={}
cnn_forecast = model_forecast(model, series[..., np.newaxis], window_size)
cnn_forecast = cnn_forecast[split_time - window_size:-1, -1, 0]
# + colab_type="code" id="MCgshNPx8OJi" colab={}
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, cnn_forecast)
# + colab_type="code" id="epK1gFEN8OJj" colab={}
keras.metrics.mean_absolute_error(x_valid, cnn_forecast).numpy()
# + colab_type="code" id="af27sP3-FAFi" colab={}
|
Time Series Forecasting with CNN and Wavenet.ipynb
|
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Evaluation and submission analysis for DeepRacer
#
# This notebook has been built based on the `DeepRacer Log Analysis.ipynb` provided by the AWS DeepRacer Team. It has been reorganised and expanded to provide new views on the evaluation/racing data in a cleaner way, without the helper code which was moved into utility `.py` files.
#
# **You will find this notebook most useful for race submissions reviews and because of that it is mostly focusing on this goal.**
#
# ## Usage
#
# I am assuming here that you have already become familiar with `Training_analysis.ipynb`. Therefore descriptions that you will find here may be missing some bits if already described in there.
#
# Since this file can change in the future, I recommend that you make its copy and reorganize it to your liking. This way you will not lose your changes and you'll be able to add things as you please.
#
# **This notebook isn't complete.** What I find interesting in the logs may not be what you will find interesting and useful. I recommend you get familiar with the tools and try hacking around to get the insights that suit your needs.
#
# ## Contributions
#
# As usual, your ideas are very welcome and encouraged so if you have any suggestions either bring them to [the AWS DeepRacer Community](http://join.deepracing.io) or share as code contributions.
#
# ## Training environments
#
# Depending on whether you're running your evaluations through the console or using the local setup, and on which setup for local training you're using, your experience will vary. As much as I would like everything to be tailored to your configuration, there may be some problems that you may face. If so, please get in touch through [the AWS DeepRacer Community](http://join.deepracing.io).
#
# For race submissions it is much more straightforward.
#
# ## Requirements
#
# Before you start using the notebook, you will need to install some dependencies. If you haven't yet done so, have a look at [The README.md file](/edit/README.md#running-the-notebooks) to find what you need to install.
#
# Apart from the install, you also have to configure your programmatic access to AWS. Have a look at the guides below, AWS resources will lead you by the hand:
#
# AWS CLI: https://docs.aws.amazon.com/cli/latest/userguide/cli-chap-configure.html
#
# Boto Configuration: https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
#
# ## Credits
#
# I would like to thank [the AWS DeepRacer Community](http://join.deepracing.io) for all the feedback about the notebooks. If you'd like, follow [my blog](https://codelikeamother.uk) where I tend to write about my experiences with AWS DeepRacer.
#
# # Log Analysis
#
# Let's get to it.
#
# ## Imports
#
# Run the imports block below:
# +
from deepracer.tracks import TrackIO, Track
from deepracer.logs import DeepRacerLog as cw, \
AnalysisUtils as au, \
SimulationLogsIO as slio, \
EvaluationUtils as eu, \
PlottingUtils as pu
# Ignore deprecation warnings we have no power over
import warnings
warnings.filterwarnings('ignore')
# -
# ## Load waypoints for the track you want to run analysis on
#
# You will notice files for racing tracks. They are community best-effort versions made to make the visualisation in the logs less confusing. They may be slightly differing from reality, we don't know for sure. We do not have access to actual npy files that AWS use in the league.
#
# Tracks Available:
# +
# !ls tracks/
tu = TrackIO()
# +
# Load the community waypoint file for the track; road_poly displays its shape.
track: Track = tu.load_track("reinvent_base")
track.road_poly
# -
# ## Load all race submission logs
#
# **WARNING:** If you do not specify `not_older_than` parameter, all evaluation logs will be downloaded. They aren't as big as the training logs, but there is a lot of them.
#
# That said you can download all and then it will only download new ones unless you use force=True.
#
# There are also `not_older_than` and `older_than` parameters so you can choose to fetch all logs from a given period and compare them against each other. Just remember memory is finite.
#
# As mentioned, this method always fetches a list of log streams and then downloads only ones that haven't been downloaded just yet. You can therefore use it to fetch that list and load all the files from the path provided.
#
# Side note: if you want to download evaluation logs from AWS DeepRacer Console, this will be a bit more tricky. Evaluation logs are grouped together with training logs in same group `/aws/robomaker/SimulationJobs` and there isn't an obvious way to recognise which ones they are. That said, in `Evaluation Run Analysis` section below you have the ability to download a single evaluation file.
# + tags=[]
# replace this with the directory name inside your 'logs' directory
stream_name = 'test15-evaluation'
# For the purpose of generating the notebook in a reproducible way
# logs download has been commented out.
# Each entry is a (log file path, stream name) pair.
logs = [('logs/test15-evaluation/logs/evaluation/evaluation-20211119131748-82cMoKbnSGWL-ymsi1yFDQ-robomaker.log', 'test15-evaluation')]
# warning: old logs have old data structure, you will get 'list index out of range' error
#logs = [('logs/deepracer-eval-sim-sample.log', 'sim-sample')]
#logs = cw.download_all_logs(
#    'logs/deepracer-eval-',
#    '/aws/deepracer/leaderboard/SimulationJobs',
#    not_older_than="2019-07-01 07:00",
#    older_than="2019-07-01 12:00"
#)
# -
# Loads all the logs from the above time range
bulk = slio.load_a_list_of_logs(logs)
# ## Parse logs and visualize
#
# You will notice in here that reward graphs are missing, as are many others from the training. These have been trimmed down for clarity.
#
# Do not get tricked though - this notebook provides features that the training one doesn't have, such as batch visualisation of race submission laps.
#
# Side note: Evaluation/race logs contain a reward field but it's not connected to your reward. It is there most likely to ensure logs have a consistent structure to make their parsing easier. The value appears to be dependent on the distance of the car from the centre of the track. As such it provides no value and is not visualised in this notebook.
# +
# Aggregate per-episode statistics across all loaded streams.
simulation_agg = au.simulation_agg(bulk, 'stream', add_tstamp=True, is_eval=True)
# Laps that reached 100% progress, i.e. completed laps.
complete_ones = simulation_agg[simulation_agg['progress']==100]
# This gives the warning about ptp method deprecation. The code looks as if np.ptp was used, I don't know how to fix it.
au.scatter_aggregates(simulation_agg, is_eval=True)
if complete_ones.shape[0] > 0:
    au.scatter_aggregates(complete_ones, "Complete ones", is_eval=True)
# -
# ## Data in tables
# View fifteen most progressed attempts
simulation_agg.nlargest(15, 'progress')
# View fifteen fastest complete laps
complete_ones.nsmallest(15, 'time')
# View ten most recent lap attempts
simulation_agg.nlargest(10, 'tstamp')
# ## Plot all the evaluation laps
#
# The method below plots your evaluation attempts. Just note that this is a time-consuming operation and therefore I suggest using `min_distance_to_plot` to just plot some of them.
#
# If you would like to, in a below section of this article you can load a single log file to evaluate this.
#
# In the example below training track data was used for plotting the borders. Since then the community has put a lot of effort into preparing files that resemble the racing ones.
#
# If you want to plot a single lap, scroll down for an example which lets you do a couple more tricks.
pu.plot_evaluations(bulk, track)
# ## Single lap
# Below you will find some ideas of looking at a single evaluation lap. You may be interested in a specific part of it. This isn't very robust but can work as a starting point. Please submit your ideas for analysis.
#
# This place is a great chance to learn more about [Pandas](https://pandas.pydata.org/pandas-docs/stable/) and about how to process data series.
# Load a single lap
lap_df = bulk[(bulk['episode']==0) & (bulk['stream']==stream_name)]
# We're adding a lot of columns here to the episode. To speed things up, it's only done per a single episode, so others will currently be missing this information.
#
# Now try using them as a `graphed_value` parameter.
# +
# Derived per-step columns: Euclidean step distance, step duration, and rates.
lap_df.loc[:,'distance']=((lap_df['x'].shift(1)-lap_df['x']) ** 2 + (lap_df['y'].shift(1)-lap_df['y']) ** 2) ** 0.5
lap_df.loc[:,'time']=lap_df['tstamp'].astype(float)-lap_df['tstamp'].shift(1).astype(float)
# NOTE(review): the 100* factor suggests a unit conversion (cm vs m?) --
# confirm against the log coordinate units.
lap_df.loc[:,'speed']=lap_df['distance']/(100*lap_df['time'])
lap_df.loc[:,'acceleration']=(lap_df['distance']-lap_df['distance'].shift(1))/lap_df['time']
lap_df.loc[:,'progress_delta']=lap_df['progress'].astype(float)-lap_df['progress'].shift(1).astype(float)
lap_df.loc[:,'progress_delta_per_time']=lap_df['progress_delta']/lap_df['time']
pu.plot_grid_world(lap_df, track, graphed_value='reward')
# -
# ## Evaluation Run Analysis
#
# Debug your evaluation runs or analyze the laps. By providing the evaluation simulation id you can fetch a single log file and use it. You can do the same for race submission but I recommend using the bulk solution above. If you still want to do it, make sure to add `log_group = "/aws/robomaker/leaderboard/SimulationJobs"` to `download_log` call.
eval_sim = stream_name
# NOTE(review): double slash in 'logs//' looks accidental, though most
# filesystems tolerate it; the path is overwritten two lines below anyway.
eval_fname = 'logs//deepracer-eval-%s.log' % eval_sim
cw.download_log(eval_fname, stream_prefix=eval_sim)
eval_fname = "logs/test15-evaluation/logs/evaluation/evaluation-20211119131748-82cMoKbnSGWL-ymsi1yFDQ-robomaker.log"
# !head $eval_fname
eval_df = slio.load_pandas(eval_fname)
eval_df.head()
# ### Grid World Analysis
# The code below visualises laps from a single log file just like the one above visualises it in bulk for many.
eu.analyse_single_evaluation(eval_df, track)
|
Evaluation_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # BLU12 - Learning Notebook- Part 3 of 3 - Advanced Topics
# +
import os
from collections import defaultdict
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
from surprise import Dataset, Reader
from surprise.model_selection import train_test_split
from surprise.prediction_algorithms import KNNBasic
from scipy.sparse import coo_matrix, dok_matrix
from make_data import make_data
from export_ratings import export_ratings
# -
# # 1 Implicit Feedback
#
# Most times, RS algorithms ingest implicit feedback, i.e., unary ratings, to understand user preferences.
#
# In such cases, the unary data indicates whether a user $u \in U$ performed a given action (e.g., click, purchase).
#
# Afterward, this data is used on its own or combined with explicit ratings.
#
# In a way, ratings from unary data are ratings $r_{ui} \in S = \{1\}$, i.e., with a singleton or unit set of possible ratings $S$.
#
# Absent ratings $r_{ui} \notin R$ indicate that we have no information relating the user $u$ to the item $i$, just like before.
#
# (Perhaps the user purchased the item somewhere else, or the user didn't click the item because he didn't see it.)
#
# 
#
# We make, however, some distinctions.
#
# ## 1.1 Example
#
# We generated some fake unary data, using the [Faker](https://faker.readthedocs.io/en/master/) package.
#
# In `Learning Notebooks/make_data.py`, you find the `make_data()` function that generates two COO sparse matrices.
#
# This function is exactly like in the learning materials and exercises from [BLU11](https://github.com/LDSSA/batch2-blu11), so we don't repeat it.
users, items, clicks, purchases = make_data()
clicks
purchases
# The data contains exactly 50 users and 50 items, i.e., $|U| = 50$ and $|I| = 50$.
#
# We include 500 clicks and 500 purchases for us to play with.
# ## 1.2 Duplicate Entries
#
# For starters, the user $u \in U$ can perform an action, i.e., implicitly rate, multiple times for the same item $i$.
#
# This violates the assumptions of the matrix $R$, so upstream consolidation is required, enforcing one rating $r_{ui}$ for each pair $(u, i) \in U \times I$.
#
# Again, let $A$ be set of unary ratings, i.e., $a_{ui} \in S = \{1\}$, for user-item pairs $(u, i) \in U \times I$, which contains duplicate pairs.
#
# A common technique is to sum together duplicate entries, as:
#
# $$\sum\limits_{(u, i) \in U \times I} a_{ui}$$
#
# As we've seen in [BLU11](https://github.com/LDSSA/batch2-blu11), this is the default behavior when we convert from COO to CSR.
clicks_ = clicks.tocsr()
clicks_
# The reduction from 500 to 460 stored elements in the matrix is due to the consolidation.
#
# We can confirm this by calling `.max()` on it.
clicks_.max()
purchases_ = purchases.tocsr()
purchases_
purchases_.max()
# Another conventional technique is to use the logarithm of the sum, instead.
#
# $$\log{\sum\limits_{(u, i) \in U \times I} a_{ui}}$$
#
# The log transformation is particularly useful with right-skewed distributions, i.e., not centered, with a peak on the left and a tail on the right.
#
# (Imagine a user $u$ with few clicks on many items and many of clicks on a few items, which is very common.)
#
# We can apply this easily if we so choose, by applying the logarithm element-wise to the resulting matrix.
clicks_.log1p()
purchases.log1p()
# ## 1.3 Inferring Ratings
#
# Also, since we have multiple signals relating the user $u$ to item $i$, we have to consolidate them into a single rating.
#
# Different signals (e.g., impressions, clicks, purchases) have distinct signal-to-noise ratios and levels of intent and, thus, may require different weights.
#
# Consider the set $D$, containing all types of implicit feedback, e.g., $D = \{Click, Purchase\}$, with the associated weights $W$.
#
# We can compute the ratings $r_{ui}$, for $(u, i) \in U \times I$, as:
#
# $$r_{ui} = \sum\limits_{(u, i) \in U \times I} \Big(\sum\limits_{d \in D} w_d \cdot a_{ui}^d \Big)$$
#
# In our example, we attribute more relevance to purchases than clicks.
#
# (Please note that Python silently converts from COO to CSR, summing together duplicate entries by default.)
# +
def make_ratings(c, p, w_c, w_p):
    """Blend two implicit-feedback matrices into a single ratings matrix.

    Each rating is the weighted sum ``w_c * c + w_p * p``, so the caller
    controls the relative importance of clicks versus purchases.
    """
    weighted_clicks = w_c * c
    weighted_purchases = w_p * p
    return weighted_clicks + weighted_purchases
ratings = make_ratings(clicks, purchases, .3, .7)
ratings
# -
# ## 1.4 Exporting Ratings
#
# Once we have final ratings, it's good practice to export them in long-form, using the `'uid,iid,rating'` convention.
#
# We can do this easily, by converting back to COO and use the `.row`, `.col` and `.data` attributes.
# +
ratings_ = ratings.tocoo()
uid = np.array([users[row] for row in ratings_.row], dtype='O')
iid = np.array([items[col] for col in ratings_.col], dtype='O')
# -
data = ratings_.data
# For full implementation detail and NumPy nitty gritty, refer to `Learning Notebooks/export_ratings.py`.
export_ratings(users, items, ratings)
# From here onwards, we can use all the RS techniques we have learned.
#
# (Including using the Surprise package.)
# # 2 Generating top-*N* Lists
#
# Often, we task the RS with recommending a list $L_u$, containing $N$ items likely to be of interest to an active user $u$.
#
# This type of output is particularly frequent in the presence of implicit feedback and unary data, as ratings lose meaning *per se*.
#
# How can we generate such a list $L_u$, using Surprise?
dataset = Dataset.load_builtin('ml-100k')
R_train = dataset.build_full_trainset()
# We will use the `KNNBasic` to generate predictions, with all the defaults.
#
# (This may take a few minutes.)
# +
knn = KNNBasic()
knn.fit(R_train)
R_test = R_train.build_anti_testset()
R_pred = knn.test(R_test)
# -
# From the Surprise documentation, [this](https://surprise.readthedocs.io/en/stable/FAQ.html#how-to-get-the-top-n-recommendations-for-each-user) is the recommended way to extract a top-$N$ list for each user.
#
# (Slightly adapted, so that we can use it in the future).
# +
def get_top_n(predictions, n=10):
    """Build a top-``n`` recommendation list per user from Surprise predictions.

    ``predictions`` is an iterable of 5-tuples
    ``(uid, iid, true_rating, estimated_rating, details)``.
    Returns a DataFrame with one row per user (indexed by ``uid``) and one
    column per recommendation, ordered by decreasing estimated rating.
    """
    ranked = {}
    for uid, iid, _true_r, est, _details in predictions:
        ranked.setdefault(uid, []).append((iid, est))
    top_n = {
        uid: [iid for iid, _ in sorted(pairs, key=lambda pair: pair[1], reverse=True)[:n]]
        for uid, pairs in ranked.items()
    }
    return pd.DataFrame.from_dict(data=top_n, orient='index')
L = get_top_n(R_pred, n=10)
L.head()
# -
# This way, we generate a ranked list of recommendations $L_u$ for each user $u \in U$, in a convenient format:
# * One row per user, indexed with the `uid`
# * One column per recommendation, ordered by the estimated ranking.
#
# Now, we learn how to evaluate algorithms focused on learning top-$N$ lists.
# # 3 Evaluation Metrics for top-*N* Lists
#
# When ratings are not available, i.e., with unary data, measuring the rating prediction accuracy isn't possible.
#
# In these cases, evaluation is done using $R_{train}$ to learn $L_u$ and evaluating on $R_{test}$
#
# Let $T_u \subset I_u \cap I_{test}$ the subset of test items that the user $u$ found relevant, e.g., rated positively, clicked, purchased.
#
# ## 3.1 Precision
#
# Precision measures how many recommended items are relevant, out of all recommended items to the user $u$.
#
# $$Precision(L_u) = \frac{|L_u \cap T_u |}{|L_u|}$$
#
# To evaluate the RS as a whole, we average the precision for all active users $u \in U$.
#
# $$Precision(L) = \frac{\sum\limits_{u \in U} Precision(L_u)}{|U|}$$
#
# ## 3.2 Recall
#
# Recall, on the other side, relates to how many relevant were recommended, out of all relevant items for the user $u$.
#
# $$Recall(L_u) = \frac{|L_u \cap T_u |}{|T_u|}$$
#
# Again, to evaluate the RS we average the results of all active users $u \in U$.
#
# $$Recall(L) = \frac{\sum\limits_{u \in U} Recall(L_u)}{|U|}$$
#
# ## 3.3 Average Precision (AP)
#
# Precision and recall ignore the ordering. Therefore we need a ranking metric.
#
# To understand average precision, we must start with Precision@k and Recall@k, i.e., precision and recall up to cut-off $k$.
#
# In other words, we consider only the subset of recommendations $L_u^k \subset L_u$ from rank 1 through rank $k \leqslant N$.
#
# $$PrecisionAtk(L_u) = \frac{|L_u^k \cap T_u |}{|L_u^k|}$$
#
# $$RecallAtk(L_u) = \frac{|L_u^k \cap T_u |}{|T_u|}$$
#
# The AP is a ranking metric, measuring the frequency of relevant recommendations.
#
# $$APatN(L_u) = \frac{\sum\limits_{k = 1}^N (PrecisionAtk(L_u) \cdot relevant(k^{th})}{|T_u|}$$
#
# The $relevant(k^{th})$ bit is a boolean value, indicating whether the $k$-th element is relevant, or not.
#
# Every hit is valued as how many correct recommendations $|L_u^k \cap T_u|$ we have up to the rank $k$, out of all recommendations $|L_u^k|$.
#
# A first interpretation is that the AP increases only with correct recommendations (what a surprise!).
#
# Also, early hits, i.e., front-loading correct recommendations, carry over and are continuously rewarded.
#
# Finally, the AP can never decrease as you increase $N$.
#
# There is, however, an alternative formula for AP, in terms of both precision and the change in recall from the subset $k$ − 1 to the $k$-th.
#
# $$APatN(L_u) = \sum\limits_{k=1}^NPrecisionAtk(L_u) * \Delta RecallAtk(L_u)$$
#
# ## 3.4 Mean Average Precision (mAP)
#
# The Average Precision (AP) is further averaged over all users and reported as a single score.
#
# $$mAPatN(L) = \frac{\sum\limits_{u \in U} APatN(L_u)}{|U|}$$
#
# This way, we use a metric that considers both the number and the ranking of hits, i.e., useful recommendations.
#
# In this last section, we learned how to use unary data, make predictions based on it and how to evaluate our algorithms.
#
# Time to practice!
|
Learning Notebooks/BLU12 - Learning Notebook - Part 3 of 3 - Advanced Topics.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="d46ba3fd-26f1-4635-b2f9-fca916ff3066" _uuid="21f3ccd962d1556dc2346699d45a29e9ef791367"
import pandas as pd
import numpy as np
# Preprocessing
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import confusion_matrix, roc_curve, auc, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
# Visualisation libraries
## Text
from colorama import Fore, Back, Style
## seaborn
import seaborn as sns
sns.set_context('paper', rc={'font.size':12,'axes.titlesize':14,'axes.labelsize':12})
sns.set_style('white')
## matplotlib
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse, Polygon
import matplotlib.gridspec as gridspec
import matplotlib.colors
from pylab import rcParams
plt.style.use('seaborn-whitegrid')
plt.rcParams['figure.figsize'] = 14, 8
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
plt.rcParams['text.color'] = 'k'
# %matplotlib inline
# -
# Spooky Author Identification Dataset Natural Language Processing
# ============================
#
# # Dataset
#
# In this article, we would like to work on a [natural language processing](https://en.wikipedia.org/wiki/Natural_language_processing) (NLP) project. In doing so, we use a dataset from [Kaggle.com](https://www.kaggle.com/c/spooky-author-identification/overview).
#
# <div class="alert alert-block alert-info">
# <font size="+2"><b>Spooky Author Identification Dataset</b></font>
# </div>
#
#
# The dataset contains text from works of fiction written by spooky authors of the public domain: <NAME>, HP Lovecraft and <NAME>. The data was prepared by chunking larger texts into sentences using CoreNLP's MaxEnt sentence tokenizer, so you may notice the odd non-sentence here and there. Your objective is to accurately identify the author of the sentences in the test set.
#
# ### File descriptions
#
# * **train.csv** - the training set
# * **test.csv** - the test set
# * **sample_submission.csv** - a sample submission file in the correct format
#
# ### Data fields
# * **id** - a unique identifier for each sentence
# * **text** - some text written by one of the authors
# * **author** - the author of the sentence (EAP: <NAME>, HPL: HP Lovecraft; MWS: <NAME>)
#
# ## Loading the Datasets
# + _cell_guid="367e0329-7aeb-4f39-b1a9-d7395bdca993" _uuid="d6ea63db0ad0db09b25c35601391b71564601699"
Data = pd.read_csv('spooky-author-identification/train.csv')
Data.columns = [x.title().replace('Id','ID') for x in Data]
Pred = pd.read_csv('spooky-author-identification/test.csv')
Pred.columns = [x.title().replace('Id','ID') for x in Pred]
def Data_info(Inp, Only_NaN=False):
    """Summarise column dtypes and missing values of the DataFrame *Inp*.

    Returns a DataFrame indexed by column name with 'Data Type',
    'Number of NaN Values' and 'Percentage' (of rows that are NaN,
    rounded to 2 decimals). When *Only_NaN* is True, only columns that
    actually contain NaN values are kept.
    """
    dtype_frame = Inp.dtypes.to_frame(name='Data Type').sort_values(by=['Data Type'])
    nan_counts = Inp.isnull().sum().to_frame(name='Number of NaN Values')
    summary = dtype_frame.join(nan_counts, how='outer')
    summary['Percentage'] = np.round(100 * (summary['Number of NaN Values'] / Inp.shape[0]), 2)
    if Only_NaN:
        summary = summary[summary['Number of NaN Values'] > 0]
    return summary
Line = 100*'='
print(Back.BLACK + Fore.GREEN + Style.NORMAL + 'Data Dataset:' +
Style.RESET_ALL + Fore.BLUE + Style.NORMAL + ' %s' % Line + Style.RESET_ALL)
display(Data.head())
display(Data_info(Data))
print(Back.BLACK + Fore.MAGENTA + Style.NORMAL + 'Pred Dataset:' +
Style.RESET_ALL + Fore.BLUE + Style.NORMAL + ' %s' % Line + Style.RESET_ALL)
display(Pred.head())
display(Data_info(Pred))
print(Fore.BLUE + Style.NORMAL + '%s' % (Line + Line[:16]) + Style.RESET_ALL)
# -
# # Problem Description
#
# The object of the exercise is to recognize/predict the author of a text by creating an NLP model.
# # Modeling
#
# First off, let's define $X$ and $y$ sets.
# ## Preprocessing
#
# In this section, we use [**sklearn.preprocessing.LabelEncoder**](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.LabelEncoder.html) to encode the **Author** column from the **Data** dataset.
# +
# Feature matrix: the raw sentences.
X = Data['Text'].values
# Encoding: map the three author initials to integer class codes
# (LabelEncoder assigns codes in sorted order of the class labels).
le_author = LabelEncoder()
y = le_author.fit_transform(Data['Author'].values)
# Map: display which integer code corresponds to which author.
print(Back.BLACK + Fore.YELLOW + Style.NORMAL + 'Author Map:' + Style.RESET_ALL)
Author_map = {'Author': le_author.classes_, 'Code': np.unique(y)}
display(pd.DataFrame(Author_map).style.hide_index())
# -
# ## Train and Test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
pd.DataFrame(data={'Set':['X_train','X_test','y_train','y_test'],
'Shape':[X_train.shape, X_test.shape, y_train.shape, y_test.shape]}).set_index('Set').T
# ## TF-IDF features
#
# Moreover, we would use [**sklearn.feature_extraction.text.TfidfVectorizer**](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfVectorizer.html) to convert **Text** data to a matrix of **TF-IDF features**.
Tfidf_Vec = TfidfVectorizer(min_df=3, max_features=None,
strip_accents='unicode', analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), use_idf=1,smooth_idf=1,sublinear_tf=1,
stop_words = 'english')
_ = Tfidf_Vec.fit(list(X))
X_train_TFIDFvec = Tfidf_Vec.transform(X_train)
X_test_TFIDFvec = Tfidf_Vec.transform(X_test)
# ## Count Vectorizer
#
# An alternative approach would be using [**sklearn.feature_extraction.text.CountVectorizer**](https://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.CountVectorizer.html) to convert **Text** data to a matrix of token counts.
# +
Count_Vec = CountVectorizer(analyzer='word',token_pattern=r'\w{1,}',
ngram_range=(1, 3), stop_words = 'english')
_ = Count_Vec.fit(list(X))
X_train_cv = Count_Vec.transform(X_train)
X_test_cv = Count_Vec.transform(X_test)
# -
# ## Logistic Regression (using TF-IDF features)
#
# We can use [**sklearn.linear_model.LogisticRegression**](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html).
# Logistic Regression
logr = LogisticRegression(C = 1.0, max_iter = 500, n_jobs = -1)
_ = logr.fit(X_train_TFIDFvec, y_train)
# ### Classification Report
Labels = Author_map['Author'].tolist()
y_pred = logr.predict_proba(X_test_TFIDFvec)
Results = pd.DataFrame(classification_report(y_test, y_pred.argmax(axis=1), target_names= Labels, output_dict=True)).T
display(Results.round(2))
# ### Receiver Operating Characteristic (ROC) Curve
# +
# One-hot encode the true labels so each class gets its own ROC curve.
Temp = pd.get_dummies(y_test).values
# Use the unthresholded decision scores rather than predicted labels.
y_score = logr.decision_function(X_test_TFIDFvec)
n_classes = y_score.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
    fpr[i], tpr[i], _ = roc_curve(Temp[:, i], y_score[:, i])
    roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(Temp.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# ROC curve
# NOTE(review): only the curve for class index 2 is plotted, although the loop
# above computes curves for every class — consider plotting all of them.
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
_ = ax.plot(fpr[2], tpr[2], lw=2, label = 'AUC = %0.2f' % roc_auc[2])
_ = ax.plot([0, 1], [0, 1],'r--', lw=2)
delta = 0.01
_ = ax.set_xlim([-delta,1+delta])
_ = ax.set_ylim([-delta,1+delta])
_ = ax.set_xlabel('False Positive Rate (FPR)')
_ = ax.set_ylabel('True Positive Rate (TPR)')
_ = ax.set_title('Receiver Operating Characteristic (ROC) Curve')
_ = ax.legend(loc="lower right", fontsize = 16)
# Free the temporaries so later cells can re-use the same names cleanly.
del Temp, y_score, n_classes, fpr, tpr, roc_auc, delta
# -
# ### Confusion Matrix
# +
# Train set
y_pred = logr.predict_proba(X_train_TFIDFvec)
Confusion_Matrix = confusion_matrix(y_train, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Train Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# Test set
y_pred = logr.predict_proba(X_test_TFIDFvec)
Confusion_Matrix = confusion_matrix(y_test, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Test Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# -
# ## Logistic Regression (using Count Vectorizer)
# Logistic Regression
logr = LogisticRegression(C = 1.0, max_iter = 500, n_jobs = -1)
_ = logr.fit(X_train_cv, y_train)
# ### Classification Report
Labels = Author_map['Author'].tolist()
y_pred = logr.predict_proba(X_test_cv)
Results = pd.DataFrame(classification_report(y_test, y_pred.argmax(axis=1), target_names= Labels, output_dict=True)).T
display(Results.round(2))
# ### Receiver Operating Characteristic (ROC) Curve
# +
Temp = pd.get_dummies(y_test).values
y_score = logr.decision_function(X_test_cv)
n_classes = y_score.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(Temp[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(Temp.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# ROC curve
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
_ = ax.plot(fpr[2], tpr[2], lw=2, label = 'AUC = %0.2f' % roc_auc[2])
_ = ax.plot([0, 1], [0, 1],'r--', lw=2)
delta = 0.01
_ = ax.set_xlim([-delta,1+delta])
_ = ax.set_ylim([-delta,1+delta])
_ = ax.set_xlabel('False Positive Rate (FPR)')
_ = ax.set_ylabel('True Positive Rate (TPR)')
_ = ax.set_title('Receiver Operating Characteristic (ROC) Curve')
_ = ax.legend(loc="lower right", fontsize = 16)
del Temp, y_score, n_classes, fpr, tpr, roc_auc, delta
# -
# ### Confusion Matrix
# +
# Train set
y_pred = logr.predict_proba(X_train_cv)
Confusion_Matrix = confusion_matrix(y_train, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Train Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# Test set
y_pred = logr.predict_proba(X_test_cv)
Confusion_Matrix = confusion_matrix(y_test, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Test Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# -
# ## GridSearch Logistic Regression
# Let's try the Logistic Regression again; however this time with the benefit of using [**GridSearchCV**](https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GridSearchCV.html)
# +
# Parameters
param_grid = {'tol': [10.0**x for x in np.arange(-2, -4, -1)], 'C': [1, 2 , 4, 6, 10],}
# Logistic Regression
logr = LogisticRegression(max_iter = 1e4)
# Searching over specified parameter values for an estimator.
grid_model = GridSearchCV(logr, param_grid, n_jobs=-1)
# Fitting
_ = grid_model.fit(X_train_TFIDFvec, y_train)
# +
display(pd.DataFrame({'Best Score': [grid_model.best_score_],
'Best Paramerers': [str(grid_model.best_params_)],
'Accuracy': [grid_model.score(X_test_TFIDFvec,y_test)]}).round(4).style.hide_index())
display(pd.DataFrame(grid_model.cv_results_)[['rank_test_score',
'params','mean_test_score']].sort_values(by=['rank_test_score']).style.hide_index()\
.background_gradient(cmap='YlGn', subset=['mean_test_score']).set_precision(4))
Temp = [str(x) for x in grid_model.cv_results_['params']]
Temp = [s.replace('{', '') for s in Temp]
Temp = [s.replace('}', '') for s in Temp]
Temp = [s.replace("'", '') for s in Temp]
# Plot the results of the grid search.
fig, ax = plt.subplots(1, 2, figsize=(12.5, 7))
# left
_ = ax[0].errorbar(x= Temp,
y=grid_model.cv_results_['mean_test_score'],
yerr=grid_model.cv_results_['std_test_score'], uplims=True, lolims=True)
_ = ax[0].set(xlabel='Paramerers', title='Classification accuracy')
_ = ax[0].set_xticklabels(labels = Temp, rotation=90, fontsize = 10)
# Right
_ = ax[1].errorbar(x= Temp,
y=grid_model.cv_results_['mean_fit_time'],
yerr=grid_model.cv_results_['std_fit_time'], color='r', uplims=True, lolims=True)
_ = ax[1].set(xlabel='Paramerers', title='Fit time (with caching)')
_ = ax[1].set_xticklabels(labels = Temp, rotation=90, fontsize = 10)
fig.tight_layout()
# -
# Therefore,
# Logistic Regression
logr = LogisticRegression(C = grid_model.best_params_['C'],
tol = grid_model.best_params_['tol'],
max_iter = 1e4, n_jobs=-1)
_ = logr.fit(X_train_TFIDFvec, y_train)
# ### Classification Report
Labels = Author_map['Author'].tolist()
y_pred = logr.predict_proba(X_test_TFIDFvec)
Results = pd.DataFrame(classification_report(y_test, y_pred.argmax(axis=1), target_names= Labels, output_dict=True)).T
display(Results.round(2))
# ### Receiver Operating Characteristic (ROC) Curve
# +
Temp = pd.get_dummies(y_test).values
y_score = logr.decision_function(X_test_TFIDFvec)
n_classes = y_score.shape[1]
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(Temp[:, i], y_score[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(Temp.ravel(), y_score.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
# ROC curve
fig, ax = plt.subplots(1, 1, figsize=(6, 6))
_ = ax.plot(fpr[2], tpr[2], lw=2, label = 'AUC = %0.2f' % roc_auc[2])
_ = ax.plot([0, 1], [0, 1],'r--', lw=2)
delta = 0.01
_ = ax.set_xlim([-delta,1+delta])
_ = ax.set_ylim([-delta,1+delta])
_ = ax.set_xlabel('False Positive Rate (FPR)')
_ = ax.set_ylabel('True Positive Rate (TPR)')
_ = ax.set_title('Receiver Operating Characteristic (ROC) Curve')
_ = ax.legend(loc="lower right", fontsize = 16)
del Temp, y_score, n_classes, fpr, tpr, roc_auc, delta
# -
# ### Confusion Matrix
# +
# Train set
y_pred = logr.predict_proba(X_train_TFIDFvec)
Confusion_Matrix = confusion_matrix(y_train, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Train Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# Test set
y_pred = logr.predict_proba(X_test_TFIDFvec)
Confusion_Matrix = confusion_matrix(y_test, y_pred.argmax(axis=1))
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
fig.suptitle('Test Set', fontsize = 18)
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Blues", ax = ax[0],
linewidths = 0.2, cbar_kws={"shrink": 1})
_ = ax[0].set_xlabel('Predicted labels')
_ = ax[0].set_ylabel('True labels');
_ = ax[0].set_title('Confusion Matrix');
_ = ax[0].xaxis.set_ticklabels(Labels)
_ = ax[0].yaxis.set_ticklabels(Labels)
Confusion_Matrix = Confusion_Matrix.astype('float') / Confusion_Matrix.sum(axis=1)[:, np.newaxis]
_ = sns.heatmap(Confusion_Matrix, annot=True, annot_kws={"size": 14}, cmap="Greens", ax = ax[1],
linewidths = 0.2, vmin=0, vmax=1, cbar_kws={"shrink": 1})
_ = ax[1].set_xlabel('Predicted labels')
_ = ax[1].set_ylabel('True labels');
_ = ax[1].set_title('Normalized Confusion Matrix');
_ = ax[1].xaxis.set_ticklabels(Labels)
_ = ax[1].yaxis.set_ticklabels(Labels)
# -
# # Predictions
# Using the model, we can now predict the text available from the **Pred** (Test) dataset.
# +
X_pred = Tfidf_Vec.transform(Pred.Text.values)
y_pred = logr.predict_proba(X_pred)
Predictions = pd.concat([Pred, pd.DataFrame(y_pred, columns = Labels)], axis =1)
Predictions
# -
# ***
|
natural_language_processing/Spooky_Author_Identification_Dataset_NLP_using_LogReg.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
import tensorflow as tf
import numpy as np
class SOM(object):
"""
2-D Self-Organizing Map with Gaussian Neighbourhood function
and linearly decreasing learning rate.
"""
#To check if the SOM has been trained
_trained = False
    def __init__(self, m, n, dim, n_iterations=100, alpha=None, sigma=None):
        """
        Initializes all necessary components of the TensorFlow Graph.

        m X n are the dimensions of the SOM. 'n_iterations' should
        be an integer denoting the number of iterations undergone
        while training.
        'dim' is the dimensionality of the training inputs.
        'alpha' is a number denoting the initial time(iteration no)-based
        learning rate. Default value is 0.3
        'sigma' is the initial neighbourhood value, denoting
        the radius of influence of the BMU while training. By default, it's
        taken to be half of max(m, n).

        NOTE(review): this code uses the pre-1.0 TensorFlow API
        (tf.sub/tf.mul/tf.pack/tf.neg/tf.initialize_all_variables were all
        renamed or removed in TF >= 1.0) and a Python 2 kernel — confirm the
        intended runtime before porting.
        """
        #Assign required variables first
        self._m = m
        self._n = n
        if alpha is None:
            alpha = 0.3
        else:
            alpha = float(alpha)
        if sigma is None:
            sigma = max(m, n) / 2.0
        else:
            sigma = float(sigma)
        self._n_iterations = abs(int(n_iterations))

        ##INITIALIZE GRAPH
        self._graph = tf.Graph()

        ##POPULATE GRAPH WITH NECESSARY COMPONENTS
        with self._graph.as_default():

            ##VARIABLES AND CONSTANT OPS FOR DATA STORAGE

            #Randomly initialized weightage vectors for all neurons,
            #stored together as a matrix Variable of size [m*n, dim]
            self._weightage_vects = tf.Variable(tf.random_normal(
                [m*n, dim]))

            #Matrix of size [m*n, 2] for SOM grid locations
            #of neurons
            self._location_vects = tf.constant(np.array(
                list(self._neuron_locations(m, n))))

            ##PLACEHOLDERS FOR TRAINING INPUTS
            #We need to assign them as attributes to self, since they
            #will be fed in during training

            #The training vector
            self._vect_input = tf.placeholder("float", [dim])
            #Iteration number
            self._iter_input = tf.placeholder("float")

            ##CONSTRUCT TRAINING OP PIECE BY PIECE
            #Only the final, 'root' training op needs to be assigned as
            #an attribute to self, since all the rest will be executed
            #automatically during training

            #To compute the Best Matching Unit given a vector
            #Basically calculates the Euclidean distance between every
            #neuron's weightage vector and the input, and returns the
            #index of the neuron which gives the least value
            bmu_index = tf.argmin(tf.sqrt(tf.reduce_sum(
                tf.pow(tf.sub(self._weightage_vects, tf.pack(
                    [self._vect_input for i in range(m*n)])), 2), 1)),
                                  0)

            #This will extract the location of the BMU based on the BMU's
            #index (pad turns the scalar index into a [row, 0] slice start)
            slice_input = tf.pad(tf.reshape(bmu_index, [1]),
                                 np.array([[0, 1]]))
            bmu_loc = tf.reshape(tf.slice(self._location_vects, slice_input,
                                          tf.constant(np.array([1, 2]))),
                                 [2])

            #To compute the alpha and sigma values based on iteration
            #number; both decay linearly from their initial value to 0
            learning_rate_op = tf.sub(1.0, tf.div(self._iter_input,
                                                  self._n_iterations))
            _alpha_op = tf.mul(alpha, learning_rate_op)
            _sigma_op = tf.mul(sigma, learning_rate_op)

            #Construct the op that will generate a vector with learning
            #rates for all neurons, based on iteration number and location
            #wrt BMU. The neighbourhood function is a Gaussian
            #exp(-d^2 / sigma^2) of the grid distance to the BMU.
            bmu_distance_squares = tf.reduce_sum(tf.pow(tf.sub(
                self._location_vects, tf.pack(
                    [bmu_loc for i in range(m*n)])), 2), 1)
            neighbourhood_func = tf.exp(tf.neg(tf.div(tf.cast(
                bmu_distance_squares, "float32"), tf.pow(_sigma_op, 2))))
            learning_rate_op = tf.mul(_alpha_op, neighbourhood_func)

            #Finally, the op that will use learning_rate_op to update
            #the weightage vectors of all neurons based on a particular
            #input: each weight vector moves towards the input by its
            #per-neuron learning rate
            learning_rate_multiplier = tf.pack([tf.tile(tf.slice(
                learning_rate_op, np.array([i]), np.array([1])), [dim])
                                                for i in range(m*n)])
            weightage_delta = tf.mul(
                learning_rate_multiplier,
                tf.sub(tf.pack([self._vect_input for i in range(m*n)]),
                       self._weightage_vects))
            new_weightages_op = tf.add(self._weightage_vects,
                                       weightage_delta)
            self._training_op = tf.assign(self._weightage_vects,
                                          new_weightages_op)

            ##INITIALIZE SESSION
            self._sess = tf.Session()

            ##INITIALIZE VARIABLES
            init_op = tf.initialize_all_variables()
            self._sess.run(init_op)
def _neuron_locations(self, m, n):
"""
Yields one by one the 2-D locations of the individual neurons
in the SOM.
"""
#Nested iterations over both dimensions
#to generate all 2-D locations in the map
for i in range(m):
for j in range(n):
yield np.array([i, j])
def train(self, input_vects):
"""
Trains the SOM.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Current weightage vectors for all neurons(initially random) are
taken as starting conditions for training.
"""
#Training iterations
for iter_no in range(self._n_iterations):
#Train with each vector one by one
for input_vect in input_vects:
self._sess.run(self._training_op,
feed_dict={self._vect_input: input_vect,
self._iter_input: iter_no})
#Store a centroid grid for easy retrieval later on
centroid_grid = [[] for i in range(self._m)]
self._weightages = list(self._sess.run(self._weightage_vects))
self._locations = list(self._sess.run(self._location_vects))
for i, loc in enumerate(self._locations):
centroid_grid[loc[0]].append(self._weightages[i])
self._centroid_grid = centroid_grid
self._trained = True
def get_centroids(self):
"""
Returns a list of 'm' lists, with each inner list containing
the 'n' corresponding centroid locations as 1-D NumPy arrays.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
return self._centroid_grid
def map_vects(self, input_vects):
"""
Maps each input vector to the relevant neuron in the SOM
grid.
'input_vects' should be an iterable of 1-D NumPy arrays with
dimensionality as provided during initialization of this SOM.
Returns a list of 1-D NumPy arrays containing (row, column)
info for each input vector(in the same order), corresponding
to mapped neuron.
"""
if not self._trained:
raise ValueError("SOM not trained yet")
to_return = []
for vect in input_vects:
min_index = min([i for i in range(len(self._weightages))],
key=lambda x: np.linalg.norm(vect-
self._weightages[x]))
to_return.append(self._locations[min_index])
return to_return
|
som/som_example.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0
# language: julia
# name: julia-1.7
# ---
# # MATH50003 Numerical Analysis: Problem Sheet 7
#
# This problem sheet explores condition numbers, indefinite integration,
# and Euler's method.
#
# Questions marked with a ⋆ are meant to be completed without using a computer.
# Problems are denoted A/B/C to indicate their difficulty.
using LinearAlgebra, Plots, Test
# ## 1. Two-point boundary value problems
#
# **Problem 1.1 (C)** Construct a finite-difference approximation to the
# forced Helmholtz equation
# $$
# \begin{align*}
# u(0) &= 0 \\
# u(1) &= 0 \\
# u'' + k^2 u &= {\rm e}^x
# \end{align*}
# $$
# and find an $n$ such the error is less than $10^{-4}$ when compared
# with the true solution for $k=10$:
# $$
# u(x) = (-\cos(k x) + {\rm e}^x \cos(k x)^2 + \cot(k) \sin(k x) - {\rm e} \cos(k) \cot(k) \sin(k x) - {\rm e} \sin(k) \sin(k x) + {\rm e}^x \sin(k x)^2)/(1 + k^2)
# $$
# +
# Solve u'' + k^2 u = e^x with u(0) = u(1) = 0 by a second-order
# finite-difference discretisation on an n-point uniform grid.
function helm(k, n)
    grid = range(0, 1; length = n)
    h = step(grid)
    # Symmetric tridiagonal operator on the n-2 interior nodes:
    # diagonal entries k^2 - 2/h^2, off-diagonal entries 1/h^2.
    A = SymTridiagonal(fill(k^2 - 2/h^2, n-2), fill(1/h^2, n-3))
    interior = A \ exp.(grid[2:end-1])
    # Re-attach the homogeneous Dirichlet boundary values.
    [0; interior; 0]
end
# Exact solution of the forced Helmholtz problem for k = 10 (quoted in the
# problem statement), used to measure the discretisation error.
k = 10
u = x -> (-cos(k*x) + exp(x)cos(k*x)^2 + cot(k)sin(k*x) - ℯ*cos(k)cot(k)sin(k*x) - ℯ*sin(k)sin(k*x) + exp(x)sin(k*x)^2)/(1 + k^2)
n = 2048 # n chosen large enough that the finite-difference error drops below 1E-4
x = range(0, 1; length=n)
@test norm(helm(k, n) - u.(x)) ≤ 1E-4
# -
# **Problem 1.2 (A)** Discretisations can also be used to solve eigenvalue problems.
# Consider the Schrödinger equation with quadratic oscillator:
# $$
# u(-L) = u(L) = 0, -u'' + x^2 u = λ u
# $$
# (a) Use the finite-difference approximation to discretise this equation as eigenvalues of a
# matrix. Hint: write
# $$
# \begin{align*}
# u(-L) = 0 \\
# -u'' + x^2 u - λu = 0\\
# u(L) = 0
# \end{align*}
# $$
# and discretise as before, doing row eliminations to arrive at a symmetric tridiagonal
# matrix eigenvalue problem.
# (b) Approximate the eigenvalues using `eigvals(A)` (which returns the eigenvalues of a
# matrix `A`) with $L = 10$.
# Can you conjecture their exact value if $L = ∞$? Hint: they are integers and the eigenvalues
# closest to zero are most accurate.
#
# **SOLUTION**
# We discretise on a grid $u_1,u_2,…,u_n$ for an evenly spaced grid between $[-L,L]$, with
# step size $h = 2L/(n-1)$. That is, we have the equations:
# $$
# \begin{bmatrix}
# 1 \\
# -1/h^2 & 2/h^2 + x_2^2 - λ & -1/h^2 \\
# & ⋱ & ⋱ & ⋱ \\
# && -1/h^2 & 2/h^2 + x_{n-1}^2 - λ & -1/h^2 \\
# &&&& 1 \end{bmatrix}
# \begin{bmatrix} u_1 \\ \vdots \\ u_n \end{bmatrix} = 0
# $$
# Row eliminations at the top and bottom reduce this equation to:
# $$
# \begin{bmatrix}
# 2/h^2 + x_2^2 & -1/h^2 \\
# & ⋱ & ⋱ & ⋱ \\
# && -1/h^2 & 2/h^2 + x_{n-1}^2 \end{bmatrix}
# \begin{bmatrix} u_2 \\ \vdots \\ u_{n-1} \end{bmatrix} = λ\begin{bmatrix} u_2 \\ \vdots \\ u_{n-1} \end{bmatrix}
# $$
# This is a standard eigenvalue problem and we can compute the eigenvalues using `eigvals`:
# Discretise the quadratic-oscillator Schrödinger operator on [-L, L] and
# compute the eigenvalues of the resulting symmetric tridiagonal matrix.
L = 10
n = 1000
x = range(-L,L; length=n)
h = step(x)
eigvals(SymTridiagonal(fill(2/h^2,n-2) + x[2:end-1].^2, fill(-1/h^2, n-3)))
# On inspection of the smallest values, it seems that the positive odd integers are the eigenvalues for $L = \infty$. Increasing $L$ (and also $n$) it becomes more obvious:
# Larger domain and finer grid: the smallest eigenvalues approach the odd
# integers 1, 3, 5, … — the conjectured exact values for L = ∞.
L = 100
n = 10000
x = range(-L,L; length = n)
h = step(x)
A = SymTridiagonal(x[2:end-1] .^ 2 .+ 2/h^2,ones(n-3)* (-1)/h^2)
sort((eigvals(A)))[1:20]
# **Problem 1.3⋆ (A)** Consider Helmholtz with Neumann conditions:
# $$
# u'(0) = c_0 \\
# u'(1) = c_1 \\
# u_{xx} + k^2 u = f(x)
# $$
# Write down the finite difference approximation approximating
# $u(x_k) ≈ u_k$ on
# an evenly spaced grid $x_k = (k-1)/(n-1)$ for $k=1,…,n$
# using the first order derivative approximation conditions:
# $$
# \begin{align*}
# u'(0) &≈ (u_2-u_1)/h = c_0 \\
# u'(1) &≈ (u_n-u_{n-1})/h = c_1
# \end{align*}
# $$
# Use pivoting to reduce the equation to one involving a
# symmetric tridiagonal matrix.
#
# **SOLUTION**
#
# We have, with $u(x_k) = u_k$ (and using $\kappa$ instead of $k$ in the equation $u_{xx} + k^2u = f(x)$ so as to avoid confusion with the indices):
# \begin{align*}
# \frac{u_2 - u_1}{h} &= c_0, \\
# \frac{u_{k-1} - 2u_k + u_{k+1}}{h^2} + \kappa^2u_k &= f(x_k), \hspace{5mm} \textrm{ for } k=2:n-1\\
# \frac{u_n - u_{n-1}}{h} &= c_1,
# \end{align*}
# which we write in matrix form as:
#
# $$
# \left[\begin{matrix}
# -\frac{1}{h} & \frac{1}{h} \\
# \frac{1}{h^2} & \kappa^2 - \frac{2}{h^2} & \frac{1}{h^2} \\
# &\ddots & \ddots & \ddots \\
# &&\frac{1}{h^2} & \kappa^2 - \frac{2}{h^2} & \frac{1}{h^2} \\
# &&& -\frac{1}{h} & \frac{1}{h}
# \end{matrix}
# \right] \mathbf{u} = \left[\begin{matrix}
# c_0 \\ f(x_2)\\ \vdots \\f(x_{n-1}) \\ c_1
# \end{matrix}\right],
# $$
# which we can make symmetric tridiagonal by multiplying the first row by $1/h$ and the final row by $-1/h$:
# $$
# \left[\begin{matrix}
# -\frac{1}{h^2} & \frac{1}{h^2} \\
# \frac{1}{h^2} & \kappa^2 - \frac{2}{h^2} & \frac{1}{h^2} \\
# &\ddots & \ddots & \ddots \\
# &&\frac{1}{h^2} & \kappa^2 - \frac{2}{h^2} & \frac{1}{h^2} \\
# &&& \frac{1}{h^2} & -\frac{1}{h^2}
# \end{matrix}
# \right] \mathbf{u} = \left[\begin{matrix}
# \frac{c_0}{h} \\ f(x_2)\\ \vdots \\f(x_{n-1}) \\ -\frac{c_1}{h}
# \end{matrix}\right],
# $$
#
#
#
# ## 2. Convergence
#
# **Problem 2.1⋆ (B)** For the equation
# $$
# \begin{align*}
# u(0) &= c_0 \\
# u' + au &= f(x)
# \end{align*}
# $$
# where $a ∈ ℝ$ and $0 ≤ x ≤ 1$,
# prove convergence as $n → ∞$ for the method constructed in PS6 using the approximation
# where we take the average of the two grid points:
# $$
# {u'(x_{k+1}) + u'(x_k) \over 2} ≈ {u_{k+1} - u_k \over h}.
# $$
#
# **SOLUTION**
# Using the approximation from PS6 we obtain
#
# $${f(x_{k+1}) + f(x_k)\over 2} = { u'(x_{k+1}) + u'(x_k) \over 2} + {a(u(x_{k+1}) + u(x_k)) \over 2}≈ {(u_{k+1} - u_k ) \over h} + {a u_{k+1}\over 2} + {a u_k \over 2}$$
#
# So we get
#
# $$\left(\frac{a}{2}-\frac{1}{h}\right)u_k + \left(\frac{a}{2}+\frac{1}{h}\right)u_{k+1} = \frac{f(x_{k+1})+f(x_k)}{2}$$
#
# We want to prove that $\sup_{k=1,...,n-1}|u(x_k) - u_{k}|$ converges to 0 as $n\to \infty$.
#
# Take $\hat u = [u_0,...,u_{n-1}]^T$ and rewrite the system as
#
# $$\hat L \hat u = \begin{bmatrix} c_0 \\ \hat fᶠ \end{bmatrix}$$
#
# where $f_k = {f(x_{k})+f(x_{k-1}) \over 2}$, $k=1,...,n-1$ and
#
# $$\hat L =
# \begin{bmatrix}
# 1 \\
# {a\over 2} - {1 \over h} & {a\over 2} + {1 \over h} \\
# & {a\over 2} - {1 \over h} & {a\over 2} + {1 \over h}\\
# && \ddots & \ddots \\
# &&& {a\over 2} - {1 \over h} & {a\over 2} + {1 \over h}
# \end{bmatrix}$$
#
# Note that $\hat L$ is lower bidiagonal.
#
# Now, similarly to Euler's methods convergence theorem, we study consistency and stability.
#
# ##### Consistency:
# Our discretisation approximates the true equation.
#
# $$
# \begin{align*}
# L̂ u &= \begin{bmatrix} c_0 \\
# {u(x_1) - u(x_0) \over h} + {a\over2}(u(x_1) + u(x_0)) \\
# ⋮ \\
# {u(x_{n-1}) - u(x_{n-2}) \over h} + {a\over2}(u(x_{n-1}) + u(x_{n-2}))\end{bmatrix}
# = \begin{bmatrix} c_0 \\
# \frac{1}{2}\left({u(x_1) - u(x_0) \over h} +{u(x_1) - u(x_0) \over h} + {a}(u(x_1) + u(x_0))\right) \\
# ⋮ \\
# \frac{1}{2}\left({u(x_{n-1}) - u(x_{n-2}) \over h} + {u(x_1) - u(x_0) \over h} + {a}(u(x_{n-1}) + u(x_{n-2}))\right)\end{bmatrix} \\
# &= \begin{bmatrix} c_0 \\
# \frac{1}{2}\left(u'(x_0) + a u(x_0) + u''(τ_0) h + u'(x_1) + a u(x_1) + u''(σ_1) h \right)\\
# ⋮ \\
# \frac{1}{2}\left(u'(x_{n-2}) + a u(x_{n-2}) + u''(τ_{n-2}) h + u'(x_{n-1}) + a u(x_{n-1}) + u''(σ_{n-1}) h \right) \end{bmatrix} =
# \begin{bmatrix} c_0 \\
# {f(x_0)+f(x_1)\over 2} + {u''(τ_0)+u''(σ_1)\over 2} h \\
# ⋮ \\
# {f(x_{n-2})+f(x_{n-1})\over 2} + {u''(τ_{n-2})+u''(σ_{n-1})\over 2} h \end{bmatrix} \\
# &=
# \begin{bmatrix} c_0 \\ \hat fᶠ \end{bmatrix} + \begin{bmatrix} 0 \\ δ \end{bmatrix}
# \end{align*}
# $$
#
# where $x_k ≤ τ_k, σ_k ≤ x_{k+1}$, and uniform boundedness implies that $\|δ\|_∞ = O(h)$
#
# ##### Stability:
# The inverse does not blow up the error.
#
# $$
# \hat L = \underbrace{\begin{bmatrix} 1 \\ & \left({a\over 2} + {1 \over h}\right) \\ && ⋱ \\ &&& \left({a\over 2} + {1 \over h}\right) \end{bmatrix}}_D
# \underbrace{\begin{bmatrix} 1 \\ \left({a\over 2} + {1 \over h}\right)^{-1}\left({a\over 2} - {1 \over h}\right) & 1 \\ & ⋱ & ⋱ \\ && \left({a\over 2} + {1 \over h}\right)^{-1}\left({a\over 2} - {1 \over h}\right) &1 \end{bmatrix}}_{ L}
# $$
#
# Thus, we have
# $$
# \| L^{-1}\|_{1 → ∞} ≤ \left|
# \left({a\over 2} + {1 \over h}\right)^{-1}\left({a\over 2} - {1 \over h}\right)
# \right|^{n-1} = O(1)
# $$
# as $n → ∞$, where one can take logarithms and use L'Hopitals rule to show that it actually tends to a limit.
# Note that
# $$
# \left|{a\over 2} + {1 \over h}\right|^{-1} = \left|{h \over {ah \over 2} + 1}\right|\le 2h
# $$
# for sufficiently small $h$ (or large $n$).
# Combining stability and consistency we have, for sufficently small $h$,
# $$
# \|𝐮ᶠ - 𝐮\|_∞ = \|\hat L^{-1} (\hat L𝐮ᶠ - \hat L𝐮)\|_∞ = \| L^{-1} D^{-1} \begin{bmatrix} 0 \\ δ \end{bmatrix} \|_∞ ≤ 2h \| L^{-1}\|_{1 → ∞} \|δ\|_1 = O(h).
# $$
#
# **Problem 2.2⋆ (A)** Consider the matrices
# $$
# L = \begin{bmatrix} 1 \\
# -a_1 & 1 \\
# & -a_2 & 1\\
# && ⋱ & ⋱ \\
# &&& -a_{n-1} & 1
# \end{bmatrix}, \qquad T = \begin{bmatrix} 1 \\
# -a & 1 \\
# & -a & 1\\
# && ⋱ & ⋱ \\
# &&& -a & 1
# \end{bmatrix}.
# $$
# By writing down the inverse explicitly prove that if $|a_k| ≤ a$ then
# $$
# \|L^{-1}\|_{1 → ∞} ≤ \|T^{-1}\|_{1 → ∞}.
# $$
# Use this to prove convergence as $n → ∞$ of forward Euler for
# $$
# \begin{align*}
# u(0) &= c_0 \\
# u'(x) - a(x)u(x) &= f(x)
# \end{align*}
# $$
#
# **SOLUTION**
#
#
# Since
#
# $$L^{-1}=
# \begin{bmatrix}
# 1 & 0 & 0 & 0 & 0 &... & 0\\
# a_1 & 1 & 0 & 0 & 0 &... & 0\\
# a_1a_2 & a_2 & 1 & 0 & 0 &... & 0\\
# a_1a_2a_3 & a_2a_3 & a_3 & 1 & 0 & ... & 0\\
# \vdots & \vdots & \ddots & \ddots & \ddots & \ddots & \vdots \\
# \vdots & \vdots & & \ddots & \ddots & 1 & 0 \\
# \prod_{i=1}^{n-1}a_i & \prod_{i=2}^{n-1}a_i & ... & ... & a_{n-2}a_{n-1} & a_{n-1} & 1
# \end{bmatrix}$$
#
# and
#
# $$T^{-1}=
# \begin{bmatrix}
# 1 & 0 & 0 & 0 & 0 &... & 0\\
# a & 1 & 0 & 0 & 0 &... & 0\\
# a^2 & a & 1 & 0 & 0 &... & 0\\
# a^3 & a^2 & a & 1 & 0 & ... & 0\\
# \vdots & \vdots & \ddots & \ddots & \ddots & \ddots & \vdots \\
# \vdots & \vdots & & \ddots & \ddots & 1 & 0 \\
# a^{n-1} & a^{n-2} & ... & ... & a^2 & a & 1
# \end{bmatrix}$$
#
# Then, $\forall x$
#
# $$\|T^{-1}x\|_{\infty}=\max_i|(T^{-1}x)_i|= \max_i \left|x_i +\sum_{j=1}^{i-1}a^{i-j}x_j \right| = \begin{cases}
# 1 & if \ a\in[0,1] \\
# a^{n-1} & if \ a\ge 1
# \end{cases}$$
#
# since, given $b=\max\{1,a\}$,
#
# $$\max_i \left|x_i +\sum_{j=1}^{i-1}a^{i-j}x_j \right|\le \max_i \left(|x_i| +\sum_{j=1}^{i-1}\left|a^{i-j}x_j \right| \right) \le b^n\sum_{j=1}^n |x_j| = b^n\|x\|_1$$
#
# thus,
#
# $\|T^{-1}\|_{1\to\infty} = \sup_{x\ne 0} \frac{\|T^{-1}x\|_\infty}{\|x\|_1}\le b^n$ and, in particular,
# $$\|T^{-1}\|_{1\to\infty}= b^n$$
#
# since $$\frac{\|T^{-1}x\|_\infty}{\|x\|_1}=b^n$$ it is obtained using
# $$x=\begin{cases}e_1 & b=1 \\ e_n & b=a \end{cases}$$
#
# Moreover, $|a_j|\le b$, $\forall j=1,...,n$, thus,
#
# $$\|L^{-1}x\|_{\infty}=\max_i|(L^{-1}x)_i|= \max_i \left|x_i +\sum_{j=1}^{i-1}a_{j}...a_{i-1}x_j \right| \le \max_i |x_i| +\sum_{j=1}^{i-1}|a_{j}...a_{i-1}x_j | \le b^n\|x\|_1$$
#
# Hence,
#
# $$\|L^{-1}\|_{1\to \infty}=\sup_{x} \frac{\|L^{-1}x\|_{\infty}}{\|x\|_{1}} \le b^n = \|T^{-1}\|_{1 \to \infty}$$
#
#
# Now we prove convergence for the forward Euler method as $n → ∞$ for
#
# $$
# \begin{align*}
# u(0) &= c_0 \\
# u'(x) &= a(x)u(x) + f(x)
# \end{align*}
# $$
#
# Using equidistanced (with step $h$) points $x_0,...,x_{n-1}$, we use the approximations $u(x_k) \approx u_k$, where $u_0 = c_0$ and
# $$u_{k+1} = u_k + h\left(a(x_k)u_k + f(x_k)\right)$$
#
# In order to study convergence we consider the limit as $n\to\infty$ of
# $$\sup_{i=1,...,n-1} |u_i - u(x_i)|$$
#
# Similarly to Euler's methods convergence theorem, we study consistency and stability.
#
# In order to apply the theorem we note that we can define $a_k=a(x_k)$, $k=1,...n-1$ and we have that for every $k$, $|a_k|\le a:= max_{i=1,n-1}|a_i|$.
#
# ##### Consistency:
# Our discretisation approximates the true equation.
#
# $$\hat Lu = \begin{bmatrix} c_0 \\
# {u(x_1) - u(x_0) \over h} - a_1 u(x_0) \\
# \vdots \\
# {u(x_{n-1}) - u(x_{n-2}) \over h} - a_{n-1} u(x_{n-2})\end{bmatrix} =
# \begin{bmatrix} c_0 \\
# u'(x_0) - a_1 u(x_0) + u''(τ_0) h \\
# \vdots \\
# u'(x_{n-2}) - a_{n-1} u(x_{n-2}) + u''(τ_{n-2}) h\end{bmatrix} =
# \begin{bmatrix} c_0 \\
# f(x_0) + u''(τ_0) h \\
# \vdots \\
# f(x_{n-2}) + u''(τ_{n-2}) h \end{bmatrix} =
# \begin{bmatrix} c_0 \\ 𝐟ᶠ \end{bmatrix} + \begin{bmatrix} 0 \\ δ \end{bmatrix}$$
#
# where $x_k ≤ τ_k ≤ x_{k+1}$, and uniform boundedness implies that $\|δ\|_∞ = O(h)$
#
# ##### Stability:
# The inverse does not blow up the error.
# First write, for $l_k = 1-a_k$
#
# $$\hat L = \underbrace{\begin{bmatrix} 1 \\ & h^{-1} \\ && ⋱ \\ &&& h^{-1} \end{bmatrix}}_D \underbrace{\begin{bmatrix} 1 \\ -l_1 & 1 \\ & ⋱ & ⋱ \\ && -l_{n-1} &1 \end{bmatrix}}_{ L}$$
#
# Thus, we have $\| L^{-1}\|_{1 → ∞} ≤ \|T^{-1}\|_{1 → ∞} = O(1)$
#
# Combining stability and consistency we have
# $$\|𝐮ᶠ - 𝐮\|_∞ = \|\hat L^{-1} (\hat L𝐮ᶠ - \hat L𝐮)\|_∞ = \| L^{-1} D^{-1} \begin{bmatrix} 0 \\ δ \end{bmatrix} \|_∞ ≤ h \| L^{-1}\|_{1 → ∞} \|δ\|_1 = O(h)$$
#
#
#
#
#
# ## 3. Fourier series
#
# **Problem 3.1⋆ (C)** Give explicit formulae for $f̂_k$ and $f̂_k^n$ for the following functions:
# $$
# \cos θ, \cos 4θ, \sin^4θ, {3 \over 3 - {\rm e}^{\rm i θ}}, {1 \over 1 - 2{\rm e}^{\rm i θ}}
# $$
# Hint: You may wish to try the change of variables $z = {\rm e}^{-{\rm i}θ}$.
#
# **SOLUTION**
#
# 1. Just expand in complex exponentials to find that
# $$
# \cos θ = {\exp({\rm i} θ) + \exp(-{\rm i} θ) \over 2}
# $$
# that is $f̂_1 = f̂_{-1} = 1/2$, $f̂_k = 0$ otherwise.
# Therefore for $p∈ ℤ$ we have
# $$
# \begin{align*}
# f̂_k^1 &= f̂_1 + f̂_{-1} = 1 \\
# f̂_{2p}^2 &= 0, f̂_{2p+1}^2 = f̂_1 + f̂_{-1} = 1 \\
# f̂_{1+np}^n &= f̂_{-1+np}^n = 1/2, f̂_k^n = 0 \hbox{otherwise} \\
# \end{align*}
# $$
# 2. Similarly
# $$
# \cos 4 θ = {\exp(4{\rm i} θ) + \exp(-4{\rm i} θ) \over 2}
# $$
# that is $f̂_4 = f̂_{-4} = 1/2$, $f̂_k = 0$ otherwise.
# Therefore for $p∈ ℤ$ we have
# $$
# \begin{align*}
# f̂_p^1 &= f̂_4 + f̂_{-4} = 1 \\
# f̂_{2p}^2 &= f̂_4 + f̂_{-4} = 1, f̂_{2p+1}^2 = 0 \\
# f̂_{3p}^3 &= 0, f̂_{3p±1}^3 = f̂_{±4} =1/2 \\
# f̂_{4p}^4 &= f̂_{-4} + f̂_4 = 1, f̂_{4p±1}^4 = 0, f̂_{4p+2}^4 = 0 \\
# f̂_{5p}^5 &= 0, f̂_{5p+1}^5 = f̂_{-4} = 1/2, f̂_{5p-1}^5 = f̂_{4} = 1/2, f̂_{5p±2}^5 =0 \\
# f̂_{6p}^6 &=0, f̂_{6p±1}^6 = 0, f̂_{6p+2}^6 = f̂_{-4} = 1/2, f̂_{6p-2}^6 = f̂_{4} = 1/2, f̂_{6p+3}^6 =0 \\
# f̂_{7p}^7 &= 0, f̂_{7p±1}^7 = 0, f̂_{7p±2}^7 = 0, f̂_{7p±3}^7 = f̂_{∓4} = 1/2 \\
# f̂_{8p}^8 &= f̂_{8p±1}^8 = f̂_{8p±2}^8 = f̂_{8p±3}^8 = 0, f̂_{8p+4}^8 = f̂_4 + f̂_{-4} = 1 \\
# f̂_{k+pn}^n &= f̂_k \hbox{ for $-4 ≤ k ≤ 4$, 0 otherwise}.
# \end{align*}
# $$
# 3. Here we have:
# $$
# (\sin θ)^4= \left({\exp({\rm i} θ) - \exp(-{\rm i} θ) \over 2 {\rm i}}\right)^4
# = \left({\exp(2{\rm i} θ) -2 + \exp(-2{\rm i} θ) \over -4}\right)^2
# = {\exp(4{\rm i} θ) -4\exp(2{\rm i} θ) +6 -4 \exp(-2{\rm i} θ)+\exp(-4{\rm i} θ) \over 16}
# $$
# that is $f̂_{-4} = f̂_4 = 1/16$, $f̂_{-2} = f̂_2 = -1/4$, $f_0 = 3/8$, $f̂_k = 0$ otherwise.
# Therefore for $p∈ ℤ$ we have
# $$
# \begin{align*}
# f̂_p^1 &=f̂_{-4} + f̂_{-2} + f̂_0 + f̂_2 + f̂_4 = 0 \\
# f̂_k^2 &= 0 \\
# f̂_{3p}^3 &= f̂_0 = 3/8, f̂_{3p+1}^3 = f̂_{-2} + f̂_4 =-3/16, f̂_{3p-1}^3 = f̂_{2} + f̂_{-4} =-3/16 \\
# f̂_{4p}^4 &= f̂_0 + f̂_{-4} + f̂_4 = 1/2, f̂_{4p±1}^4 = 0, f̂_{4p+2}^4 = f̂_{2} + f̂_{-2} =-1/2 \\
# f̂_{5p}^5 &= f̂_0 = 3/8, f̂_{5p+1}^5 = f̂_{-4} = 1/16, f̂_{5p-1}^5 = f̂_{4} = 1/16, f̂_{5p+2}^5 = f̂_2 = -1/4, f̂_{5p-2}^5 = f̂_{-2} = -1/4 \\
# f̂_{6p}^6 &= f̂_0 = 3/8, f̂_{6p±1}^6 = 0, f̂_{6p+2}^6 = f̂_2 + f̂_{-4} = -3/16, f̂_{6p-2}^6 = f̂_{-2} + f̂_{4} = -3/16, f̂_{6p+3}^6 =0 \\
# f̂_{7p}^7 &= f̂_0 = 3/8, f̂_{7p±1}^7 = 0, f̂_{7p±2}^7 = f̂_{±2} = -1/4, f̂_{7p±3}^7 = f̂_{∓4} = 1/16 \\
# f̂_{8p}^8 &= f̂_0 = 3/8, f̂_{8p±1}^8 = 0, f̂_{8p±2}^8 = f̂_{±2} = -1/4, f̂_{8p±3}^8 = 0, f̂_{8p+4}^8 = f̂_4 + f̂_{-4} = 1/8 \\
# f̂_{k+pn}^n &= f̂_k \hbox{ for $-4 ≤ k ≤ 4$, 0 otherwise}.
# \end{align*}
# $$
# 4. Under the change of variables $z = {\rm e}^{{\rm i}θ}$ we can use Geometric series to determine
# $$
# {3 \over 3 - z} = {1 \over 1- z/3} = ∑_{k=0}^∞ {z^k \over 3^k}
# $$
# That is $f̂_k = 1/3^k$ for $k ≥ 0$, and $f̂_k = 0$ otherwise.
# We then have for $0 ≤ k ≤ n-1$
# $$
# f̂_{k+pn}^n = ∑_{ℓ=0}^∞ {1 \over 3^{k+ℓn}} = {1 \over 3^k} {1 \over 1 - 1/3^n} = {3^n \over 3^{n+k} - 3^k}
# $$
# 5. Now make the change of variables $z = {\rm e}^{-{\rm i} θ}$ to get:
# $$
# {1 \over 1 - 2/z} = {1 \over -2/z} {1 \over 1 - z/2} = {1 \over -2/z} ∑_{k=0}^∞ {z^k \over 2^k}
# = - ∑_{k=1}^∞ {{\rm e}^{-{\rm i} k θ} \over 2^k}
# $$
# That is $f̂_k = -1/2^{-k}$ for $k ≤ -1$ and 0 otherwise.
# We then have for $-n ≤ k ≤ -1$
# $$
# f̂_{k+pn}^n =- ∑_{ℓ=0}^∞ {1 \over 2^{-k+ℓn}} = -{1 \over 2^{-k}} {1 \over 1 - 1/2^n} = -{2^{n+k} \over 2^n - 1}
# $$
#
# **Problem 3.2⋆ (B)** Prove that if the first $λ-1$ derivatives $f(θ), f'(θ), …, f^{(λ-1)}(θ)$
# are 2π-periodic and $f^{(λ)}$ is uniformly bounded that
# $$
# |f̂_k| = O(|k|^{-λ})\qquad \hbox{as $|k| → ∞$}
# $$
# Use this to show for the Taylor case ($0 = f̂_{-1} = f̂_{-2} = ⋯$) that
# $$
# |f(θ) - ∑_{k=0}^{n-1} f̂_k {\rm e}^{{\rm i}kθ}| = O(n^{1-λ})
# $$
#
# **SOLUTION**
# A straightforward application of integration by parts yields the result
#
# $$f̂ₖ = \frac{1}{2π} \int^{2π}_{0} f(θ) {\rm e}^{-ikθ} dθ = \frac{(-i)^λ}{2π k^{λ}} \int^{2π}_{0} f^{(λ)}(θ) {\rm e}^{-ikθ} dθ $$
# given that $f^{(λ)}$ is uniformly bounded, the second part follows directly from this result
#
# $$
# |∑_{k=n}^{\infty} f̂_k {\rm e}^{{\rm i}kθ}| \leq ∑_{k=n}^{\infty} |f̂_k | \leq C ∑_{k=n}^{\infty} k^{-λ}
# $$
#
# for some constant $C$.
#
#
# **Problem 3.3⋆ (C)**
# If $f$ is a trigonometric polynomial ($f̂_k = 0$ for $|k| > m$) show
# for $n ≥ 2m+1$ we can exactly recover $f$:
# $$
# f(θ) = \sum_{k=-m}^m f̂_k^n {\rm e}^{{\rm i} k θ}
# $$
#
# **SOLUTION**
# This proof is nearly identical to the proof of "Theorem (Taylor series converges)" in the lecture notes. Only now one has to also subtract the negative coefficients from the negative approximate coefficients in the chain of arguments.
#
# **Problem 3.4⋆ (B)** For the general (non-Taylor) case and $n = 2m+1$, prove convergence for
# $$
# f_{-m:m}(θ) := ∑_{k=-m}^m f̂_k^n {\rm e}^{{\rm i} k θ}
# $$
# to $f(θ)$ as $n \rightarrow ∞$.
# What is the rate of convergence if the first $λ-1$ derivatives $f(θ), f'(θ), …, f^{(λ-1)}(θ)$
# are 2π-periodic and $f^{(λ)}$ is uniformly bounded?
#
# **SOLUTION**
#
# Observe that by aliasing (see corollary in lecture notes) and triangle inequality we have the following
#
# $$ |f̂_k^n - f̂_k| \leq \sum_{p=1}^{\infty} (|f̂_{k+pn}|+|f̂_{k-pn}|) $$
#
# Using the result from Problem 3.2 yields
#
# $$ |f̂_k^n - f̂_k| \leq \frac{C}{n^\lambda} \sum_{p=1}^{\infty} \frac{1}{\left(p + \frac{k}{n}\right)^\lambda} + \frac{1}{\left(p - \frac{k}{n}\right)^\lambda} $$
#
# now we pick $|q| < \frac{1}{2}$ (such that the estimate below will hold for both summands above) and construct an integral with convex and monotonically decreasing integrand such that
#
# $$ \left( p + q \right)^{-\lambda} < \int_{p-\frac{1}{2}}^{p+\frac{1}{2}} (x + q)^{-\lambda} dx $$
#
# more over summing over the left-hand side from $1$ to $\infty$ yields a bound by the integral:
#
# $$ \int^{\infty}_{\frac{1}{2}} (x + q)^{-\lambda} dx = \frac{1}{\lambda}(\frac{1}{2} + q)^{- \lambda + 1}$$
#
# Finally let $q = \pm \frac{k}{n}$ to achieve the rate of convergence
#
# $$ |f̂_k^n - f̂_k| \leq \frac{C_{\lambda}}{ n^{\lambda}} \left( \left( \frac{1}{2} + k/n \right)^{ - \lambda + 1} + \left( \left( \frac{1}{2} - k/n \right) \right)^{- \lambda +1} \right)$$
#
# where $C_{\lambda}$ is a constant depending on $\lambda$. Note that it is indeed important to split the $n$ coefficients equally over the negative and positive coefficients as stated in the notes, due to the estimate we used above.
#
# Finally, we have (thanks to Anonymous on ed):
# $$
# \begin{align*}
# |f(\theta) - f_{-m:m}(\theta)|
# &= |\sum_{k=-m}^m {(f̂_k - f̂_k^n)z^k} + \sum_{k=m+1}^\infty {f̂_k z^k} + \sum_{k=-\infty}^{-m-1} {f̂_k z^k} | \\
# &\le \sum_{k=-m}^m | f̂_k - f̂_k^n | + \sum_{k=m+1}^\infty |f̂_k| + \sum_{k=-\infty}^{-m-1} |f̂_k| \\
# &\le \sum_{k=-m}^m {\frac{C_{\lambda}}{ n^{\lambda}} \left( \left( \frac{1}{2} + k/n \right)^{ - \lambda + 1} + \left( \left( \frac{1}{2} - k/n \right) \right)^{- \lambda +1} \right)} + \sum_{k=m+1}^\infty |f̂_k| + \sum_{k=-\infty}^{-m-1} |f̂_k| \\
# &= \frac{C_{\lambda}}{n^{\lambda}} 2^{\lambda} + \sum_{k=m+1}^\infty |f̂_k| + \sum_{k=-\infty}^{-m-1} |f̂_k| \\
# &= O(n^{-\lambda}) + O(n^{1-\lambda} ) + O(n^{1-\lambda} ) \\
# &= O(n^{1-\lambda})
# \end{align*}
# $$
|
sheets/week7s.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5" _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19"
# This Python 3 environment comes with many helpful analytics libraries installed
# It is defined by the kaggle/python Docker image: https://github.com/kaggle/docker-python
# For example, here's several helpful packages to load
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# List every file under the read-only Kaggle input mount so the available
# dataset paths are visible in the notebook output.
for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# -
# Load the mall-customer dataset (CustomerID, Gender, Age, Annual Income,
# Spending Score) and do a quick exploratory pass.
df = pd.read_csv("/kaggle/input/customer-segmentation-tutorial-in-python/Mall_Customers.csv")
df.head()
df.corr()
# NOTE(review): sns.distplot has been deprecated since seaborn 0.11 in
# favour of histplot/displot — these calls work on older versions but will
# warn or fail on newer ones; confirm the pinned seaborn version.
sns.distplot(df["Annual Income (k$)"])
plt.title("Distribution of Annual Income (k$)")
plt.xlabel("Range of Income")
plt.ylabel("Count")
sns.distplot(df["Age"])
plt.title("Distribution of Age")
plt.xlabel("Range of Age")
plt.ylabel("Count")
sns.distplot(df["Spending Score (1-100)"])
plt.title("Distribution of Spending Score (1-100)")
plt.xlabel("Range of Spending Score (1-100)")
plt.ylabel("Count")
# Bar chart of customer counts per gender.
genders = df["Gender"].value_counts()
sns.barplot(x=genders.index, y=genders.values)
plt.title("Comparison of Each Gender Activity")
plt.xlabel("Gender")
plt.ylabel("Count")
# Scatter of income against spending score. Pass the data as keyword
# arguments: positional x/y was deprecated in seaborn 0.12 and now raises
# a TypeError on current versions.
sns.scatterplot(x=df["Annual Income (k$)"], y=df["Spending Score (1-100)"])
plt.title("Annual Income VS. Spending Score")
plt.xlabel("Annual Income")
plt.ylabel("Spending Score")
# +
from sklearn.cluster import KMeans

# Partition customers into 5 segments using the two spending-related
# features. fit_predict is the documented fit-then-predict shorthand for
# KMeans and yields the same labels.
X = df[["Annual Income (k$)", "Spending Score (1-100)"]]
kmeans = KMeans(n_clusters=5, random_state=0)
df["label"] = kmeans.fit_predict(X)
# -
# Colour the income/spending scatter by cluster label. Keyword data
# arguments: positional x/y was deprecated in seaborn 0.12 and now raises
# a TypeError on current versions.
sns.scatterplot(x=df["Annual Income (k$)"], y=df["Spending Score (1-100)"], hue=df["label"],
                palette=['green','orange','brown','dodgerblue','red'])
plt.title("Annual Income VS. Spending Score")
plt.xlabel("Annual Income")
plt.ylabel("Spending Score")
# NOTE(review): df2 is an alias of df, not a copy — assigning df2["label"]
# below also overwrites df["label"]. Use df.copy() if the two frames ever
# need to stay independent.
df2 = df
# +
from sklearn.cluster import KMeans
# Re-cluster using all three features including Age. BUG FIX: the original
# cell fitted and predicted on the old 2-feature matrix X instead of the
# freshly built X2, so Age was silently ignored and the "3-D" clustering
# was identical to the 2-D one.
X2 = df2[["Age", "Annual Income (k$)", "Spending Score (1-100)"]]
kmeans = KMeans(n_clusters=5, random_state=0)
kmeans.fit(X2)
y = kmeans.predict(X2)
df2["label"] = y
# -
# 3-D view of the clusters across Age, Annual Income and Spending Score,
# one colour per cluster label (labels 0..4 in order).
fig = plt.figure(figsize=(20,10))
ax = fig.add_subplot(111, projection='3d')
for label, colour in enumerate(('purple', 'red', 'blue', 'green', 'black')):
    members = df2[df2.label == label]
    ax.scatter(members.Age, members["Annual Income (k$)"],
               members["Spending Score (1-100)"], c=colour, s=60)
ax.view_init(35, 185)
plt.xlabel("Age")
plt.ylabel("Annual Income (k$)")
ax.set_zlabel('Spending Score (1-100)')
plt.show()
|
nb-mall-customer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/novoforce/Exploring-Pytorch/blob/master/5_Optimization.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6zLaeCFLP09f"
# # Gradiant descent Update Rule
#
# # $w= w-\eta \frac{\partial{L(w)}}{\partial{w}}$ $\Rightarrow [eq_1]$
#
# The $[eq_1]$ is the vanilla gradiant descent update rule.
# For optimization or better learning algorithms what all things need to be done:
#
# ```
# How do you compute the gradient ?
# or What data should you use for computing the gradients ?
# ```
#
# ```
# How do you use the gradient ?
# or Can you come up with better update rule ?
# ```
#
#
#
#
#
#
# + [markdown] id="5mcE4qyqhsPk"
# # Momentum based GD
#
# **Problems with GD:**
# If the loss curve(graph) is flat then the gradient update would be very very less which will eventually lead to huge training time as the algorithm will take longer time to get into the minima.
#
# So to mitigate the problems associated with Vanilla Gradient descent we are hereby proposing the Momentum based approach.
#
# **Intuition**
#
# If I'm repeatedly being asked to go in the same direction then I should probably gain some confidence(momentum) and start taking bigger steps in that direction.
#
# The below equation $[eq_2]$ and the $[eq_3]$ gives the final value for the momentum based GD.
#
# # $v_t=\gamma.v_{t-1}+\eta \nabla w_t$ $\Rightarrow[eq_2]$
#
# # $w_{t+1}=w_t-v_t$ $\Rightarrow[eq_3]$
#
# Below image gives the mathematical expanation of the Momentum based GD.
#
# <img src="https://storage.googleapis.com/openscreenshot/j%2F8%2F1/gHJJUN18j.png">
#
# **Explanation:**
#
# Going by the intuition. Suppose you are in the middle of the way and the you have asked people about which direction is the target and the values given by the people(or history) are represented as $v_0, v_1, v_2, v_3...v_t$
#
# So at timestep=0, history is $0$ (as no person encountered) thus $v_0=0$
#
# At timestep=1,history is $\eta \nabla w_1$ thus $v_1= \eta \nabla w_1$
#
# At timestep=2, history is $\gamma.\eta \nabla w_1+\eta \nabla w_2$ thus $v_2= \gamma.\eta \nabla w_1+\eta \nabla w_2$
# .....
#
# One more important point to note is that after a certain number of steps the powers of $\gamma$ grow, and this is what produces the decay: since $\gamma$ is less than one, raising it to increasing powers yields ever smaller values.
#
# So, intuitively speaking, if you are at the current timestep $t$, information from earlier people matters less than information from the most recent person; hence we use the concept of decay to down-weight the older contributions.
#
#
# **Disadvantages:**
#
# Intuitively speaking, sometimes we may travel too far because of the accumulated confidence, and then we have to make a U-turn and come back.
#
# **So some of the interesting observations are:**
#
# Momentum based GD oscillates in-out of the minima valley, and despite this U-turns it still converges than the vanilla GD.
#
# + [markdown] id="vm0ZL4OoPxTh"
# # Nesterov accelerated GD
#
# **Intuition:** Can we do something to reduce the Oscillations ?
#
# The answer is Nesterov accelerated GD which is basically 3 equations below:
#
# # $w_{temp}= w_t-\gamma*v_{t-1} $ $\Rightarrow[eq_4]$
#
# # $w_{t+1}= w_{temp}-\eta \nabla w_{temp}$ $\Rightarrow[eq_5]$
#
# # $v_t= \gamma*v_{t-1}+ \eta \nabla w_{temp}$ $\Rightarrow[eq_6]$
#
# Before explaining Nesterov accelerated GD, let's see what the **Momentum based GD** does is that the algo. calculates the gradient before making a history update, whereas the **Nesterov accelarated GD(NAG)** makes the history update first then calculate the gradient.
#
# The new effect of this change is that even if the gradient might overshoot still the overshooting **won't be high** as that in the case of **Momentum based GD**.
#
# **Some Observations:**
#
# Look ahead helps NAG in correcting its course quicker than the momentum based GD. Hence the oscillations are smaller and thus less chance of escaping the valley lower.
#
# + [markdown] id="Ktrz6zxNpYBj"
# # Concept of Adaptive Learning Rate
#
# Suppose in our dataset there may be certain features which are less in numbers as compared to other features, So to give more importance to those features we are using Adaptive learning rates which will increase the gradient values for low features and decrease gradient for high features.
# + [markdown] id="bstF7TSB_bwf"
# https://math.meta.stackexchange.com/questions/5020/mathjax-basic-tutorial-and-quick-reference
# + id="gw0wDrQWPuzE"
|
5_Optimization.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Combine a Matplotlib Basemap with IPython Widgets
# This is an experiment in creating a [Jupyter](https://jupyter.org) notebook showing a world map with different parameters (including map projection) by combining a [Matplotlib Basemap](http://matplotlib.org/basemap/index.html) and [IPython widgets](https://ipywidgets.readthedocs.io/en/latest/index.html).
#
# Tested on Python 3.5, basemap 1.0.7, and ipywidgets 4.1.1.
# Make plots appear inline (inside the Jupyter notebook).
# %matplotlib inline
# +
import datetime
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap, supported_projections
from ipywidgets import interact, interact_manual, FloatSlider
# -
# Get a list of supported projection names (no, there seems to be no single ready-made list):
# Derive the list of supported projection names from basemap's help text
# (there is no ready-made machine-readable list).
proj_names = [entry.strip().split()[0]
              for entry in supported_projections.strip().split('\n')]
print(sorted(proj_names))
# Create sliders without continuous update, since creating a map can take a few seconds (this effect shows only when replacing the `@interact_manual` decorator below with `@interact`):
# Parameter sliders; continuous_update=False defers the callback until the
# slider is released, since rebuilding the map can take seconds.
lat_slider = FloatSlider(min=-90, max=90, step=0.1, continuous_update=False)
lon_slider = FloatSlider(min=-180, max=180, step=0.1, continuous_update=False)
hour_slider = FloatSlider(min=-12, max=12, step=1/60, continuous_update=False)  # 1-minute steps
# This function does the real work. Notice that some projections will create warnings or even errors when they need additional parameters!
@interact_manual(lat_0=lat_slider, lon_0=lon_slider,
                 delta_hours=hour_slider,
                 projection=proj_names, title='Sample Title')
def show_map(lat_0=0, lon_0=0, delta_hours=0, projection='mill', title=''):
    """Show a world map with a day/night terminator at UTC now + delta_hours.

    Some projections will emit warnings or errors when they need
    additional parameters beyond lat_0/lon_0.
    """
    # Resolutions: c (crude), l (low), i (intermediate), h (high), f (full) or None.
    # Renamed local from `map` to `m` to stop shadowing the builtin.
    m = Basemap(projection=projection, lat_0=lat_0, lon_0=lon_0, resolution='c')
    # Coastlines and country borders.
    m.drawcoastlines()
    m.drawcountries()
    # Labelled parallels (left edge) and meridians (bottom edge).
    m.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0])
    m.drawmeridians(np.arange(m.lonmin, m.lonmax + 30, 60), labels=[0, 0, 0, 1])
    # Fill continents 'coral', color wet areas 'aqua'.
    m.drawmapboundary(fill_color='aqua')
    m.fillcontinents(color='coral', lake_color='aqua')
    # BUG FIX: the original did utcnow().timestamp() -> fromtimestamp(),
    # but .timestamp() on a *naive* datetime interprets it as LOCAL time,
    # shifting the terminator by the machine's UTC offset. Do the delta
    # arithmetic directly on the UTC datetime instead.
    date = datetime.datetime.utcnow() + datetime.timedelta(hours=delta_hours)
    m.nightshade(date, alpha=0.35)
    plt.title('%s %s (UTC)' % (title, date.isoformat()[:19]))
    plt.show()
|
pixelplanet/basemap_ipywidgets.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="flLpKhvYnGBp" colab_type="text"
# Сверточные сети относительно хорошо масшабируются на длинные последовательности. Поэтому их часто применяют к отдельным символам, а не токенам. В домашке вам нужно будет обучить большую сверточную модель на символах (отличаться по сути будет только токенизация).
#
# При обучении используйте колбек для отслеживания лучшей модели. Ориентируйтесь на ф1 меру.
#
# Конкретнее задание такое: Обучите модель с минимум 15 слоями, где у каждого слоя разные параметры (Dropout, Conv1d и Pooling считаются слоями, остальное нет). Как минимум 4 слоя должны быть наложены друг на друга. Должен быть хотя бы один слой каждого типа.
#
# Советы: Начните с небольших сетей и постепенно добавляйте, не пытайтесь сразу собрать все слои. Сделайте размер эмбединга сильно меньше. Попробуйте паддинг поменьше. Символьная модель может обучаться намного дольше. Иногда кернел может крашиться просто так или из-за слишком больших матриц.
#
# Бонусный балл можно получить за изучение влияния предобработки (нужно ли приводить к нижнему регистру, нужно ли выкидывать не алфавитные символы, помогает ли замена цифр на определенный токен).
# + id="0GUoYGhgnGoY" colab_type="code" colab={}
# %tensorflow_version 2.x
# + id="STyacS9vnWRQ" colab_type="code" outputId="9176da7f-5f67-4ce3-bebf-eb65d89715f6" colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd 'drive/My Drive/colab_data'
# + id="G4jFn5R5nhaQ" colab_type="code" colab={}
import tensorflow as tf
from tensorflow.keras import backend as K
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, f1_score
from collections import Counter
import re
from string import ascii_lowercase, digits, ascii_letters, punctuation
short_punctuation = ',.!?;'  # punctuation that gets its own PNC token
# + id="VGC1oZKYnj4I" colab_type="code" colab={}
# Quora insincere-questions dataset: question_text + binary target column.
quora = pd.read_csv('quora.csv')
# + id="f58eU2UPnnK4" colab_type="code" colab={}
def f1(y_true, y_pred):
    """Batch-wise F1 score metric for Keras training logs.

    Computed per batch, so it only approximates the epoch-level F1
    (the behaviour of the metric removed from Keras 2.0).
    """
    true_pos = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    # epsilon guards against division by zero on all-negative batches
    precision = true_pos / (predicted_pos + K.epsilon())
    recall = true_pos / (possible_pos + K.epsilon())
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
# + id="UK06P-fSdK6D" colab_type="code" colab={}
# Regex character classes for text cleanup.
# BUG FIX: the originals were written alternation-style ("[\t|\n|...]"),
# which made the classes also match -- and therefore delete -- the literal
# "|" character. The stray pipes inside the brackets have been removed.
chars_to_remove = r"[\ufeff\u202c\x7f\u200b\xad\u2060\u200c\uf02d\x10\u200e\u2061]"
whitespace = r"[\t\n\r\x0b\x0c]"
# + id="1rCsfakVqfMH" colab_type="code" colab={}
def clean_text(text):
    """Normalise raw question text.

    Replaces odd control/format characters and non-space whitespace with
    spaces, then collapses runs of spaces into one.
    """
    text = re.sub(chars_to_remove, ' ', text)  # funky punctuation symbols
    text = re.sub(whitespace, ' ', text)       # different symbols for whitespace
    text = re.sub(r" {2,}", ' ', text)         # multiple whitespaces
    return text


def tokenize(text: str) -> list:
    """Lower-cased character tokenizer.

    Letters and spaces pass through; digits -> 'DIG', short punctuation
    -> 'PNC', everything else -> 'UNK'.
    """
    result = []
    for ch in text.lower():
        if ch in ascii_lowercase or ch == ' ':
            result.append(ch)
        elif ch in digits:
            result.append('DIG')
        elif ch in short_punctuation:
            result.append('PNC')
        else:
            result.append('UNK')
    return result
def filter_dict(d: dict, func: callable) -> dict:
    """Return a new dict keeping only items for which func((key, value)) is truthy."""
    return {key: value for key, value in d.items() if func((key, value))}
# + id="Fusra5sIuC-A" colab_type="code" colab={}
def build_vocab(texts: list, min_count: int) -> set:
    """Collect the set of tokens occurring strictly more than min_count times."""
    counts = Counter()
    for tokens in texts:
        counts.update(tokens)
    return {token for token, n in counts.items() if n > min_count}
# + id="_MdSW4ifuEaF" colab_type="code" colab={}
def index_chars(chars: set) -> dict:
    """Assign integer ids to characters; special tokens keep fixed low ids (PAD=0)."""
    mapping = {'PNC': 3, 'DIG': 2, 'UNK': 1, 'PAD': 0}
    for ch in chars:
        # setdefault leaves already-present special tokens untouched
        mapping.setdefault(ch, len(mapping))
    return mapping
# + id="PwxrTnTjuGoW" colab_type="code" colab={}
def index_text(text: list) -> list:
    """Map each token to its id via the module-level char2id table (1 = UNK)."""
    return [char2id.get(ch, 1) for ch in text]
# + id="SBZqZ6N5feAq" colab_type="code" colab={}
# Clean -> tokenize -> build vocab/id maps -> index every question.
quora['cleaned'] = quora.question_text.apply(clean_text)
# + id="WtegLa1vuIOd" colab_type="code" colab={}
quora['tokenized'] = quora.cleaned.apply(tokenize)
# + id="ipMIy6_3uM_6" colab_type="code" colab={}
# Keep only characters seen more than once in the corpus.
vocab = build_vocab(quora.tokenized.values, 1)
# + id="lyUOQQjtuQG1" colab_type="code" colab={}
char2id = index_chars(vocab)
# + id="nA-HXw6MuRgS" colab_type="code" colab={}
id2char = {i: ch for ch, i in char2id.items()}  # inverse mapping, for debugging
# + id="whRDSBA4uS0l" colab_type="code" colab={}
quora['indexed'] = quora.tokenized.apply(index_text)
# + id="E97ij7i8Im21" colab_type="code" colab={}
def limit_max_len(len_list, n_sigma):
    """Length cap at mean + n_sigma * (population) std of len_list, rounded up."""
    return int(np.ceil(np.mean(len_list) + n_sigma * np.std(len_list)))
# + id="_4LK7qZ1IiTA" colab_type="code" colab={}
# Cap sequence length at mean + 1 std of the observed lengths.
len_limit = limit_max_len(quora.indexed.str.len(), 1)
# + id="KkjpZuycJar-" colab_type="code" outputId="78e5a556-215a-4c13-e610-d1edce751e95" colab={"base_uri": "https://localhost:8080/", "height": 34}
len_limit
# + id="arDi6S7yuVOl" colab_type="code" colab={}
# Pre-pad/truncate every indexed question to len_limit.
X = tf.keras.preprocessing.sequence.pad_sequences(quora.indexed.values, maxlen=len_limit)
# + id="G4lEcnAWueRK" colab_type="code" colab={}
y = quora.target.values
# + id="LmgiIpoVufNu" colab_type="code" colab={}
# Stratified split keeps the (imbalanced) class ratio in the validation set.
X_train, X_valid, y_train, y_valid = train_test_split(X, y,
                                                      test_size=0.05,
                                                      random_state=1,
                                                      stratify=y)
# + id="fuub7Y43NteI" colab_type="code" outputId="9f6dc5a4-ede4-4b64-9c6f-3b11b95f1a49" colab={"base_uri": "https://localhost:8080/", "height": 34}
X.shape
# + [markdown] id="sE1iF3GfzQuS" colab_type="text"
# # Попытка 1
# + id="ipOtwiuwukJN" colab_type="code" colab={}
# Attempt 1: a single sequential stack of Conv1D blocks over 50-dim character
# embeddings (3 convs -> pooling -> dropout, repeated), then a dense head.
inputs = tf.keras.layers.Input(shape=(len_limit,))
embeddings = tf.keras.layers.Embedding(input_dim=len(char2id), output_dim=50)(inputs)
# NOTE(review): conv_1..conv_5 pass no activation (linear); only conv_6+
# use relu -- presumably unintentional, kept as-is.
conv_1 = tf.keras.layers.Conv1D(kernel_size=3, filters=16, strides=1)(embeddings)
conv_2 = tf.keras.layers.Conv1D(kernel_size=3, filters=16, strides=1)(conv_1)
conv_3 = tf.keras.layers.Conv1D(kernel_size=3, filters=16, strides=1)(conv_2)
pool_1 = tf.keras.layers.AveragePooling1D()(conv_3)
drop_1 = tf.keras.layers.Dropout(0.1)(pool_1)
conv_4 = tf.keras.layers.Conv1D(kernel_size=3, filters=16, strides=1)(drop_1)
conv_5 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1)(conv_4)
conv_6 = tf.keras.layers.Conv1D(kernel_size=3, filters=64, strides=1, activation='relu')(conv_5)
pool_2 = tf.keras.layers.AveragePooling1D()(conv_6)
drop_2 = tf.keras.layers.Dropout(0.1)(pool_2)
conv_7 = tf.keras.layers.Conv1D(kernel_size=4, filters=16, strides=1, activation='relu')(drop_2)
conv_8 = tf.keras.layers.Conv1D(kernel_size=4, filters=32, strides=1, activation='relu')(conv_7)
conv_9 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='relu')(conv_8)
pool_3 = tf.keras.layers.MaxPooling1D()(conv_9)
drop_3 = tf.keras.layers.Dropout(0.1)(pool_3)
concat = tf.keras.layers.Flatten()(drop_3)
dense_1 = tf.keras.layers.Dense(64, activation='relu')(concat)
dense_2 = tf.keras.layers.Dense(64, activation='relu')(dense_1)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(dense_2)  # P(insincere)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
# + id="w-yJzOe5xoXb" colab_type="code" colab={}
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# + id="WctVkDTcxhum" colab_type="code" colab={}
# Keep only the weights of the epoch with the best validation F1.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.weights',
                                                monitor='val_f1',
                                                verbose=1,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                mode='max',
                                                save_freq='epoch')
# + id="D-Lqs9alxm82" colab_type="code" colab={}
model.compile(optimizer=optimizer,
              loss='binary_crossentropy',
              metrics=[f1])
# + id="QxDkQidywk-d" colab_type="code" outputId="406c1b6c-17bc-4e89-c122-8bd16f27cf6f" colab={"base_uri": "https://localhost:8080/", "height": 1000}
model.fit(X_train, y_train,
          validation_data=(X_valid, y_valid),
          batch_size=4000,
          epochs=15,
          callbacks=[checkpoint])
# + id="ftcpi9pnzJf4" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="5605d59b-66a2-4d61-8666-76e64ce1f162"
# Restore the best-F1 epoch before evaluating.
model.load_weights('model.weights')
# + id="2cIkI8pNCzz_" colab_type="code" colab={}
preds = model.predict(X_valid).reshape(-1)
# + id="1lEJV6XaDB4d" colab_type="code" outputId="d276f703-1a1f-450e-ad31-a787fbd32be2" colab={"base_uri": "https://localhost:8080/", "height": 170}
print(classification_report(y_valid, (preds > 0.5).astype(int)))
# + [markdown] id="bbvv9GJmzUmC" colab_type="text"
# # Попытка 2 (основная)
# + id="K0g9u6M74DcQ" colab_type="code" colab={}
# Attempt 2 (main): four parallel conv branches over a shared 50-dim char
# embedding; each branch ends in global max pooling + AlphaDropout, the
# pooled vectors are concatenated and classified by a selu dense head.
inputs = tf.keras.layers.Input(shape=(len_limit,))
embeddings = tf.keras.layers.Embedding(input_dim=len(char2id), output_dim=50)(inputs)
conv = []  # pooled output of each branch, concatenated below
# Branch 1: three stacked kernel-2 convs, 16 filters.
conv_1 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_2 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_1)
conv_3 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_2)
pool_1 = tf.keras.layers.GlobalMaxPooling1D()(conv_3)
drop_1 = tf.keras.layers.AlphaDropout(0.1)(pool_1)
conv.append(drop_1)
# Branch 2: three stacked kernel-3 convs, 32 filters.
conv_4 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(embeddings)
conv_5 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_4)
conv_6 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_5)
pool_2 = tf.keras.layers.GlobalMaxPooling1D()(conv_6)
drop_2 = tf.keras.layers.AlphaDropout(0.1)(pool_2)
conv.append(drop_2)
# Branch 3: three stacked kernel-4 convs, 64 filters.
conv_7 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(embeddings)
conv_8 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_7)
conv_9 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_8)
pool_3 = tf.keras.layers.GlobalMaxPooling1D()(conv_9)
drop_3 = tf.keras.layers.AlphaDropout(0.1)(pool_3)
conv.append(drop_3)
# Branch 4: four stacked kernel-2 convs with widening filters (16 -> 128).
conv_10 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_11 = tf.keras.layers.Conv1D(kernel_size=2, filters=32, strides=1, activation='tanh')(conv_10)
conv_12 = tf.keras.layers.Conv1D(kernel_size=2, filters=64, strides=1, activation='tanh')(conv_11)
conv_13 = tf.keras.layers.Conv1D(kernel_size=2, filters=128, strides=1, activation='tanh')(conv_12)
pool_4 = tf.keras.layers.GlobalMaxPooling1D()(conv_13)
drop_4 = tf.keras.layers.AlphaDropout(0.1)(pool_4)
conv.append(drop_4)
concat = tf.keras.layers.Concatenate()(conv)
dense_1 = tf.keras.layers.Dense(128, activation='selu')(concat)
dense_2 = tf.keras.layers.Dense(64, activation='selu')(dense_1)
dense_3 = tf.keras.layers.Dense(32, activation='selu')(dense_2)
drop_5 = tf.keras.layers.AlphaDropout(0.1)(dense_3)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(drop_5)  # P(insincere)
model_2 = tf.keras.Model(inputs=inputs, outputs=outputs)
# + id="Qgm3mb4y0Je0" colab_type="code" colab={}
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# + id="2cdTfEVj0Mrf" colab_type="code" colab={}
# Keep only the weights of the epoch with the best validation F1.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.weights',
                                                monitor='val_f1',
                                                verbose=1,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                mode='max',
                                                save_freq='epoch')
# + id="XS5Gd4yC0O42" colab_type="code" colab={}
model_2.compile(optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=[f1])
# + id="vXzt1h680UjR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="4a0e47ab-584c-48a1-8031-9ab52b742c85"
model_2.fit(X_train, y_train,
            validation_data=(X_valid, y_valid),
            batch_size=4000,
            epochs=30,
            callbacks=[checkpoint])
# + id="zy0HiK_U0jOZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="38b133ef-002d-402b-a2c7-d207c3b7a2b4"
# Restore the best-F1 epoch before evaluating.
model_2.load_weights('model.weights')
# + id="dMYHEHERAXac" colab_type="code" colab={}
preds = model_2.predict(X_valid).reshape(-1)
# + id="aZHEIoQnAZyr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="b4275e08-5c24-4e48-ac3e-a1026b66755d"
print(classification_report(y_valid, (preds > 0.5).astype(int)))
# + [markdown] id="pLkQrLeZAyWq" colab_type="text"
# # Попытка 3
# + [markdown] id="8Z9ON9BiA0tM" colab_type="text"
# Попробуем учитывать регистр
# + id="LR4M6hjzAqOo" colab_type="code" colab={}
def tokenize_cased(text: str) -> list:
    """Case-preserving character tokenizer.

    Letters and spaces pass through; digits -> 'DIG', short punctuation
    -> 'PNC', everything else -> 'UNK'.
    """
    def classify(ch: str) -> str:
        if ch in ascii_letters or ch == ' ':
            return ch
        if ch in digits:
            return 'DIG'
        if ch in short_punctuation:
            return 'PNC'
        return 'UNK'
    return [classify(ch) for ch in text]
# + id="y9C3zvNnBFu4" colab_type="code" colab={}
# Attempt 3 data prep: rebuild vocab/ids/padded matrix for the
# case-preserving tokenization.
quora['tokenized_cased'] = quora.cleaned.apply(tokenize_cased)
vocab = build_vocab(quora.tokenized_cased.values, 1)
char2id = index_chars(vocab)
id2char = {i: ch for ch, i in char2id.items()}
quora['indexed_cased'] = quora.tokenized_cased.apply(index_text)
len_limit = limit_max_len(quora.indexed_cased.str.len(), 1)
X = tf.keras.preprocessing.sequence.pad_sequences(quora.indexed_cased.values, maxlen=len_limit)
X_train, X_valid, y_train, y_valid = train_test_split(X, y,
                                                      test_size=0.05,
                                                      random_state=1,
                                                      stratify=y)
# + id="zRIlr4-ICohL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c373f843-b45c-44ba-f379-3406566b767b"
# Attempt 3: same four-branch char-CNN as Attempt 2, retrained on the
# case-preserving tokenization.
inputs = tf.keras.layers.Input(shape=(len_limit,))
embeddings = tf.keras.layers.Embedding(input_dim=len(char2id), output_dim=50)(inputs)
conv = []  # pooled output of each branch, concatenated below
# Branch 1: kernel 2, 16 filters.
conv_1 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_2 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_1)
conv_3 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_2)
pool_1 = tf.keras.layers.GlobalMaxPooling1D()(conv_3)
drop_1 = tf.keras.layers.AlphaDropout(0.1)(pool_1)
conv.append(drop_1)
# Branch 2: kernel 3, 32 filters.
conv_4 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(embeddings)
conv_5 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_4)
conv_6 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_5)
pool_2 = tf.keras.layers.GlobalMaxPooling1D()(conv_6)
drop_2 = tf.keras.layers.AlphaDropout(0.1)(pool_2)
conv.append(drop_2)
# Branch 3: kernel 4, 64 filters.
conv_7 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(embeddings)
conv_8 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_7)
conv_9 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_8)
pool_3 = tf.keras.layers.GlobalMaxPooling1D()(conv_9)
drop_3 = tf.keras.layers.AlphaDropout(0.1)(pool_3)
conv.append(drop_3)
# Branch 4: kernel 2 with widening filters (16 -> 128).
conv_10 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_11 = tf.keras.layers.Conv1D(kernel_size=2, filters=32, strides=1, activation='tanh')(conv_10)
conv_12 = tf.keras.layers.Conv1D(kernel_size=2, filters=64, strides=1, activation='tanh')(conv_11)
conv_13 = tf.keras.layers.Conv1D(kernel_size=2, filters=128, strides=1, activation='tanh')(conv_12)
pool_4 = tf.keras.layers.GlobalMaxPooling1D()(conv_13)
drop_4 = tf.keras.layers.AlphaDropout(0.1)(pool_4)
conv.append(drop_4)
concat = tf.keras.layers.Concatenate()(conv)
dense_1 = tf.keras.layers.Dense(128, activation='selu')(concat)
dense_2 = tf.keras.layers.Dense(64, activation='selu')(dense_1)
dense_3 = tf.keras.layers.Dense(32, activation='selu')(dense_2)
drop_5 = tf.keras.layers.AlphaDropout(0.1)(dense_3)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(drop_5)
model_3 = tf.keras.Model(inputs=inputs, outputs=outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Track the best epoch by validation F1.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.weights',
                                                monitor='val_f1',
                                                verbose=1,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                mode='max',
                                                save_freq='epoch')
model_3.compile(optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=[f1])
model_3.fit(X_train, y_train,
            validation_data=(X_valid, y_valid),
            batch_size=4000,
            epochs=30,
            callbacks=[checkpoint])
# + id="CgHM_neuECmt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="bbb24879-5897-44b3-8027-c74f7908a9f7"
# Restore the best-F1 epoch and report validation metrics.
model_3.load_weights('model.weights')
preds = model_3.predict(X_valid).reshape(-1)
print(classification_report(y_valid, (preds > 0.5).astype(int)))
# + [markdown] id="2YU5NejyJKyz" colab_type="text"
# Здесь разница только в том, что пик ф-меры был достигнут на несколько эпох раньше.
# + [markdown] id="1USsf2ewH0O9" colab_type="text"
# # Попытка 4
# + [markdown] id="STmnuvWxH23O" colab_type="text"
# Оставляем регистр, добавляем в словарь "значимую" пунктуацию. Остальную храним как один символ.
# + id="qBQU9uhFJX1i" colab_type="code" colab={}
# Punctuation characters NOT in short_punctuation; collapsed into one PNC token.
long_punctuation = set(punctuation) - set(short_punctuation)
# + id="M6Divy0eH2Hh" colab_type="code" colab={}
def tokenize_cased_punct(text: str) -> list:
result = []
for ch in text:
if ch in ascii_letters or ch in short_punctuation or ch == ' ':
result.append(ch)
elif ch in digits:
result.append('DIG')
elif ch in long_punctuation:
result.append('PNC')
else:
result.append('UNK')
return result
# + id="ek-XGh3wIMtl" colab_type="code" colab={}
# Attempt 4 data prep.
# BUG FIX: this cell previously applied tokenize_cased, so the punctuation
# tokenizer defined above was never used and "Attempt 4" silently duplicated
# "Attempt 3". Use tokenize_cased_punct, as the column names and the
# surrounding text intend.
quora['tokenized_cased_punct'] = quora.cleaned.apply(tokenize_cased_punct)
vocab = build_vocab(quora.tokenized_cased_punct.values, 1)
char2id = index_chars(vocab)
id2char = {i: ch for ch, i in char2id.items()}
quora['indexed_cased_punct'] = quora.tokenized_cased_punct.apply(index_text)
len_limit = limit_max_len(quora.indexed_cased_punct.str.len(), 1)
X = tf.keras.preprocessing.sequence.pad_sequences(quora.indexed_cased_punct.values, maxlen=len_limit)
X_train, X_valid, y_train, y_valid = train_test_split(X, y,
                                                      test_size=0.05,
                                                      random_state=1,
                                                      stratify=y)
# + id="B_J9vh4cIMwA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="a02760a8-d84a-4621-9a40-7e800ba9031a"
# Attempt 4: same four-branch char-CNN, retrained on the
# case + punctuation tokenization.
inputs = tf.keras.layers.Input(shape=(len_limit,))
embeddings = tf.keras.layers.Embedding(input_dim=len(char2id), output_dim=50)(inputs)
conv = []  # pooled output of each branch, concatenated below
# Branch 1: kernel 2, 16 filters.
conv_1 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_2 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_1)
conv_3 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_2)
pool_1 = tf.keras.layers.GlobalMaxPooling1D()(conv_3)
drop_1 = tf.keras.layers.AlphaDropout(0.1)(pool_1)
conv.append(drop_1)
# Branch 2: kernel 3, 32 filters.
conv_4 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(embeddings)
conv_5 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_4)
conv_6 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_5)
pool_2 = tf.keras.layers.GlobalMaxPooling1D()(conv_6)
drop_2 = tf.keras.layers.AlphaDropout(0.1)(pool_2)
conv.append(drop_2)
# Branch 3: kernel 4, 64 filters.
conv_7 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(embeddings)
conv_8 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_7)
conv_9 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_8)
pool_3 = tf.keras.layers.GlobalMaxPooling1D()(conv_9)
drop_3 = tf.keras.layers.AlphaDropout(0.1)(pool_3)
conv.append(drop_3)
# Branch 4: kernel 2 with widening filters (16 -> 128).
conv_10 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_11 = tf.keras.layers.Conv1D(kernel_size=2, filters=32, strides=1, activation='tanh')(conv_10)
conv_12 = tf.keras.layers.Conv1D(kernel_size=2, filters=64, strides=1, activation='tanh')(conv_11)
conv_13 = tf.keras.layers.Conv1D(kernel_size=2, filters=128, strides=1, activation='tanh')(conv_12)
pool_4 = tf.keras.layers.GlobalMaxPooling1D()(conv_13)
drop_4 = tf.keras.layers.AlphaDropout(0.1)(pool_4)
conv.append(drop_4)
concat = tf.keras.layers.Concatenate()(conv)
dense_1 = tf.keras.layers.Dense(128, activation='selu')(concat)
dense_2 = tf.keras.layers.Dense(64, activation='selu')(dense_1)
dense_3 = tf.keras.layers.Dense(32, activation='selu')(dense_2)
drop_5 = tf.keras.layers.AlphaDropout(0.1)(dense_3)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(drop_5)
model_4 = tf.keras.Model(inputs=inputs, outputs=outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Track the best epoch by validation F1.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.weights',
                                                monitor='val_f1',
                                                verbose=1,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                mode='max',
                                                save_freq='epoch')
model_4.compile(optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=[f1])
model_4.fit(X_train, y_train,
            validation_data=(X_valid, y_valid),
            batch_size=4000,
            epochs=30,
            callbacks=[checkpoint])
# + id="GPxysarNIMys" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="eb6cffac-711a-435d-8080-cabcfb24cdd1"
# Restore the best-F1 epoch and report validation metrics.
model_4.load_weights('model.weights')
preds = model_4.predict(X_valid).reshape(-1)
print(classification_report(y_valid, (preds > 0.5).astype(int)))
# + [markdown] id="g87cUFyIO84p" colab_type="text"
# Как видим, качество несколько улучшилось. Вероятно в пунктуации действительно содержится полезная информация для нашего классификатора.
# + [markdown] id="VM7vT2o-INPY" colab_type="text"
# # Попытка 5
# + [markdown] id="ghNSCA6cK0bY" colab_type="text"
# Оставляем только латинские буквы нижнего регистра
# + id="TzZ85kNjIO2f" colab_type="code" colab={}
def tokenize_ascii(text: str) -> list:
    """Lower-cased tokenizer keeping only ASCII lowercase letters.

    Everything else (including digits, punctuation and -- as written --
    even spaces) becomes 'UNK'.
    BUG FIX: the original referenced the undefined name `ascii_lower`
    (NameError); the `string` module constant is `ascii_lowercase`.
    NOTE(review): unlike the earlier tokenizers this one maps ' ' to 'UNK';
    kept as-is since the text says "latin lowercase letters only", but
    worth confirming.
    """
    result = []
    for ch in text.lower():
        if ch in ascii_lowercase:
            result.append(ch)
        else:
            result.append('UNK')
    return result


def index_chars(chars: set) -> dict:
    """Re-defined id mapping for this attempt: only UNK/PAD special tokens."""
    d = {'UNK': 1, 'PAD': 0}
    for ch in chars:
        if ch not in d.keys():
            d[ch] = len(d)
    return d
# + id="EuxW8KntLDPI" colab_type="code" colab={}
# Attempt 5 data prep.
# BUG FIX: this cell previously applied tokenize_cased, so the
# lowercase-ascii-only experiment was never actually run. Use the
# tokenize_ascii function defined above, as the column names intend.
quora['tokenized_ascii'] = quora.cleaned.apply(tokenize_ascii)
vocab = build_vocab(quora.tokenized_ascii.values, 1)
char2id = index_chars(vocab)
id2char = {i: ch for ch, i in char2id.items()}
quora['indexed_ascii'] = quora.tokenized_ascii.apply(index_text)
len_limit = limit_max_len(quora.indexed_ascii.str.len(), 1)
X = tf.keras.preprocessing.sequence.pad_sequences(quora.indexed_ascii.values, maxlen=len_limit)
X_train, X_valid, y_train, y_valid = train_test_split(X, y,
                                                      test_size=0.05,
                                                      random_state=1,
                                                      stratify=y)
# + id="dS3F8ydBLvRu" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="748dfd59-89dd-4103-cc39-95dbb37adeae"
# Attempt 5: same four-branch char-CNN, retrained on the
# lowercase-ascii-only tokenization.
inputs = tf.keras.layers.Input(shape=(len_limit,))
embeddings = tf.keras.layers.Embedding(input_dim=len(char2id), output_dim=50)(inputs)
conv = []  # pooled output of each branch, concatenated below
# Branch 1: kernel 2, 16 filters.
conv_1 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_2 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_1)
conv_3 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(conv_2)
pool_1 = tf.keras.layers.GlobalMaxPooling1D()(conv_3)
drop_1 = tf.keras.layers.AlphaDropout(0.1)(pool_1)
conv.append(drop_1)
# Branch 2: kernel 3, 32 filters.
conv_4 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(embeddings)
conv_5 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_4)
conv_6 = tf.keras.layers.Conv1D(kernel_size=3, filters=32, strides=1, activation='tanh')(conv_5)
pool_2 = tf.keras.layers.GlobalMaxPooling1D()(conv_6)
drop_2 = tf.keras.layers.AlphaDropout(0.1)(pool_2)
conv.append(drop_2)
# Branch 3: kernel 4, 64 filters.
conv_7 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(embeddings)
conv_8 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_7)
conv_9 = tf.keras.layers.Conv1D(kernel_size=4, filters=64, strides=1, activation='tanh')(conv_8)
pool_3 = tf.keras.layers.GlobalMaxPooling1D()(conv_9)
drop_3 = tf.keras.layers.AlphaDropout(0.1)(pool_3)
conv.append(drop_3)
# Branch 4: kernel 2 with widening filters (16 -> 128).
conv_10 = tf.keras.layers.Conv1D(kernel_size=2, filters=16, strides=1, activation='tanh')(embeddings)
conv_11 = tf.keras.layers.Conv1D(kernel_size=2, filters=32, strides=1, activation='tanh')(conv_10)
conv_12 = tf.keras.layers.Conv1D(kernel_size=2, filters=64, strides=1, activation='tanh')(conv_11)
conv_13 = tf.keras.layers.Conv1D(kernel_size=2, filters=128, strides=1, activation='tanh')(conv_12)
pool_4 = tf.keras.layers.GlobalMaxPooling1D()(conv_13)
drop_4 = tf.keras.layers.AlphaDropout(0.1)(pool_4)
conv.append(drop_4)
concat = tf.keras.layers.Concatenate()(conv)
dense_1 = tf.keras.layers.Dense(128, activation='selu')(concat)
dense_2 = tf.keras.layers.Dense(64, activation='selu')(dense_1)
dense_3 = tf.keras.layers.Dense(32, activation='selu')(dense_2)
drop_5 = tf.keras.layers.AlphaDropout(0.1)(dense_3)
outputs = tf.keras.layers.Dense(1, activation='sigmoid')(drop_5)
model_5 = tf.keras.Model(inputs=inputs, outputs=outputs)
optimizer = tf.keras.optimizers.Adam(learning_rate=0.001)
# Track the best epoch by validation F1.
checkpoint = tf.keras.callbacks.ModelCheckpoint('model.weights',
                                                monitor='val_f1',
                                                verbose=1,
                                                save_weights_only=True,
                                                save_best_only=True,
                                                mode='max',
                                                save_freq='epoch')
model_5.compile(optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=[f1])
model_5.fit(X_train, y_train,
            validation_data=(X_valid, y_valid),
            batch_size=4000,
            epochs=30,
            callbacks=[checkpoint])
# + id="dpG6ImVNL0IB" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 170} outputId="a686f6b6-9bee-45b9-98f0-6686137867a1"
# Restore the best-F1 epoch and report validation metrics.
model_5.load_weights('model.weights')
preds = model_5.predict(X_valid).reshape(-1)
print(classification_report(y_valid, (preds > 0.5).astype(int)))
# + [markdown] id="bu2qBkPFUEvX" colab_type="text"
# Качество ухудшилось, но не так уж сильно.
# + id="CP3ADhXbUKfB" colab_type="code" colab={}
|
HW7/HW7.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="qnUzRSg37Wfe" colab_type="code" outputId="d6044ba3-2a06-4634-e09b-21a43efa65b3" executionInfo={"status": "ok", "timestamp": 1587663263159, "user_tz": -480, "elapsed": 3849, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 212}
# !wget https://raw.githubusercontent.com/yuchen118125/data/master/KHC.csv
# + id="N-tzvnpt7tnP" colab_type="code" outputId="0c59da6b-7391-4d34-e008-8fd3f9e5ed10" executionInfo={"status": "ok", "timestamp": 1587663263160, "user_tz": -480, "elapsed": 3677, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 413}
import pandas as pd
import numpy as np
import datetime
import tqdm #進度條
import talib
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# %matplotlib inline
# Load the target stock (KHC) plus exogenous market series; rows with gaps
# are dropped per-series here and again after merging below.
data = pd.read_csv('KHC.csv')
data1 = pd.read_csv("data/1019_CL.csv").dropna()
data2 = pd.read_csv("data/1019_OVX.csv").dropna()
data3 = pd.read_csv("data/1019_GC.csv").dropna()
data4 = pd.read_csv("data/1019_GVZ.csv").dropna()
Date = pd.to_datetime(data.get("Date"))
Open = data.get("Open")
High = data.get("High")
Low = data.get("Low")
Close = data.get("Close")
Close_cl = data1.get("Close")
Close_ovx = data2.get("Close")
Close_gc = data3.get("Close")
Close_gvz = data4.get("Close")
# Renamed from `dict`, which shadowed the builtin.
price_columns = {"Date": Date,
                 "open": Open,
                 "high": High,
                 "low": Low,
                 "close": Close,
                 "close_cl": Close_cl,
                 "close_ovx": Close_ovx,
                 "close_gc": Close_gc,
                 "close_gvz": Close_gvz,
                 "return": 0,  # placeholder; filled by create_labels below
                 }
df = pd.DataFrame(price_columns)
df.set_index('Date', inplace=True)
df = df.dropna()
print("總共:%d個交易時間點" % (len(Date)))
print("去除空白值後剩下:%d個交易時間點" % (len(df)))
df.tail(10)
# + id="-AHsml6j7tpu" colab_type="code" colab={}
def create_labels(df, col_name, window_size=11):
    """Label rows by the price position at the center of a sliding window.

    For every row that closes a full window of `window_size` prices:
      2 (sell)  if the window maximum sits exactly at the window center,
      0 (buy)   if the window minimum sits exactly at the window center,
      1 (hold)  otherwise.
    Rows without a full preceding window get NaN. `window_size` should be
    odd so that the center falls on an actual index.

    BUG FIX: the original scanned the window with an if/elif chain, so the
    max comparison was skipped whenever an element set a new running
    minimum -- in particular the first window element could never be
    recognised as the maximum, mislabeling some windows. This version
    compares against the true window min/max.
    """
    prices = df[col_name].to_numpy()
    total_rows = len(prices)
    labels = np.full(total_rows, np.nan)
    for window_end in range(window_size - 1, total_rows):
        window_begin = window_end - (window_size - 1)
        window_middle = (window_begin + window_end) // 2
        window = prices[window_begin:window_end + 1]
        # argmax/argmin return the FIRST occurrence on ties, matching the
        # original strict-inequality scan; max is checked before min as in
        # the original.
        if window_begin + int(np.argmax(window)) == window_middle:
            labels[window_end] = 2
        elif window_begin + int(np.argmin(window)) == window_middle:
            labels[window_end] = 0
        else:
            labels[window_end] = 1
    return labels
# 0 = window min at center (buy), 1 = hold, 2 = window max at center (sell).
df['return'] = create_labels(df,'close')
# + id="y2puMlbK7tr7" colab_type="code" colab={}
# Technical indicators on the target close, plus the raw exogenous closes.
ema = talib.EMA(df['close'])
sma = talib.SMA(df['close'])
roc = talib.ROC(df['close'])
macd, macdsignal, macdhist = talib.MACD(df['close'])
k, d = talib.STOCH (df['high'], df['low'], df['close'])
upper, middle, lower = talib.BBANDS(df['close'])
B = (df['close'] - lower)/(upper - lower)  # %B: position of close within the Bollinger band
dataset = pd.DataFrame({
    'Close':df['close'],
    'EMA':ema,
    'SMA':sma,
    'ROC':roc,
    'MACD':macd,
    '%K':k,
    '%D':d,
    'Upper Band':upper,
    'Lower Band':lower,
    '%B':B,
    'Close_CL':df['close_cl'],
    'Close_OVX':df['close_ovx'],
    'Close_GC':df['close_gc'],
    'Close_GVZ':df['close_gvz'],
    'return': df['return'],
    })
feature_names = list(dataset.columns[:-1])  # every column except the 'return' label
# + id="MMHFo3Q57tun" colab_type="code" outputId="c120a200-0431-4048-b8bf-71975bbd6593" executionInfo={"status": "ok", "timestamp": 1587663265688, "user_tz": -480, "elapsed": 4452, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 52}
print("before dropping NaN", dataset.shape)
dataset = dataset.dropna()  # indicators have warm-up NaNs at the start
print("after dropping NaN", dataset.shape)
# + id="sRaKnKk67txC" colab_type="code" outputId="11d49aa9-36bb-49c8-870e-0166983b4ef9" executionInfo={"status": "ok", "timestamp": 1587663265689, "user_tz": -480, "elapsed": 3906, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 307}
from sklearn.preprocessing import MinMaxScaler
ss = MinMaxScaler()
# Scale every column to [0, 1] ...
dataset_scaled = ss.fit_transform(dataset)
dataset_scaled = pd.DataFrame(dataset_scaled, columns=dataset.columns, index=dataset.index)
# ... then restore the unscaled class labels (they were scaled along with the features).
dataset_scaled['return'] = dataset['return']
dataset_scaled.describe()
# + id="XBkiXpJbngST" colab_type="code" outputId="c16876b8-b5ac-4a98-aa05-bfac87b3a32f" executionInfo={"status": "ok", "timestamp": 1587663266106, "user_tz": -480, "elapsed": 3923, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 356}
result = pd.value_counts(dataset_scaled['return'])
print(result)
dataset_scaled['return'].hist()
# + id="1GcChtM07tzN" colab_type="code" outputId="8404f0a6-da2c-40b9-8c24-3c6a8b390e70" executionInfo={"status": "ok", "timestamp": 1587663266706, "user_tz": -480, "elapsed": 4345, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 120, "referenced_widgets": ["076203debdac41138db2daeb5ad2d2bb", "04d79678ca86427e8ef1e5ff3ad7f789", "<KEY>", "<KEY>", "<KEY>", "3d8f96a0c3b54eee937917869151f80e", "d07cbb837ac940d0b94ac745e0246987", "665b1b3080c448e394ee9dcd7e0b7746"]}
from keras.utils import np_utils
# Number of days of data per "image" (the CNN input window).
time_period = 20
# Predict the rise/fall this many days ahead of the window's last day.
day = 1
cnn_x = []
cnn_y = []
indexs = []
dataset_scaled_x= dataset_scaled[feature_names]
# Slide a time_period-day window over the scaled features; the target is the
# one-hot label `day` steps after the window's last row.
# NOTE(review): tqdm.tqdm_notebook and keras np_utils are deprecated APIs —
# modern equivalents are tqdm.notebook.tqdm and tf.keras.utils.to_categorical.
for i in tqdm.tqdm_notebook(range(0, len(dataset_scaled)-time_period-1-day)):
    cnn_x.append(dataset_scaled_x.iloc[i:i+time_period].values)
    r = dataset_scaled['return'].iloc[i+time_period-1+day]
    rr = np_utils.to_categorical(r, num_classes=3)
    cnn_y.append(rr)
    indexs.append(dataset_scaled.index[i+time_period-1])
cnn_x = np.array(cnn_x)
cnn_y = np.array(cnn_y)
indexes = np.array(indexs)
# + id="H-AbKcvo4ftC" colab_type="code" colab={}
#2010~2018年為訓練和測試,2019年則為驗證
import datetime
cnn_x_t = cnn_x[indexes < datetime.datetime(2019,1,1)]
cnn_y_t = cnn_y[indexes < datetime.datetime(2019,1,1)]
cnn_x_validation = cnn_x[indexes > datetime.datetime(2019,1,1)]
cnn_y_validation = cnn_y[indexes > datetime.datetime(2019,1,1)]
# + id="xRT5JuZq4fvg" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
cnn_x_train, cnn_x_test, cnn_y_train, cnn_y_test = train_test_split(cnn_x_t, cnn_y_t,
test_size=0.2,
random_state =1,
stratify=cnn_y_t,
shuffle = True)
# + id="jTUibO4Q7t1j" colab_type="code" colab={}
cnn_x_train = np.stack((cnn_x_train,)*3,axis=-1)
cnn_x_test = np.stack((cnn_x_test,)*3,axis=-1)
cnn_x_validation = np.stack((cnn_x_validation,)*3,axis=-1)
# + id="V-jDgo6C7t33" colab_type="code" outputId="a4020105-d147-416b-da5f-38bbbb358042" executionInfo={"status": "ok", "timestamp": 1587663271682, "user_tz": -480, "elapsed": 7745, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 862}
fig = plt.figure(figsize = (15,15))
columns = rows = 5
for i in range(1,columns*rows+1):
index = np.random.randint(len(cnn_x_train))
img = cnn_x_train[index]
fig.add_subplot(rows,columns,i)
plt.axis("off")
plt.title(str(index)+' class = '+str(np.argmax(cnn_y_train[index])))
plt.subplots_adjust(wspace=0.2,hspace=0.2)
plt.imshow(img)
plt.show()
# + id="3y784iIX-950" colab_type="code" colab={}
from sklearn.utils.class_weight import compute_class_weight
import tensorflow as tf
def get_sample_weights(y):
    """Return balanced class weights and per-sample weights for labels *y*.

    Parameters
    ----------
    y : array-like of int
        Integer class labels (here 0 / 1 / 2).

    Returns
    -------
    class_weight : ndarray
        'balanced' weight for each class.
    sample_weights : ndarray of float
        Weight of each individual sample (the class weight of its label).
    """
    y = y.astype(int)
    # Keyword arguments are required by scikit-learn >= 0.24
    # (positional usage was deprecated and later removed).
    class_weight = compute_class_weight(class_weight='balanced',
                                        classes=np.unique(y), y=y)
    print("real class weight are{}".format(class_weight), np.unique(y))
    print("value_counts", np.unique(y, return_counts=True))
    sample_weights = y.copy().astype(float)
    # Assumes labels are 0..n_classes-1 so a label can index class_weight
    # directly (true here: labels are 0, 1, 2).
    for i in np.unique(y):
        sample_weights[sample_weights == i] = class_weight[i]
    return class_weight, sample_weights
# + id="b4I404x0--SD" colab_type="code" outputId="a96f15a1-bb8b-45ea-efd3-87fd97d1d6a3" executionInfo={"status": "ok", "timestamp": 1587663271683, "user_tz": -480, "elapsed": 4659, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 52}
#將onehot編碼轉回數組
lable = np.argmax(cnn_y_train, axis=1)
lable = np.array(lable)
class_weight,sample_weights = get_sample_weights(lable)
# + id="ZJ__zIxSEMAu" colab_type="code" outputId="fd6b2c53-3520-41f2-c6db-f5f16a163b52" executionInfo={"status": "ok", "timestamp": 1587663271684, "user_tz": -480, "elapsed": 4120, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
class_weight={0:class_weight[0],
1:class_weight[1],
2:class_weight[2],}
print(class_weight)
# + id="DNHP55Za7t8p" colab_type="code" colab={}
import tensorflow.keras
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.initializers import he_normal
input_shape = cnn_x_train[0].shape
def get_model(dropout1_rate=0.5, dropout2_rate=0.5):
    """Assemble the CNN used for the 3-class (Buy/Hold/Sell) classifier.

    Two 3x3 conv layers (32 then 64 filters), 2x2 max-pooling, dropout,
    a 128-unit dense layer, dropout again, and a 3-way softmax head.
    Uses the module-level `input_shape` for the first layer.
    """
    return Sequential([
        layers.Conv2D(32, (3, 3), activation='relu', padding="same",
                      input_shape=input_shape),
        layers.Conv2D(64, (3, 3), activation='relu', padding="same"),
        layers.MaxPooling2D((2, 2)),
        layers.Dropout(dropout1_rate, name="dropout_1"),
        layers.Flatten(),
        layers.Dense(128, activation='relu'),
        layers.Dropout(dropout2_rate, name="dropout_2"),
        layers.Dense(3, activation='softmax'),
    ])
# + id="ChmBWAYc7t-u" colab_type="code" colab={}
from tensorflow.keras.callbacks import EarlyStopping
earlystop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
def fit_with(dropout1_rate, dropout2_rate, lr, batchsize):
    """Train one CNN with the given hyper-parameters; return its test accuracy.

    Objective function for Bayesian optimisation, so it must return a single
    scalar score (higher is better). Reads the module-level training data,
    `earlystop` callback and `class_weight` dict.
    """
    # The optimiser proposes floats; batch size must be an integer.
    batchsize = int(batchsize)
    # Build the model with the proposed dropout rates.
    model = get_model(dropout1_rate, dropout2_rate)
    # `lr` is a deprecated alias in tf.keras; `learning_rate` is the
    # supported keyword.
    optimizer = tensorflow.keras.optimizers.Adam(learning_rate=lr)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                  metrics=['accuracy'])
    # Train silently; early stopping guards against running all 250 epochs.
    model.fit(cnn_x_train, cnn_y_train,
              batch_size=batchsize,
              epochs=250,
              verbose=0,
              validation_data=(cnn_x_test, cnn_y_test),
              callbacks=[earlystop],
              class_weight=class_weight,
              )
    # evaluate() returns [loss, accuracy]; only accuracy is maximised.
    score = model.evaluate(cnn_x_test, cnn_y_test, verbose=0)
    return score[1]
# + id="Vi4bB53_8120" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="94e0939a-4786-4402-deff-da92f65ab7a3" executionInfo={"status": "ok", "timestamp": 1587662700163, "user_tz": -480, "elapsed": 769783, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}}
from bayes_opt import BayesianOptimization
optimizer = BayesianOptimization(
fit_with,
{'dropout1_rate': (0, 0.5),
'dropout2_rate': (0, 0.5),
'lr': (1e-5, 1e-2),
'batchsize': (16,128)}
)
optimizer.maximize(init_points=10, n_iter=20)
# + id="9GAZ7jwQ83XJ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="ac70bb04-25c2-4f4d-b01c-f499f76734cb" executionInfo={"status": "ok", "timestamp": 1587662700164, "user_tz": -480, "elapsed": 767896, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}}
print(optimizer.max)
print(optimizer.max['params']['dropout1_rate'])
print(optimizer.max['params']['dropout2_rate'])
print(optimizer.max['params']['lr'])
print(optimizer.max['params']['batchsize'])
##Bayesian Optimization 找到的最佳超參數值
dropout_rate1 = optimizer.max['params']['dropout1_rate']
dropout_rate2 = optimizer.max['params']['dropout2_rate']
learning_rate = optimizer.max['params']['lr']
batchsize = int(optimizer.max['params']['batchsize'])
# + id="cop2iskN5YPs" colab_type="code" colab={}
dropout_rate1 = 0.4638
dropout_rate2 = 0.04089
learning_rate = 0.001952
batchsize = int(69.01)
# + id="pFUmbe4483aT" colab_type="code" outputId="9a04b0b1-0351-41ba-e0dc-8edd3dc62fde" executionInfo={"status": "ok", "timestamp": 1587663272883, "user_tz": -480, "elapsed": 690, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 461}
import tensorflow.keras
import tensorflow.keras.layers as layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.initializers import he_normal
input_shape = cnn_x_train[0].shape
model = Sequential()
model.add(layers.Conv2D(filters=32,
kernel_size=(3,3),
activation='relu',
padding="same",
input_shape=input_shape))
model.add(layers.Conv2D(filters=64,
kernel_size=(3,3),
padding="same",
activation='relu'))
model.add(layers.MaxPooling2D(pool_size=(2,2)))
model.add(layers.Dropout(dropout_rate1, name="dropout_1"))
model.add(layers.Flatten())
model.add(layers.Dense(units=128, activation='relu'))
model.add(layers.Dropout(dropout_rate2, name="dropout_2"))
model.add(layers.Dense(units=3, activation='softmax'))
adam = tensorflow.keras.optimizers.Adam(learning_rate)
model.compile(loss='categorical_crossentropy',optimizer=adam, metrics=['accuracy'])
print(model.summary())
# + id="lPXVBvOD-qHJ" colab_type="code" outputId="b324297f-8c7e-42cb-e921-086219e23b23" executionInfo={"status": "ok", "timestamp": 1587663320657, "user_tz": -480, "elapsed": 47311, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 1000}
from tensorflow.keras.callbacks import EarlyStopping
earlystop = EarlyStopping(monitor='val_loss', patience=10, verbose=1)
history = model.fit(cnn_x_train, cnn_y_train,
batch_size=batchsize,
epochs=250,
verbose=1,
validation_data=(cnn_x_test,cnn_y_test),
callbacks=[earlystop],
#sample_weight=sample_weights,
class_weight=class_weight,
)
# + id="Sh2GBSXK--1R" colab_type="code" outputId="a32db9b8-4676-47eb-93ce-c212b3264b94" executionInfo={"status": "ok", "timestamp": 1587663320658, "user_tz": -480, "elapsed": 45284, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 123}
o_loss,o_accuracy = model.evaluate(cnn_x_train,cnn_y_train)
print("對訓練資料的:\nLoss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy))
n_loss,n_accuracy = model.evaluate(cnn_x_test,cnn_y_test)
print("對測試資料的:\nLoss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy))
# + id="cZhIFa_s_GPG" colab_type="code" outputId="c094aac1-a0e2-474d-b733-57dc36385df4" executionInfo={"status": "ok", "timestamp": 1587663322463, "user_tz": -480, "elapsed": 46679, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 694}
import matplotlib.pyplot as plt
# %matplotlib inline
plt.style.use("ggplot")
plt.figure(figsize=(10, 5))
plt.plot(history.history['loss'], color = 'green', label='Loss')
plt.plot(history.history['val_loss'], color = 'blue', label='Val_Loss')
plt.legend()
plt.title('Training loss based on CNN')
plt.ylabel('Value')
plt.xlabel('Number of epochs')
plt.show()
plt.style.use("ggplot")
plt.figure(figsize=(10, 5))
plt.plot(history.history['accuracy'], color = 'green', label='accuracy')
plt.plot(history.history['val_accuracy'], color = 'blue', label='val_accuracy')
plt.legend()
plt.title('Training accuracy based on CNN')
plt.ylabel('Value')
plt.xlabel('Number of epochs')
plt.show()
# + id="S5VOOBAe_GXY" colab_type="code" outputId="cbc99842-b55b-4551-f8b8-fb8e934a33e1" executionInfo={"status": "ok", "timestamp": 1587663322463, "user_tz": -480, "elapsed": 46497, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 285}
#將onehot編碼轉回數組
cnn_y_test = np.argmax(cnn_y_test, axis=1)
xcnn_y_test = pd.Series(cnn_y_test)
#預測測試集
pre = model.predict_classes(cnn_x_test)
pre = pd.Series(pre)
pre.hist()
# + id="Tfvy5lCsynOb" colab_type="code" outputId="7b4afda5-d52b-4e08-8a56-adb1bafed173" executionInfo={"status": "ok", "timestamp": 1587663322464, "user_tz": -480, "elapsed": 46009, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 515}
from sklearn.metrics import confusion_matrix
LABELS = ["Buy","Hold","Sell"]
conf_matrix = confusion_matrix(xcnn_y_test, pre)
# Class counts in the test set. Per the labelling rule (label 0 = local
# minimum = Buy, label 2 = local maximum = Sell, matching the trading loop
# and LABELS = ["Buy", "Hold", "Sell"] / the confusion-matrix row order).
t_size = len(cnn_y_test)
b_size = (cnn_y_test <= 0).sum()  # fixed: Buy is class 0 (was counting class 2)
s_size = (cnn_y_test >= 2).sum()  # fixed: Sell is class 2 (was counting class 0)
h_size = t_size - (b_size + s_size)
b_guess = b_size / t_size
s_guess = s_size / t_size
h_guess = h_size / t_size
#矩陣數字
c00 = conf_matrix[0][0]
c01 = conf_matrix[0][1]
c02 = conf_matrix[0][2]
c10 = conf_matrix[1][0]
c11 = conf_matrix[1][1]
c12 = conf_matrix[1][2]
c20 = conf_matrix[2][0]
c21 = conf_matrix[2][1]
c22 = conf_matrix[2][2]
#評估指標
accuracy = (c00+c11+c22)/t_size
precision_b = c00/(c00+c10+c20)
precision_h = c11/(c01+c11+c21)
precision_s = c22/(c02+c12+c22)
recall_b = c00/(c00+c01+c02)
recall_h = c11/(c10+c11+c12)
recall_s = c22/(c20+c21+c22)
F1_b = 2*((precision_b*recall_b)/(precision_b+recall_b))
F1_h = 2*((precision_h*recall_h)/(precision_h+recall_h))
F1_s = 2*((precision_s*recall_s)/(precision_s+recall_s))
#印出
print("對訓練集資料的:Loss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy))
print("對測試集資料的:Loss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy))
print("\n測試資料總共:%.2f 筆\n其中 Buy 的筆數為: %.2f, 全部猜 Buy 猜對的機率是: %.2f" % (t_size, b_size, b_guess))
print("其中 Hold 的筆數為: %.2f, 全部猜 Hold 猜對的機率是: %.2f" % (h_size, h_guess))
print("其中 Sell 的筆數為: %.2f, 全部猜 Sell 猜對的機率是: %.2f" % (s_size, s_guess))
print("\n該測試集資料的 Accuracy 為: %.2f \nPrecision(Buy): %.2f, Recall(Buy): %.2f, F1 score(Buy): %.2f" % (accuracy, precision_b, recall_b, F1_b))
print("Precision(Hold): %.2f, Recall(Hold): %.2f, F1 score(Hold): %.2f" % (precision_h, recall_h, F1_h))
print("Precision(Sell): %.2f, Recall(Sell): %.2f, F1 score(Sell): %.2f" % (precision_s, recall_s, F1_s))
#畫圖
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d",center=0.7,cmap = 'GnBu');
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
# + id="QMoA5pSi-Q3Q" colab_type="code" outputId="a0959b51-58cb-420d-eec3-ba247e48c60d" executionInfo={"status": "ok", "timestamp": 1587663323355, "user_tz": -480, "elapsed": 46718, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 285}
#將onehot編碼轉回數組cnn_x_validation
cnn_y_validation = np.argmax(cnn_y_validation, axis=1)
xcnn_y_validation = pd.Series(cnn_y_validation)
#預測
pre = model.predict_classes(cnn_x_validation)
pre = pd.Series(pre)
pre.hist()
# + id="zVC3-ScZ5tKY" colab_type="code" outputId="79875923-da8a-4c4f-bc61-ac<PASSWORD>" executionInfo={"status": "ok", "timestamp": 1587663323356, "user_tz": -480, "elapsed": 46432, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 515}
from sklearn.metrics import confusion_matrix
LABELS = ["Buy","Hold","Sell"]
conf_matrix = confusion_matrix(xcnn_y_validation, pre)
# Class counts in the validation set. Per the labelling rule (label 0 =
# local minimum = Buy, label 2 = local maximum = Sell, matching the trading
# loop and LABELS = ["Buy", "Hold", "Sell"]).
t_size = len(cnn_y_validation)
b_size = (cnn_y_validation <= 0).sum()  # fixed: Buy is class 0 (was counting class 2)
s_size = (cnn_y_validation >= 2).sum()  # fixed: Sell is class 2 (was counting class 0)
h_size = t_size - (b_size + s_size)
b_guess = b_size / t_size
s_guess = s_size / t_size
h_guess = h_size / t_size
#矩陣數字
c00 = conf_matrix[0][0]
c01 = conf_matrix[0][1]
c02 = conf_matrix[0][2]
c10 = conf_matrix[1][0]
c11 = conf_matrix[1][1]
c12 = conf_matrix[1][2]
c20 = conf_matrix[2][0]
c21 = conf_matrix[2][1]
c22 = conf_matrix[2][2]
#評估指標
accuracy = (c00+c11+c22)/t_size
precision_b = c00/(c00+c10+c20)
precision_h = c11/(c01+c11+c21)
precision_s = c22/(c02+c12+c22)
recall_b = c00/(c00+c01+c02)
recall_h = c11/(c10+c11+c12)
recall_s = c22/(c20+c21+c22)
F1_b = 2*((precision_b*recall_b)/(precision_b+recall_b))
F1_h = 2*((precision_h*recall_h)/(precision_h+recall_h))
F1_s = 2*((precision_s*recall_s)/(precision_s+recall_s))
#印出
print("對訓練集資料的:Loss: %.2f, Accuracy: %.2f" % (o_loss, o_accuracy))
print("對測試集資料的:Loss: %.2f, Accuracy: %.2f" % (n_loss, n_accuracy))
print("\n驗證資料總共:%.2f 筆\n其中 Buy 的筆數為: %.2f, 全部猜 Buy 猜對的機率是: %.2f" % (t_size, b_size, b_guess))
print("其中 Hold 的筆數為: %.2f, 全部猜 Hold 猜對的機率是: %.2f" % (h_size, h_guess))
print("其中 Sell 的筆數為: %.2f, 全部猜 Sell 猜對的機率是: %.2f" % (s_size, s_guess))
print("\n該測試集資料的 Accuracy 為: %.2f \nPrecision(Buy): %.2f, Recall(Buy): %.2f, F1 score(Buy): %.2f" % (accuracy, precision_b, recall_b, F1_b))
print("Precision(Hold): %.2f, Recall(Hold): %.2f, F1 score(Hold): %.2f" % (precision_h, recall_h, F1_h))
print("Precision(Sell): %.2f, Recall(Sell): %.2f, F1 score(Sell): %.2f" % (precision_s, recall_s, F1_s))
#畫圖
sns.heatmap(conf_matrix, xticklabels=LABELS, yticklabels=LABELS, annot=True, fmt="d",center=0.7,cmap = 'GnBu');
plt.title("Confusion matrix")
plt.ylabel('True class')
plt.xlabel('Predicted class')
plt.show()
# + id="tFptKD4n6B23" colab_type="code" outputId="33d14afc-ba47-4102-993c-320c744bb8af" executionInfo={"status": "ok", "timestamp": 1587663323357, "user_tz": -480, "elapsed": 46246, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 87}
result = pd.value_counts(xcnn_y_validation)
print(result)
# + id="DZJzWI__8edw" colab_type="code" outputId="b8532063-76da-4c67-fc39-c1aa2de16b47" executionInfo={"status": "ok", "timestamp": 1587663323357, "user_tz": -480, "elapsed": 46087, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
len(df['close'])
# + id="YY258MPoxLXe" colab_type="code" outputId="291ab849-c827-48ff-b260-a36a7a137896" executionInfo={"status": "ok", "timestamp": 1587663323358, "user_tz": -480, "elapsed": 45937, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
len(pre)
# + id="4m2PVHy1755_" colab_type="code" colab={}
f = df['close'][883:]
a = []
b = []
for i in range(len(f)):
a.append(f[i])
b.append(pre[i])
#b.append(xcnn_y_validation[i])
a = np.array(a)
b = np.array(b)
S_dataset = pd.DataFrame({
'price':a,
'signal':b,
})
# + id="lQmVm7TL7BYd" colab_type="code" colab={}
# Trading simulation: start with $10,000 and a $9 fee per trade (eToro-like).
# `stock` = shares currently held; act only when the signal changes from the
# previous step (`previous` starts at 1, i.e. "Hold").
total_money = 10000
Handling_fee = 9
stock = 0
previous = 1
for i in range(len(S_dataset)):
    now = S_dataset['signal'][i]
    if now != previous:
        if now == 2:
            # Sell signal: liquidate any holding at today's price, pay fee.
            if stock != 0:
                total_money = total_money + (S_dataset['price'][i] * stock)
                total_money = total_money - Handling_fee
                stock = 0
        elif now == 0:
            # Buy signal: buy as many whole shares as cash allows.
            # NOTE(review): no handling fee is charged on the buy side —
            # confirm whether that is intentional.
            if total_money > S_dataset['price'][i]:
                stock = int(total_money / S_dataset['price'][i])
                total_money = total_money - (stock * S_dataset['price'][i])
    previous = now
# + id="0FTwIu5-RQ9a" colab_type="code" colab={}
total = total_money+(S_dataset['price'][len(S_dataset)-1]*stock)-Handling_fee
# + id="ye7-jV0OS0BZ" colab_type="code" outputId="565fce3b-36b9-472a-8572-2ffc8fd1fd55" executionInfo={"status": "ok", "timestamp": 1587663323360, "user_tz": -480, "elapsed": 43085, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 34}
#Return on investment (%)
print(((total-10000)/10000)*100)
# + id="_CnU8LVLTYvD" colab_type="code" colab={}
indexes = indexes[indexes > datetime.datetime(2019,1,1)]
f = df['close'][2267:]
w = []
x = []
y = []
for i in range(len(f)):
w.append(indexes[i])
x.append(xcnn_y_validation[i])
y.append(pre[i])
x = np.array(x)
y = np.array(y)
S_dataset = pd.DataFrame({
'time':w,
'real signal':x,
'signal':y,
})
# + id="v8LRXfqxrVhL" colab_type="code" outputId="cc044681-09ab-4d08-c911-4786e159c85d" executionInfo={"status": "ok", "timestamp": 1587661560388, "user_tz": -480, "elapsed": 50342, "user": {"displayName": "\u8cc7\u7ba1\u78a9\u58eb\u73ed\u9673\u90c1\u84c1", "photoUrl": "", "userId": "16498330763476508006"}} colab={"base_uri": "https://localhost:8080/", "height": 47}
S_dataset.tail(16)
# + id="Mmrwk6VGsEwq" colab_type="code" colab={}
|
experiments/class3/[0_BHS_1101]Bayesian_CNN8_4_Rate_of_return.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies and Setup
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Hide warning messages in notebook
import warnings
warnings.filterwarnings('ignore')
# File to Load (Remember to Change These)
mouse_drug_data_to_load = "data/mouse_drug_data.csv"
clinical_trial_data_to_load = "data/clinicaltrial_data.csv"
# Read the Mouse and Drug Data and the Clinical Trial Data
# Combine the data into a single dataset
# Display the data table for preview
# -
# ## Tumor Response to Treatment
# +
# Store the Mean Tumor Volume Data Grouped by Drug and Timepoint
# Convert to DataFrame
# Preview DataFrame
# -
# +
# Store the Standard Error of Tumor Volumes Grouped by Drug and Timepoint
# Convert to DataFrame
# Preview DataFrame
# -
# +
# Minor Data Munging to Re-Format the Data Frames
# Preview that Reformatting worked
# -
# +
# Generate the Plot (with Error Bars)
# Save the Figure
# -
# Show the Figure
plt.show()
# ## Metastatic Response to Treatment
# +
# Store the Mean Met. Site Data Grouped by Drug and Timepoint
# Convert to DataFrame
# Preview DataFrame
# -
# +
# Store the Standard Error associated with Met. Sites Grouped by Drug and Timepoint
# Convert to DataFrame
# Preview DataFrame
# -
# +
# Minor Data Munging to Re-Format the Data Frames
# Preview that Reformatting worked
# -
# +
# Generate the Plot (with Error Bars)
# Save the Figure
# Show the Figure
# -
# ## Survival Rates
# +
# Store the Count of Mice Grouped by Drug and Timepoint (W can pass any metric)
# Convert to DataFrame
# Preview DataFrame
# -
# +
# Minor Data Munging to Re-Format the Data Frames
# Preview the Data Frame
# -
# +
# Generate the Plot (Accounting for percentages)
# Save the Figure
# Show the Figure
plt.show()
# -
# ## Summary Bar Graph
# +
# Calculate the percent changes for each drug
# Display the data to confirm
# -
# +
# Store all Relevant Percent Changes into a Tuple
# Splice the data between passing and failing drugs
# Orient widths. Add labels, tick marks, etc.
# Use functions to label the percentages of changes
# Call functions to implement the function calls
# Save the Figure
# Show the Figure
fig.show()
# -
|
Pymaceuticals/pymaceuticals_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def add_num(n1, n2):
    """Return the sum of *n1* and *n2*."""
    total = n1 + n2
    return total
def mul_num(*args):
    """Return the product of all positional arguments.

    With no arguments the product is the multiplicative identity, 1
    (same as the original accumulator-based loop).
    """
    import math
    return math.prod(args)
|
_python2/module_gyeongwon.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
from flask import Flask
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func, inspect
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
Base.classes.keys()
Measurement = Base.classes.measurement
Station = Base.classes.station
# +
inspector = inspect(engine)
columns = inspector.get_columns('measurement')
for column in columns:
print(column["name"], column["type"])
# -
# Create our session (link) from Python to the DB
session = Session(engine)
# # Exploratory Climate Analysis
# +
# Design a query to retrieve the last 12 months of precipitation data and plot the results
date = dt.datetime(2016, 8, 22)
results = session.query(Measurement.prcp, Measurement.date).\
order_by(Measurement.date.asc()).filter(Measurement.date > date).all()
# +
# storing results in a list
date = [result[1] for result in results]
rainfall = [(result[0]) for result in results]
# converting results to dataframe
df = pd.DataFrame(results, columns =["Rainfall","Date"])
df["Rainfall"] = df["Rainfall"]/2.2
df = df.fillna(value = 0).groupby("Date").sum().sort_values(by="Date")
# -
xticks = ["23rd August, 2016","October 22nd, 2016","December 21st, 2016","February 20th, 2017","April 21st, 2017","June 20th, 2017","August 18th, 2020"]
df.plot.bar(title="Rainfall")
plt.xticks(np.arange(366, step = 60), xticks, rotation=90, fontsize = 8)
plt.ylim(top=7)
plt.show()
# Use Pandas to calcualte the summary statistics for the precipitation data
df.describe()
columns = inspector.get_columns('station')
for column in columns:
print(column["name"], column["type"])
results = session.query(Station.id, Station.name)
# What are the most active stations? (i.e. what stations have the most rows)?
# List the stations and the counts in descending order.
station_results = session.query(Measurement.station, Measurement.prcp).all()
station_df = pd.DataFrame(station_results, columns =["id","Count"]).groupby('id').count()
station_df.sort_values(by = "Count", ascending= False)
# Design a query to show how many stations are available in this dataset?
len(station_df)
# +
# Using the station id from the previous query, calculate the lowest temperature recorded,
# highest temperature recorded, and average temperature of the most active station?
# A single query computes all three aggregates in one round trip instead of
# three separate queries.
station_stats = session.query(func.max(Measurement.tobs),
                              func.min(Measurement.tobs),
                              func.avg(Measurement.tobs)).\
    filter(Measurement.station == "USC00519281").one()
maximum, minimum, average = station_stats

print(f"For station USC00519281, the average temperature (F) was {round(average,2)}, the minimum {minimum} and the maximum {maximum}.")
# +
# Choose the station with the highest number of temperature observations.
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
date = dt.datetime(2016, 8, 23)
temp_query = session.query(Measurement.station, Measurement.tobs).\
filter(Measurement.station == "USC00519281").\
filter(Measurement.date > date).all()
temperatures = [result[1] for result in temp_query]
# -
bins = [58, 60, 62 , 64, 66, 68, 70 ,72, 74, 76, 78, 80, 83]
plt.hist(temperatures, bins)
plt.title("Histogram of Temperature")
plt.ylabel("Count")
plt.xlabel("Temperature")
|
climate_starter.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Python OOPS-Magic Methods In Classes
### All the class variables are public
### Car Blueprint
class Car():
    """Demo class showing common magic ("dunder") methods.

    All attributes are public, as noted in the surrounding tutorial.
    """

    def __new__(cls, windows, doors, enginetype):
        # __new__ runs before __init__ and must return the new instance.
        # The original version returned None, so Car(...) evaluated to None
        # and __init__ never ran.
        print("The object has started getting initialized")
        return super().__new__(cls)

    def __init__(self, windows, doors, enginetype):
        self.windows = windows
        self.doors = doors
        self.enginetype = enginetype

    def __str__(self):
        # Called by print() / str().
        return "The object has been initialized"

    def __sizeof__(self):
        # NOTE(review): __sizeof__ is expected to return an int
        # (sys.getsizeof would raise on this str); kept as-is for the demo.
        return "This displays size of the object"

    def drive(self):
        print("The Person drives the car")
c=Car(4,5,"Diesel")
print(c)
c.__sizeof__()
dir(c)
|
All about Python/Advance Python Series-Magic Methods In Classes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="IyHwxMlFiRvv"
# ### Zadania
#
# 1. Dodać GPU
# + id="A6hffZjUcajW"
import torch
import numpy as np
import matplotlib.pyplot as plt
# + id="H33pSMkXf68U"
dtype = torch.float
device = torch.device("cpu")
N, D_in, H, D_out = 16, 4, 0, 1
# Create random input and output data
x_numpy = np.array( [[0., 0., 0., 1.],
[1., 0., 0., 1.],
[0., 1., 0., 1.],
[0., 0., 1., 1.],
[1., 1., 0., 1.],
[1., 0., 1., 1.],
[0., 1., 1., 1.],
[1., 1., 1., 1.],
[0., 0., 0., 0.],
[1., 0., 0., 0.],
[0., 1., 0., 0.],
[0., 0., 1., 0.],
[1., 1., 0., 0.],
[1., 0., 1., 0.],
[0., 1., 1., 0.],
[1., 1., 1., 0.]])
x = torch.from_numpy(x_numpy).float()
print(x)
# + id="hOCO9CINiel9"
y_numpy = np.array( [[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[1.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.],
[0.]])
y = torch.from_numpy(y_numpy).float()
# + id="NhbWLwe3j8Dj"
w = torch.randn(D_in, D_out, device=device, dtype=dtype, requires_grad=True)
print(w)
# + id="OiVljZvUh8PH"
# Plain gradient descent on a single linear map (y_pred = x @ w),
# minimising the sum-of-squares loss for 5 steps.
learning_rate = 1e-4
loss_list = []
for t in range(5):
    y_pred = x.mm(w)                   # forward pass: (N, D_in) @ (D_in, D_out)
    loss = (y_pred - y).pow(2).sum()   # scalar SSE loss
    loss_list.append(loss.item())
    loss.backward()                    # populate w.grad via autograd
    # Update the weights outside autograd, then clear the accumulated gradient
    # so the next backward() does not add onto it.
    with torch.no_grad():
        w -= learning_rate * w.grad
        w.grad.zero_()
# + id="T63x3fY2ls24"
plt.plot(loss_list, label = 'loss')
plt.legend()
plt.show()
# + id="Khsdv_l6od5-"
|
first_pytorch.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: weather-python3
# language: python
# name: weather-python3
# ---
# # Spherical samplings for weather prediction
# +
# Cartopy uses deprecated mpl features (MatplotlibDeprecationWarning).
import warnings
from matplotlib.cbook import MatplotlibDeprecationWarning
warnings.simplefilter("ignore", MatplotlibDeprecationWarning)
import numpy as np
import scipy.spatial
from scipy.spatial.distance import pdist, squareform
from scipy.spatial import SphericalVoronoi, geometric_slerp
from scipy import sparse
import pygsp as pg
import healpy as hp
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import proj3d
import cartopy.crs as ccrs
# +
def plot_mollweide(graph, signal=None, ax=None, **kwargs):
    """Plot a spherical PyGSP graph (and optional signal) on a cartopy axis.

    NOTE: mutates ``graph.coords`` in place (first to degrees, then to
    projected x/y), so the graph's coordinates are no longer spherical
    after this call. ``ax`` must be a cartopy GeoAxes (its ``.projection``
    is used) — despite the default, ``ax=None`` would fail here.
    Extra kwargs are forwarded to ``graph.plot``.
    """
    crs = ax.projection
    ax.coastlines()
    ax.set_global()
    graph.set_coordinates('sphere', dim=2)   # (longitude, latitude) in radians
    graph.coords *= 180/np.pi                # radians -> degrees for cartopy
    graph.coords = crs.transform_points(ccrs.Geodetic(), *graph.coords.T)[:, :2]
    graph.plot(signal, ax=ax, **kwargs)
def plot_spherical_graph(graph):
    """Show ``graph`` side by side in 3D, equirectangular and Mollweide views."""
    print(graph)
    fig = plt.figure(figsize=(17, 5))
    # Panel 1: 3D view of the sampled sphere.
    axis = fig.add_subplot(1, 3, 1, projection='3d')
    graph.set_coordinates('sphere', dim=3)
    graph.plot(indices=True, ax=axis, title='3D')
    axis.set_xlabel('x')
    axis.set_ylabel('y')
    axis.set_zlabel('z')
    # Panel 2: equirectangular (lon/lat) projection.
    axis = fig.add_subplot(1, 3, 2)
    graph.set_coordinates('sphere', dim=2)
    graph.plot(indices=True, ax=axis, title='Equirectangular projection')
    axis.set_xlabel('longitude [radians]')
    axis.set_ylabel('latitude [radians]')
    # Panel 3: Mollweide (equi-area) projection through cartopy.
    axis = fig.add_subplot(1, 3, 3, projection=ccrs.Mollweide())
    plot_mollweide(graph, ax=axis, indices=True, title='Mollweide projection')
    fig.tight_layout()
# -
# ## 1 Spherical grids
#
# The sphere graphs are created with the [PyGSP](https://pygsp.readthedocs.io).
# If the data is created from another tool, it is important to make sure the vertices/cells are ordered in the same way.
# An easy way to check is to plot the data on the sampled sphere with `graph.plot(myfield)`.
# ### 1.1 Equiangular (Driscoll-Healy, Clenshaw-Curtis)
#
# * Resolution parameter: number of isolatitude rings $N_{lat}$ (and pixels per ring, often $N_{lon} = 2 N_{lat}$).
# * Number of pixels: $N_{pix} = N_{lat} \times N_{lon}$.
# * Number of pixels around the equator: $N_{lon}$.
# Illustration.
# Equiangular sampling: 4 isolatitude rings x 8 pixels per ring.
graph = pg.graphs.SphereEquiangular(4, 8, poles=0)
#graph = pg.graphs.SphereEquiangular(4, 8, poles=2)
plot_spherical_graph(graph)
# ### 1.2 (Reduced) Gauss-Legendre
#
# ECMWF: octahedral reduced Gaussian grid, named `O320` for $N=320$.
#
# * Resolution parameter: number of isolatitude rings $N_{lat} = 2N$.
# * Number of pixels: $4N(N+9)$.
# * Number of pixels around the equator: $4N+16$.
#
# References:
# * <https://confluence.ecmwf.int/display/FCST/Introducing+the+octahedral+reduced+Gaussian+grid>
# * <https://confluence.ecmwf.int/display/OIFS/4.2+OpenIFS%3A+Octahedral+grid>
# Illustration.
# Octahedral reduced Gaussian grid with 6 rings (ECMWF layout).
graph = pg.graphs.SphereGaussLegendre(6, nlon='ecmwf-octahedral', k=10)
#graph = pg.graphs.SphereGaussLegendre(6, k=8)
plot_spherical_graph(graph)
# ### 1.3 HEALPix
#
# * Resolution parameter: number of subdivisions $L$ ($N_{side}$).
# * Number of pixels: $12 L^2$.
# * Number of pixels around the equator: $4 L$.
# Illustration.
# HEALPix with nside=2 (48 pixels), 8-nearest-neighbor graph.
graph = pg.graphs.SphereHealpix(2, k=8)
#graph = pg.graphs.SphereHealpix(2, k=8, nest=True)
plot_spherical_graph(graph)
# Compare with healpy (geographical vs astrophysical flip).
hp.mollview(graph.signals['lon'], flip='geo')
# Percentage of the sphere attainable by a filter.
# The number of neighbors k is proportional to area.
kernel_size = 3
G = pg.graphs.SphereHealpix(16, k=40)
# Fraction (in %) of non-zeros of L^(kernel_size-1), i.e., the filter's support.
(G.L**(kernel_size-1)).nnz / G.N**2 *100
# ### 1.4 Icosahedral
#
# * Resolution parameter: number of subdivisions $L$.
# * Number of pixels: $10 L^2 + 2$ (vertices, hexagonal cells, `dual=False`) or $20 L^2$ (faces, triangular cells, `dual=True`).
# * Number of pixels around the equator: $\approx 4L$ or $5L$.
# * The subdivided icosahedron has no prefered orientation, nor isolatitude rings.
# Illustration.
# Subdivided icosahedron: vertices (dual=False) or triangle faces (dual=True).
graph = pg.graphs.SphereIcosahedral(2, dual=False)
#graph = pg.graphs.SphereIcosahedral(2, dual=True)
plot_spherical_graph(graph)
# Distances between pixels become less and less constant as resolution increases.
graph = pg.graphs.SphereIcosahedral(8, dual=True, k=3)
dist = squareform(pdist(graph.coords))
dist *= graph.A.toarray()  # keep only distances along graph edges
dist = dist.flatten()
dist = dist[dist!=0]
plt.hist(dist, bins=100);
# ### 1.5 Cubed-sphere
#
# Used by the [US Global Forecasting Model](https://www.gfdl.noaa.gov/fv3/fv3-grids/).
#
# * Resolution parameter: number of subdivisions $L$.
# * Number of pixels: $6L^2$.
# * Number of pixels around the equator: $4L$.
# Cubed-sphere with 3 subdivisions per cube face.
graph = pg.graphs.SphereCubed(3, 'equiangular')
#graph = pg.graphs.SphereCubed(3, 'equidistant')
plot_spherical_graph(graph)
# ## 2 Resolutions
#
# Comparison:
# 1. Same average resolution (area, sqrt(area)=angle) <=> same number of pixels.
# 2. Same average resolution near equator (equatorial band) -> different for non-uniform samplings.
#
# Comments:
# * All pixels in HEALPix have the same area. The Icosahedral and reduced Gauss-Legendre are mostly equiarea. The Equiangular is not.
#
# Procedure:
# 1. Choose the number of subdivisions for HEALPix and Icosahedral (as they are the least flexible ones).
# 2. Compute the resulting number of pixels (averaged between the two).
# 3. Choose parameters for Equiangular and Gauss-Legendre to approach that target number of pixels.
# 4. Add another Equiangular with 50% more pixels. It will have about the same resolution as the others at the equator.
# +
def params2npix(sampling, params):
    """Return the number of pixels of a spherical sampling.

    Parameters
    ----------
    sampling : str
        One of 'equiangular', 'gaussian', 'healpix', 'icosahedral', 'cubed'.
    params : int or tuple
        Resolution parameter(s): ``(nlat, nlon)`` for 'equiangular', the
        (even) number of isolatitude rings for 'gaussian', the number of
        subdivisions for the other samplings.

    Returns
    -------
    int
        The number of pixels.

    Raises
    ------
    ValueError
        If ``sampling`` is not a known sampling name (previously the
        function silently returned ``None``).
    """
    if sampling == 'equiangular':
        nlat, nlon = params
        return nlat*nlon
    elif sampling == 'gaussian':
        nlat = params
        assert (nlat % 2) == 0
        nlat //= 2
        # Octahedral reduced Gaussian grid: 4N(N+9) pixels for N = nlat/2 rings per hemisphere.
        return 4 * nlat * (nlat+9)
    elif sampling == 'healpix':
        subdivisions = params
        return 12 * subdivisions**2
    elif sampling == 'icosahedral':
        subdivisions = params
        return 10 * subdivisions**2 + 2
    elif sampling == 'cubed':
        subdivisions = params
        return 6 * subdivisions**2
    else:
        raise ValueError(f'Unknown sampling {sampling}.')
def npix2params(sampling, npix):
    """Inverse of :func:`params2npix`: resolution parameter(s) from a pixel count.

    Parameters
    ----------
    sampling : str
        One of 'equiangular', 'gaussian', 'healpix', 'icosahedral', 'cubed'.
    npix : int
        Number of pixels of the sampling.

    Returns
    -------
    ``(nlat, nlon)`` for 'equiangular', a (possibly fractional) resolution
    parameter otherwise.

    Raises
    ------
    ValueError
        If ``sampling`` is not a known sampling name (previously the
        function silently returned ``None``).
    """
    if sampling == 'equiangular':
        nlat = round(np.sqrt(npix/2))
        return nlat, 2*nlat
    elif sampling == 'gaussian':
        # Solve 4n(n+9) = npix, i.e., 4n^2 + 36n - npix = 0, then nlat = 2n.
        a, b, c = 4, 36, -npix
        sol = (-b + np.sqrt(b**2 - 4*a*c)) / (2*a)
        nlat = 2*sol
        return nlat
    elif sampling == 'healpix':
        subdivisions = np.sqrt(npix / 12)
        return subdivisions
    elif sampling == 'icosahedral':
        subdivisions = np.sqrt((npix-2) / 10)
        return subdivisions
    elif sampling == 'cubed':
        subdivisions = np.sqrt(npix / 6)
        return subdivisions
    else:
        raise ValueError(f'Unknown sampling {sampling}.')
# Round-trip sanity checks: npix2params inverts params2npix for each sampling.
assert npix2params('equiangular', params2npix('equiangular', (100, 200))) == (100, 200)
assert npix2params('gaussian', params2npix('gaussian', 80)) == 80
assert npix2params('healpix', params2npix('healpix', 5)) == 5
assert npix2params('icosahedral', params2npix('icosahedral', 5)) == 5
assert npix2params('cubed', params2npix('cubed', 8)) == 8
# +
def npix2res(npix, height=1):
    """Mean linear resolution [km] on Earth of a sampling with ``npix`` pixels.

    ``height`` scales the spherical band considered (the default of 1
    corresponds to the whole sphere).
    """
    earth_radius = 6371  # km
    band_height = 2 * height * earth_radius
    cell_area = 2 * np.pi * earth_radius * band_height / npix
    return np.sqrt(cell_area)
def plot_resolutions(graphs):
    """Plot the mean linear resolution [km] per latitude for each graph.

    The resolution is estimated by counting pixels in sliding latitude
    bands and converting the counts to km with :func:`npix2res`.
    """
    # TODO: smooth with non-square window, e.g., a Gaussian.
    avg = np.pi/180*12.3456789  # width [radians] of the averaging band
    bins = np.linspace(avg/2, np.pi/2-avg/2, 100)
    hist = np.empty_like(bins)
    fig, ax = plt.subplots(figsize=(10, 8))
    for graph in graphs:
        # Fold latitudes onto [0, pi/2]; the samplings are symmetric about the equator.
        lat = abs(graph.signals['lat'])
        for i, bin in enumerate(bins):
            hist[i] = np.sum((lat >= bin-avg/2) & (lat <= bin+avg/2))
        # Convert the per-band pixel count to a mean resolution in km.
        hist = npix2res(hist, np.sin(bins+avg/2) - np.sin(bins-avg/2))
        label = f'{graph.__class__.__name__} ({graph.N} pixels, {npix2res(graph.N):.0f} km, {np.sqrt(180*360/graph.N):.2f}°)'
        ax.plot(bins/np.pi*180, hist, '.', label=label)
        # Dashed grey line: the sampling-wide average resolution.
        ax.axhline(npix2res(graph.N), linestyle='--', color='grey', zorder=3)
    ax.legend()
    ax.set_xlabel('latitude [°]')
    ax.set_ylabel('mean resolution [km]')
# -
# **Target 1**: 5° resolution on average ([WeatherBench](https://github.com/pangeo-data/WeatherBench) is 5.625°).
# +
# Target pixel count: average of HEALPix and Icosahedral at 16 subdivisions.
npix = (params2npix('healpix', 16) + params2npix('icosahedral', 16)) / 2
print(f'target: {npix:.0f} pixels')
print(npix2params('cubed', npix))
print(npix2params('gaussian', npix))
print(npix2params('equiangular', npix))
print(npix2params('equiangular', npix*1.5))
plot_resolutions([
    pg.graphs.SphereHealpix(16),
    pg.graphs.SphereIcosahedral(16),
    pg.graphs.SphereCubed(22),
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral'),  # ECMWF uses even numbers of rings only
    # pg.graphs.SphereEquiangular(32, 64),  # WeatherBench
    pg.graphs.SphereEquiangular(38, 76),
    pg.graphs.SphereEquiangular(46, 92),
])
# -
# **Target 2**: 100 km resolution on average.
#
# * But let's see how far we can go before the GPU memory is filled.
# * For cosmology, we could train with a single GPU on HEALPix with 10 subdivisions, i.e., ~12M pixels or a resolution of ~6.4km on the Earth.
# But it was for classification, hence the NN had no decoder.
# * The ECMWF's IFS HRES currently runs on a reduced (octahedral) Gaussian grid of resolution O1280, i.e., ~6M pixels or a resolution of ~8.8km on the Earth.
# * ERA5 is on a reduced (linear) Gaussian grid of resolution N320 (as older IFS), which should correspond to a resolution of ~32km.
# Resolutions of reference operational grids (see the notes above).
# BUG fixed: "10 subdivisions" in the cosmology sense means nside = 2**10 = 1024
# (~12M pixels, ~6.4 km), not nside = 10; the Gaussian line below already uses
# the resolved ring count 2*1280 (~6.6M pixels, ~8.8 km).
print(npix2res(params2npix('healpix', 2**10)))
print(npix2res(params2npix('gaussian', 2*1280)))
# +
# Target pixel count: average of HEALPix and Icosahedral at 64 subdivisions.
npix = (params2npix('healpix', 64) + params2npix('icosahedral', 64)) / 2
print(f'target: {npix:.0f} pixels')
print(npix2params('cubed', npix))
print(npix2params('gaussian', npix))
print(npix2params('equiangular', npix))
print(npix2params('equiangular', npix*1.5))
plot_resolutions([
    pg.graphs.SphereHealpix(64),
    pg.graphs.SphereIcosahedral(64),
    pg.graphs.SphereCubed(87),
    pg.graphs.SphereGaussLegendre(204, nlon='ecmwf-octahedral'),
    pg.graphs.SphereEquiangular(150, 300),
    pg.graphs.SphereEquiangular(184, 368),
])
# -
# ## 3 Positions of pixels (cells) and vertices
# The positions of the pixels (graph vertices) are given by a PyGSP `graph`:
# 1. The 3D positions of the graph vertices that support the data are stored in `graph.coords`.
# 2. The longitude and latitude positions are stored as signals as `graph.signals['lon']` and `graph.signals['lat']`.
# 3. `graph.coords` is set to 3D coordinates with `graph.set_coordinates('sphere', dim=3)`, and 2D lat-lon coordinates with `graph.set_coordinates('sphere', dim=2)`.
# +
# Pixel positions: 3D coordinates by default, lon/lat after set_coordinates(dim=2).
graph = pg.graphs.SphereEquiangular(2, 4)
#graph.set_coordinates('sphere', dim=3)
print(f'{graph.coords.shape[0]} cells embedded in {graph.coords.shape[1]} dimensions')
print(graph.coords)
graph.set_coordinates('sphere', dim=2)
print(f'{graph.coords.shape[0]} cells embedded in {graph.coords.shape[1]} dimensions')
print(graph.coords)
# -
# A general definition of a pixel is as the set of points which are closest to a center.
# Samplings can however define pixels differently, as HEALPix.
#
# Assuming the graph vertices are at the center of cells supporting the data, those cells are given by the [Voronoi diagram](https://en.wikipedia.org/wiki/Voronoi_diagram) (the dual of a [Delaunay triangulation](https://en.wikipedia.org/wiki/Delaunay_triangulation)).
# SciPy can compute a Voronoi diagram and give the positions of the Voronoi vertices at which the Voronoi cells intersect.
# NOTE(review): this call operates on the graph from the previous cell and is
# immediately shadowed by the reassignment below — presumably redundant; verify.
graph.set_coordinates('sphere', dim=3)
graph = pg.graphs.SphereEquiangular(2, 4)
# Voronoi vertices = corners where the Voronoi cells of the pixels intersect.
sv = SphericalVoronoi(graph.coords, radius=1, center=[0, 0, 0])
print(f'{sv.vertices.shape[0]} vertices embedded in {sv.vertices.shape[1]} dimensions')
print(sv.vertices)
# -
# HEALPix pixels aren't Voronoi cells.
graph = pg.graphs.SphereHealpix(1, k=8)
npix = graph.n_vertices
nside = np.sqrt(npix/12)
# healpy returns the 4 corners of each (quadrilateral) HEALPix pixel.
vertices = hp.boundaries(nside, range(npix), nest=graph.nest)
assert vertices.shape == (npix, 3, 4)
# While the HEALPix pixels aren't Voronoi pixels, its Voronoi pixels are almost equiarea.
# +
# Distribution of Voronoi-cell areas per sampling (a delta = exactly equiarea).
graphs = [
    pg.graphs.SphereHealpix(16),
    pg.graphs.SphereIcosahedral(16),
    pg.graphs.SphereCubed(22),
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral'),
    pg.graphs.SphereEquiangular(38, 76),
    pg.graphs.SphereRandom(2817),
]
fig, axes = plt.subplots(1, len(graphs), figsize=(3*len(graphs), 3))
for graph, ax in zip(graphs, axes):
    sv = SphericalVoronoi(graph.coords, radius=1, center=[0, 0, 0])
    areas = sv.calculate_areas()
    # Cell areas must sum to the area of the unit sphere.
    np.testing.assert_allclose(areas.sum(), 4*np.pi)
    ax.hist(areas, bins=100)
    ax.set_title(graph.__class__.__name__)
fig.tight_layout()
# -
# Distribution of distances to the 4 nearest neighbors per sampling
# (kernel=identity makes the edge weights equal to the distances).
graphs = [
    pg.graphs.SphereHealpix(16),
    pg.graphs.SphereIcosahedral(16),
    pg.graphs.SphereCubed(22),
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral'),
    pg.graphs.SphereEquiangular(38, 76),
    pg.graphs.SphereRandom(2817),
]
fig, axes = plt.subplots(1, len(graphs), figsize=(3*len(graphs), 3))
for graph, ax in zip(graphs, axes):
    G = pg.graphs.NNGraph(graph.coords, k=4, kernel=lambda d: d, kernel_width=1)
    ax.hist(G.W.data, bins=100)
    ax.set_title(graph.__class__.__name__)
fig.tight_layout()
# ## 4 Plotting
#
# Code from <https://scipy.github.io/devdocs/generated/scipy.spatial.SphericalVoronoi.html>.
# +
def plot(graph, sv, edges=True, sphere=True, triangles=False, regions=True, ax=None):
    """3D plot of a spherical sampling with its Voronoi diagram.

    Parameters
    ----------
    graph : pygsp graph whose vertices are the pixel centers.
    sv : scipy.spatial.SphericalVoronoi computed from ``graph.coords``.
    edges : plot the graph edges.
    sphere : plot a translucent unit sphere for reference.
    triangles : plot the Delaunay triangles (as geodesic polygons).
    regions : plot the Voronoi cell boundaries (as geodesic polygons).
    ax : existing 3D axis, or None to create a new figure.
    """
    if ax is None:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
    if sphere:
        # plot the unit sphere for reference
        u = np.linspace(0, 2*np.pi, 100)
        v = np.linspace(0, np.pi, 100)
        x = np.outer(np.cos(u), np.sin(v))
        y = np.outer(np.sin(u), np.sin(v))
        z = np.outer(np.ones_like(u), np.cos(v))
        ax.plot_surface(x, y, z, color='y', alpha=0.1)
    # plot generator points (graph)
    graph.plot('b', 30, edges=edges, ax=ax, title='')
    # plot Voronoi vertices
    ax.scatter(*sv.vertices.T, c='g')
    # plot Delaunay triangles (as Euclidean polygons)
    # TODO: triangles' vertices are not sorted
    if triangles:
        t_vals = np.linspace(0, 1, 10)
        for region in sv._simplices:
            n = len(region)
            for i in range(n):
                # Draw each triangle side as a geodesic (spherical) arc.
                start = sv.points[region][i]
                end = sv.points[region][(i + 1) % n]
                result = geometric_slerp(start, end, t_vals)
                ax.plot(result[..., 0],
                        result[..., 1],
                        result[..., 2],
                        c='k')
    # indicate Voronoi regions (as Euclidean polygons)
    if regions:
        sv.sort_vertices_of_regions()
        t_vals = np.linspace(0, 1, 10)
        for region in sv.regions:
            n = len(region)
            for i in range(n):
                start = sv.vertices[region][i]
                end = sv.vertices[region][(i + 1) % n]
                result = geometric_slerp(start, end, t_vals)
                # Returns a list when two vertices are at the same position.
                # Happens at the poles.
                result = np.asanyarray(result)
                ax.plot(result[..., 0],
                        result[..., 1],
                        result[..., 2],
                        c='k')
# One 3D panel per sampling, at comparable (very coarse) resolutions.
graphs = [
    pg.graphs.SphereHealpix(1, k=8),
    pg.graphs.SphereIcosahedral(1),
    pg.graphs.SphereCubed(2),
    pg.graphs.SphereGaussLegendre(4, 8),
    pg.graphs.SphereEquiangular(4, 8),
    pg.graphs.SphereRandom(20),
]
fig = plt.figure(figsize=(4*len(graphs), 4))
for i, graph in enumerate(graphs):
    ax = fig.add_subplot(1, len(graphs), i+1, projection='3d')
    sv = SphericalVoronoi(graph.coords, radius=1, center=[0, 0, 0])
    plot(graph, sv, edges=False, sphere=True, regions=True, ax=ax)
    ax.axis('off')
    title = graph.__class__.__name__
    title += f'\n{graph.n_vertices} pixels (graph vertices)' # regions / points
    title += f'\n{sv.vertices.shape[0]} vertices (Delaunay triangles)' # (region connections)'
    assert sv._simplices.shape[0] == sv.vertices.shape[0]
    ax.set_title(title)
# -
# ## 5 Check spectral (hence equivariance) properties
def plot_spectrum(graphs, n_eigenvalues=49, normalize=False, ax=None):
    """Plot the first graph-Laplacian eigenvalues of each graph.

    Dashed vertical lines mark the boundaries of eigenspaces of growing
    dimension 1, 3, 5, ... (the expected 2l+1 degeneracy of spherical
    harmonics of degree l).
    """
    if ax is None:
        fig, ax = plt.subplots(figsize=(8, 4))#figsize=(12, 8))
    for graph in graphs:
        graph.compute_fourier_basis(min(graph.N, n_eigenvalues))
        # Optionally normalize by the largest computed eigenvalue.
        e = graph.e / graph.e[-1] if normalize else graph.e
        ax.plot(e, '.', label=f'{graph.__repr__(limit=2)}')
    ax.legend()#loc='upper left')
    # Eigenspace separators at cumulative sizes 1, 4, 9, ... (offset by 0.5).
    eigenspace = 1
    vline = 1
    while vline <= min(n_eigenvalues, max(graph.N for graph in graphs)):
        ax.axvline(vline-0.5, linestyle='--', color='grey')
        eigenspace += 2
        vline += eigenspace
# Comparison:
# * HEALPix is best.
# * Icosahedral and Cubed are not too bad.
# * Can be made better by using `kernel_width` from HEALPix.
# * Equiangular is really bad. Worse than random. Seems to have the eigenstructure of a grid.
# * Can be made better with an `NNGraph`.
# * They are all improved with more neighbors.
#
# TODO:
# * NNGraph: set sigma to mean(farthest neighbor) / 2
# * Same as for radius graph.
# * Mean pushes the width too far => most vertices are farther (as if uniform k ~ area ~ d²).
# * Kernel value 0.5 in the middle of the ball.
# * I remember it should have been in a paper. Shi-Malik?
# Compare Laplacian spectra across samplings, reusing HEALPix's optimized kernel width.
k = 20
lap_type = 'combinatorial' # 'normalized' looks worse, but it hasn't been optimized for
width = pg.graphs.nngraphs.spherehealpix._OPTIMAL_KERNEL_WIDTHS[k][16]
fig, axes = plt.subplots(1, 2, figsize=(24, 5))
plot_spectrum([
    pg.graphs.SphereHealpix(16, k=k, lap_type=lap_type),
    pg.graphs.SphereIcosahedral(16, k=k, kernel_width=width, lap_type=lap_type),
    pg.graphs.SphereCubed(22, k=k, kernel_width=width, lap_type=lap_type),
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral', k=k, kernel_width=width, lap_type=lap_type), # better for k=8, same for k=20
    # pg.graphs.NNGraph(pg.graphs.SphereEquiangular(38, 76).coords, k=k, kernel_width=width, lap_type=lap_type),
    # pg.graphs.SphereRandom(2817, k=k, kernel_width=width, lap_type=lap_type),
], 200, ax=axes[0])
plot_spectrum([
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral', k=k, lap_type=lap_type), # better for k=40,60, same for k=20
    # pg.graphs.SphereEquiangular(38, 76, lap_type=lap_type),
    pg.graphs.NNGraph(pg.graphs.SphereEquiangular(38, 76).coords, k=k, lap_type=lap_type),
    pg.graphs.SphereRandom(2817, k=k, lap_type=lap_type),
], 200, ax=axes[1])
#fig.savefig('spectrum_knn_graph.png', facecolor='w', dpi=200)
# HEALPix:
# * eigenspaces degrade from well-separated to continuous
# * separation is better with more neighbors
# * the more pixels, the farther the eigenspaces are good
# HEALPix: effect of the number of neighbors k at two resolutions.
#nsides = [2, 4]
nsides = [8, 16]
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
plot_spectrum([
    # pg.graphs.SphereHealpix(nsides[0], k=4, kernel_width=1), # Faces are all quadrilaterals, but they are not equidistant. Voronoi pixels are different.
    pg.graphs.SphereHealpix(nsides[0], k=8),
    pg.graphs.SphereHealpix(nsides[0], k=20),
    pg.graphs.SphereHealpix(nsides[0], k=40),
    pg.graphs.SphereHealpix(nsides[0], k=60),
], 200, ax=axes[0], normalize=True)
plot_spectrum([
    # pg.graphs.SphereHealpix(nsides[1], k=4, kernel_width=1),
    pg.graphs.SphereHealpix(nsides[1], k=8),
    pg.graphs.SphereHealpix(nsides[1], k=20),
    pg.graphs.SphereHealpix(nsides[1], k=40),
    pg.graphs.SphereHealpix(nsides[1], k=60),
], 200, ax=axes[1], normalize=True)
# k=3 is much better because there is only 1 distance, as all faces are triangles.
# Icosahedral: spectra for primal (hexagonal) and dual (triangular) samplings.
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
plot_spectrum([
    pg.graphs.SphereIcosahedral(8, dual=False, k=5),
    pg.graphs.SphereIcosahedral(8, dual=False, k=6), # Faces are mostly hexagons.
    pg.graphs.SphereIcosahedral(8, dual=False, k=7),
], 100, ax=axes[0])
plot_spectrum([
    pg.graphs.SphereIcosahedral(8, dual=True, k=3), # Faces are all triangles.
    pg.graphs.SphereIcosahedral(8, dual=True, k=4),
    pg.graphs.SphereIcosahedral(8, dual=True, k=8),
], 100, ax=axes[1])
plot_spectrum([
    pg.graphs.SphereIcosahedral(1, dual=True, k=19), # Fully connected.
    pg.graphs.SphereIcosahedral(1, dual=True, k=3), # Triangular faces.
    pg.graphs.SphereIcosahedral(1, dual=False, k=11), # Fully connected.
    pg.graphs.SphereIcosahedral(1, dual=False, k=6), # Hexagonal faces.
])
# SphereCubed: equiangular is better.
# Faces are quadrilaterals, but k=4 doesn't help. Aren't they equidistant?
# Cubed-sphere: equiangular vs equidistant placement, with k=20 (left) and k=4 (right).
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
plot_spectrum([
    pg.graphs.SphereCubed(22, 'equidistant'),
    pg.graphs.SphereCubed(22, 'equiangular'),
], 100, ax=axes[0])
plot_spectrum([
    pg.graphs.SphereCubed(22, 'equidistant', k=4),
    pg.graphs.SphereCubed(22, 'equiangular', k=4),
], 100, ax=axes[1])
# SphereGaussLegendre: more neighbors and reduced (more uniform) helps.
# Gauss-Legendre: reduced (octahedral) vs full grids, default k (left) and k=40 (right).
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
plot_spectrum([
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral'),
    pg.graphs.SphereGaussLegendre(45, nlon=90),
], 100, ax=axes[0])
plot_spectrum([
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral', k=40),
    pg.graphs.SphereGaussLegendre(45, nlon=90, k=40),
], 100, ax=axes[1])
# SphereEquiangular: not better with more edges.
# Changing kernels doesn't help either.
# More neighbors do help.
# Equiangular: the native graph vs kNN graphs built on the same coordinates.
G1 = pg.graphs.SphereEquiangular(38, 76)
plot_spectrum([
    G1,
    pg.graphs.NNGraph(G1.coords, k=20),
    pg.graphs.NNGraph(G1.coords, k=40),
    # pg.graphs.NNGraph(G1.coords, k=16, kernel=lambda d: 1/d, kernel_width=.25),
    # pg.graphs.NNGraph(G1.coords, k=4, kernel='gaussian', kernel_width=1),
    # pg.graphs.NNGraph(G1.coords, k=G1.N-1, kernel='gaussian', kernel_width=.5),
    # pg.graphs.NNGraph(G1.coords, kind='radius', radius=np.pi/20),
    # pg.graphs.NNGraph(G1.coords, kind='radius', radius=np.pi/10, kernel=lambda d: 1/d, kernel_width=1),
    # pg.graphs.NNGraph(G1.coords, k=4, kernel=lambda d: 1/d**2, kernel_width=1),
], 100)
plot_spectrum([
    pg.graphs.NNGraph(G1.coords, k=20),
], 100)
plot_spectrum([
    pg.graphs.NNGraph(G1.coords, k=40),
], 100)
# ### Window function
#
# [Tegmark, An icosahedron-based method for pixelizing the celestial sphere](https://arxiv.org/pdf/astro-ph/9610094.pdf)
# Spherical-harmonic content of a constant map (pixel window function).
nside = 4
npix = 12*nside**2
w = 4*np.pi/npix * np.ones(npix)
wl = hp.anafast(w, lmax=9*nside)
plt.semilogy(wl)
# healpy's tabulated pixel window function for comparison.
wl = hp.pixwin(nside, lmax=3*nside-1)
plt.plot(wl)
# +
nside = 4
npix = 12*nside**2
l, m = 1, 1
graph = pg.graphs.SphereHealpix(nside)
lat, lon = hp.pix2ang(nside, range(npix))
# NOTE(review): only `import scipy.spatial` is visible at the top of this
# notebook — scipy.special may not be loaded here; verify this cell runs.
ylm = scipy.special.sph_harm(l, m, lon, lat)
# Quadrature of the harmonic against the constant (equal-area) weights.
ylm @ w
# -
# ### Setting the edge weights
#
# * Difference should be scaled by $1/d$, to get variation-per-unit-distance
# * But longer edges should count less.
# * Integration by summing edges connected to a vertex.
# * The more edges the more exact (quadrature).
# * Constant quadrature weights if edges go in uniform directions.
# +
# Gaussian exp(-x^2) vs reciprocals of truncated Taylor series of exp(x^2).
x = np.linspace(0, 3)
y = np.exp(-x**2)
# Taylor series.
y1 = 1 / (1 + x**2)
y2 = 1 / (1 + x**2 + x**4/2)
y3 = 1 / (1 + x**2 + x**4/2 + x**6/6)
plt.plot(x, y)
plt.plot(x, y1)
plt.plot(x, y2)
plt.plot(x, y3)
# -
# Effect of the edge-weight kernel and its width on the spectrum.
graph = pg.graphs.SphereHealpix(4)
plot_spectrum([
    graph,
    # Not so sensible to the kernel width.
    pg.graphs.SphereHealpix(4, kernel_width=0.9*graph.kernel_width), # still ok
    # pg.graphs.SphereHealpix(4, kernel_width=0.6*graph.kernel_width), # very bad
    # 1/d is not the solution.
    #pg.graphs.NNGraph(graph.coords, kernel=lambda d: 1/d, kernel_width=graph.kernel_width),
    # Taylor series.
    pg.graphs.NNGraph(graph.coords, kernel=lambda d: 1/(1+d**2), kernel_width=graph.kernel_width),
    #pg.graphs.NNGraph(graph.coords, kernel=lambda d: 1/(1+d**2+d**4/2), kernel_width=graph.kernel_width),
    pg.graphs.NNGraph(graph.coords, kernel=lambda d: 1/(1+d**2+d**4/2+d**6/6), kernel_width=graph.kernel_width),
], 200)
# +
# Optimized kernel widths vs number of pixels, for several neighbor counts k.
_OPTIMAL_KERNEL_WIDTHS = pg.graphs.nngraphs.spherehealpix._OPTIMAL_KERNEL_WIDTHS
x = np.array(list(_OPTIMAL_KERNEL_WIDTHS[8].keys()))
x = 12*x**2 # nside to npix
plt.loglog(x, list(_OPTIMAL_KERNEL_WIDTHS[8].values()))
plt.loglog(x, list(_OPTIMAL_KERNEL_WIDTHS[20].values()))
plt.loglog(x, list(_OPTIMAL_KERNEL_WIDTHS[40].values()))
plt.loglog(x, list(_OPTIMAL_KERNEL_WIDTHS[60].values()))
# width = cst / subdivisions = cst / sqrt(npix)
# width = cst * distance = cst * sqrt(area)
# weights = kernel(distances/width)
# +
# Per-vertex distance to the farthest of the 4 nearest neighbors
# (identity kernel makes the edge weights equal to the distances).
graph = pg.graphs.SphereHealpix(8, kernel=lambda d: d, kernel_width=1, k=4)
#min = np.min(graph.W.toarray(), axis=0)
d = np.max(graph.W.toarray(), axis=1)
#d = np.mean(graph.W.toarray(), axis=1)
#d = np.median(graph.W.toarray(), axis=1)
plt.hist(d, bins=100);
#plt.hist(graph.W.data, bins=100);
# +
# How the neighborhood radius grows with k, compared to the optimized widths.
neighbors = [8, 20, 40, 60]
#neighbors = np.arange(10, 200, 5)
radius_mean = []
radius_median = []
radius_max = []
for k in neighbors:
    graph = pg.graphs.SphereHealpix(8, kernel=lambda d: d, kernel_width=1, k=k)
    radius_mean.append(np.mean(graph.W.data))
    radius_median.append(np.median(graph.W.data))
    radius_max.append(np.max(graph.W.data))
# All statistics have the same asymptotic behaviour.
plt.plot(neighbors, radius_mean/radius_mean[-1], '.-', label='mean')
plt.plot(neighbors, radius_median/radius_median[-1], '.-', label='median')
plt.plot(neighbors, radius_max/radius_max[-1], '.-', label='max')
for nside in [32, 64, 128, 256, 512, 1024]:
    y = np.array([_OPTIMAL_KERNEL_WIDTHS[k][nside] for k in neighbors])
    y /= y[-1]
    plt.plot(neighbors, y, '.-', label=f'nside={nside}')
#x = np.array(neighbors)
#x = np.linspace(8, 60, 100)
#y = np.linspace(y[0], 1, 100)
#plt.plot(x, y, '--', label='linear', c=(0.8,)*3)
plt.legend()
# +
def nside2pixradius(nside):
    """Approximate pixel radius [radians] of a HEALPix sampling.

    The radius is the square root of the (equal) pixel area 4*pi/npix,
    with npix = 12*nside**2.
    """
    # BUG fixed: the parameter was previously overwritten with `nside = 8`,
    # so the function silently ignored its argument.
    npix = 12*nside**2
    pix_area = 4*np.pi / npix
    pix_radius = np.sqrt(pix_area)
    return pix_radius
# Degree distribution of a radius graph (radius = 4 pixel radii).
nside = 8
r = 4 * nside2pixradius(nside)
graph = pg.graphs.SphereHealpix(nside, kind='radius', radius=r)
plt.hist(graph.d, bins=100);
# -
# * On a quasi-uniform sampling, a kNN graph is quasi a radius graph, with the radius given by the farthest connected pair of vertices.
# * `radius` grows as `sqrt(neighbors)`, and `area=radius**2` as `neighbors`.
# +
# Sweep the radius of a radius graph and record neighbor/distance statistics.
nside = 8
radiuses = np.linspace(1, 8, 20) * nside2pixradius(nside)
radius_mean = []
radius_median = []
radius_max = []
neighbors = []
neighbors_std = []
for r in radiuses:
    graph = pg.graphs.SphereHealpix(nside, kernel=lambda d: d, kernel_width=1, kind='radius', radius=r)
    neighbors.append(np.mean(graph.d))
    neighbors_std.append(np.std(graph.d))
    radius_mean.append(np.mean(graph.W.data))
    radius_median.append(np.median(graph.W.data))
    radius_max.append(np.max(graph.W.data))
#plt.plot(neighbors, radius_mean, '.-', label='mean')
#plt.plot(neighbors, radius_median, '.-', label='median')
#plt.plot(neighbors, radius_max, '.-', label='max')
plt.plot(neighbors, radius_mean/radius_mean[-1], '.-', label='mean')
plt.plot(neighbors, radius_median/radius_median[-1], '.-', label='median')
plt.plot(neighbors, radius_max/radius_max[-1], '.-', label='max')
# area = radius**2 should grow linearly with the number of neighbors.
area = np.array(radius_mean)**2
plt.plot(neighbors, area/area[-1], '.-', label='area')
#plt.plot(neighbors, radius_max/radius_max[-1], '.-', label='max')
#plt.plot(radiuses, neighbors, '.-')
plt.plot(neighbors, radiuses, '.-', label='radius')
for nside in [32, 64, 128, 256, 512, 1024]:
    neighbors = [8, 20, 40, 60]
    y = np.array([_OPTIMAL_KERNEL_WIDTHS[k][nside] for k in neighbors])
    y /= y[-1] / 0.6
    plt.plot(neighbors, y, '.-', label=f'nside={nside}')
plt.legend()
# -
# The distribution of #neighbors is well concentrated.
#plt.plot(radiuses, neighbors, '.-')
plt.plot(radiuses, neighbors_std, '.-')
# +
# Transfer HEALPix's optimized kernel width (G1) to other samplings (G3)
# and compare against their default width (G2). Notebook-style: later
# assignments of G2/G3 override the earlier ones to switch the comparison.
k = 40
nside = 8
npix = 12*nside**2
G1 = pg.graphs.SphereHealpix(16, k=k)
# Makes it better.
G2 = pg.graphs.SphereIcosahedral(8, k=k)
G3 = pg.graphs.SphereIcosahedral(8, k=k, kernel_width=G1.kernel_width)
G2 = pg.graphs.SphereIcosahedral(8, dual=True, k=k)
G3 = pg.graphs.SphereIcosahedral(8, dual=True, k=k, kernel_width=G1.kernel_width)
G2 = pg.graphs.SphereCubed(11, k=k)
G3 = pg.graphs.SphereCubed(11, k=k, kernel_width=G1.kernel_width)
G2 = pg.graphs.SphereGaussLegendre(45, 'ecmwf-octahedral', k=k)
G3 = pg.graphs.SphereGaussLegendre(45, 'ecmwf-octahedral', k=k, kernel_width=G1.kernel_width)
# Makes it worse.
#G2 = pg.graphs.SphereGaussLegendre(20, k=k)
#G3 = pg.graphs.SphereGaussLegendre(20, k=k, kernel_width=G1.kernel_width)
#G2 = pg.graphs.NNGraph(pg.graphs.SphereEquiangular(20).coords, k=k)
#G3 = pg.graphs.NNGraph(pg.graphs.SphereEquiangular(20).coords, k=k, kernel_width=G1.kernel_width)
#G4 = pg.graphs.SphereIcosahedral(8, k=6)
print(G1)
print(G2)
print(G3)
#print(G4)
plot_spectrum([
    G1,
    # G2,
    G3,
    # G4,
], 100)
# -
# ### Vertex weights as areas
#
# Can be better or worse.
# +
# Use Voronoi cell areas as vertex weights (mass matrix) in the eigenproblem.
# Notebook-style: later `graph = ...` assignments override the earlier ones.
# Makes it a bit better.
graph = pg.graphs.SphereEquiangular(10)
# Makes it worse.
graph = pg.graphs.NNGraph(pg.graphs.SphereEquiangular(10).coords, k=10)
graph = pg.graphs.SphereGaussLegendre(20, k=20)
# Not much change (quasi-equiarea).
graph = pg.graphs.SphereIcosahedral(8)
graph = pg.graphs.SphereHealpix(8)
graph = pg.graphs.SphereCubed(8, k=20)
#plot_spectrum([graph])
sv = SphericalVoronoi(graph.coords)
areas = sv.calculate_areas()
plt.plot(areas, '.')
I = np.identity(len(areas))
D = np.diag(areas)
Di = np.diag(1/areas)
# Generalized eigenproblems L u = e M u for M = I, diag(areas), diag(1/areas).
# NOTE(review): scipy.linalg is not visibly imported at the top of this
# notebook (only scipy.spatial) — verify this cell runs.
eI, UI = scipy.linalg.eigh(graph.L.toarray(), I)
eD, UD = scipy.linalg.eigh(graph.L.toarray(), D)
eDi, UDi = scipy.linalg.eigh(graph.L.toarray(), Di)
n = 100
plt.figure(figsize=(18, 4))
plt.plot(eI[:n], '.')
plt.plot(eD[:n]*np.mean(areas), '.')
#plt.plot(eDi[:n]/np.mean(areas), '.')
# -
# ### Density invariant graph Laplacian
#
# From Lafon and Coifman (diffusion maps).
#
# $$
# \widetilde{W} = D^{-1} W D^{-1} \\
# \widetilde{L} = I - D^{-1} \widetilde{W}
# $$
#
# * Doesn't seem to help either.
# * The spectrum is even worse.
# * Same for the embedding of a non-uniformly sampled circle.
# * Maybe it only works for very smooth variations of density, as shown in figure 2.5 in Lafon's thesis (p. 35)
# * (Faster convergence when computing eigenvectors with $D$ as mass matrix.)
# +
# Build a kNN graph on the (non-uniform) equiangular sampling.
k = 20
graph1 = pg.graphs.SphereEquiangular(38, 76)
graph2 = pg.graphs.NNGraph(graph1.coords, k=k)
def normalize(W, d):
    """Density-invariant normalization: divide entry (i, j) of W by d[i]*d[j]."""
    inv_degree = np.diag(1 / d)
    return inv_degree @ W @ inv_degree
# Normalized graph and its spectrum vs the original graphs.
W = normalize(graph2.W, graph2.dw)
graph3 = pg.graphs.Graph(W)
# -
plot_spectrum([graph1, graph2, graph3], 100, normalize=True)
# The degree (density/area) concentrates.
fig, axes = plt.subplots(1, 2, figsize=(10, 4))
axes[0].hist(graph2.dw, bins=20);
axes[1].hist(graph3.dw, bins=20);
# Combinatorial vs random-walk Laplacian.
# They look mostly the same.
# +
# Compare combinatorial and random-walk Laplacian eigenvalues of graph3.
D = np.diag(graph3.dw)
Dinv = np.diag(1/graph3.dw)
L = D - W
#e, U = sparse.linalg.eigsh(L, k=100, which='SM')
#e, U = sparse.linalg.eigsh(L, M=np.identity(graph3.N), k=100, which='SM')
# Generalized eigenproblem with the degree matrix as mass matrix.
e, U = sparse.linalg.eigsh(L, M=D, k=100, which='SM')
#e, U = sparse.linalg.eigsh(L, M=Dinv, k=100, which='SM')
plt.plot(graph3.e * 10, '.')
plt.plot(e, '.')
# Should be same as random-walk L.
e, U = sparse.linalg.eigs(Dinv@L, k=100, which='SM')
#e, U = sparse.linalg.eigs(Dinv@W, k=100, which='LM')
plt.plot(e, '.')
# -
# The problem is exacerbated on a simple circle too.
# +
# Same experiment on a non-uniformly sampled circle: Laplacian eigenmap
# embeddings with and without the density-invariant normalization.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
G1 = pg.graphs.RandomRing(100, seed=0)
G1.plot(ax=axes[0], title='original data')
G1.set_coordinates('laplacian_eigenmap3D')
G1.plot(ax=axes[1], title='standard embedding')
W = normalize(G1.W, G1.dw)
G2 = pg.graphs.Graph(W)
G2.set_coordinates('laplacian_eigenmap3D')
G2.plot(ax=axes[2], title="Lafon's density-invariant normalization")
I = np.identity(G2.N)
D = np.diag(G2.dw)
Dinv = np.diag(1/G2.dw)
L = D - W
# Embedding from the generalized eigenproblem (second call overrides the first).
e, U = sparse.linalg.eigsh(L, M=I, k=3, which='SM')
e, U = sparse.linalg.eigsh(L, M=D, k=3, which='SM')
axes[2].scatter(*U[:, 1:3].T)
# Same as above.
e, U = sparse.linalg.eigs(Dinv@L, k=3, which='SM')
e, U = sparse.linalg.eigs(I - Dinv@W, k=3, which='SM')
#e, U = sparse.linalg.eigs(Dinv@W, k=3, which='LM')
axes[2].scatter(*U[:, 1:3].T);
# -
# Kernel width recommended by Lafon in his thesis.
# EVD doesn't converge.
# +
# Kernel width recommended by Lafon: mean squared distance to the nearest neighbor.
G = pg.graphs.NNGraph(graph1.coords, k=k, kernel=lambda d: 1/d, kernel_width=1)
# Weight 1/d is maximal for the nearest neighbor, so 1/max gives its distance.
d = 1 / G.W.max(0).toarray().squeeze()
width = np.mean(d**2)
print(graph2)
print(width)
graph4 = pg.graphs.NNGraph(graph1.coords, k=k, kernel='gaussian', kernel_width=width)
#plot_spectrum([graph2, graph4], 100, normalize=True)
# -
# ### Mesh Laplacian
#
# 1. Create a triangular mesh from a pixelized sphere (if not given).
# * Hard in general but easy on the sphere because it's convex.
# 2. Compute the cotan Laplacian = lumped FEM Laplacian from the mesh.
# * Use trimesh or igl.
# * Both need an $n \times 3$ matrix of vertices (entries are coordinates), and an $f \times 3$ matrix of faces (entries are vertex index).
# 3. Impact of the (diagonal) mass matrix.
#
# Notes:
# * Much better operator for non-uniform samplings. Not much change for HEALPix.
# * Convolutions on equiangular should work as well as on HEALPix.
# * The eigenvalues have the correct value of $\ell (\ell + 1)$.
# * Quality of convolution on discrete spheres still depends on how much they capture the geometry of the continuous sphere.
#
# Todo:
# * integrate in PyGSP as `pg.graphs.Mesh(vertices, faces)`
# * needs the PyGSP to accept arbitrary vertex metrics
# * the graph Laplacians (combinatorial, normalized, RW) could be abstracted out
import igl
#from meshplot import plot, subplot, interact
# +
# Pick a sampling (second assignment overrides the first, notebook-style).
graph = pg.graphs.SphereHealpix(4)
graph = pg.graphs.SphereEquiangular(6, 12)
#graph = pg.graphs.SphereRandom(200)
graph.compute_fourier_basis()
def triangulate(graph):
    """Delaunay-triangulate the pixel centers of a spherical sampling.

    Returns the (n, 3) vertex coordinates and the (t, 3) triangle index
    array obtained from the spherical Voronoi diagram of ``graph.coords``
    (easy on the sphere because the point cloud is convex).
    """
    voronoi = SphericalVoronoi(graph.coords)
    assert voronoi.points.shape[0] == graph.n_vertices
    return voronoi.points, voronoi._simplices
# Mesh (vertices, faces) of the chosen sampling.
v, f = triangulate(graph)
#igl.write_triangle_mesh('mesh.obj', v, f)
print(f'{v.shape[0]} vertices and {f.shape[0]} triangles')
# + active=""
# import trimesh
# mesh = trimesh.Trimesh(v, f)
#
# # What Laplacian is that?
# L1 = trimesh.smoothing.laplacian_calculation(mesh)
# assert len((L1 - L1.T).data) != 0
# e, U = np.linalg.eig(L1.toarray())
#
# fig, axes = plt.subplots(1, 2, figsize=(8, 4))
# axes[0].imshow(L1.toarray())
# axes[1].plot(e, '.');
# +
def compute_cotan_laplacian(graph, return_mass=False):
    """Build the cotangent (lumped FEM) Laplacian of a triangulated sphere graph.

    If ``return_mass`` is true, return the pair (stiffness, mass); otherwise
    return the mass-normalized operator M^-1 L.
    """
    vertices, faces = triangulate(graph)
    stiffness = -igl.cotmatrix(vertices, faces)
    # The cotan stiffness matrix must be exactly symmetric.
    assert len((stiffness - stiffness.T).data) == 0
    mass = igl.massmatrix(vertices, faces, igl.MASSMATRIX_TYPE_VORONOI)
    # mass = igl.massmatrix(vertices, faces, igl.MASSMATRIX_TYPE_BARYCENTRIC)
    if not return_mass:
        inverse_mass = sparse.diags(1 / mass.diagonal())
        return inverse_mass @ stiffness
    # Eliminate zeros for speed (appears for equiangular).
    stiffness.eliminate_zeros()
    return stiffness, mass
L, M = compute_cotan_laplacian(graph, return_mass=True)
# +
# Gradient as 3D vector attached to triangle.
# Weighted average of three 1-forms (edge-valued functions).
# Boundary / difference operator (but going to face vectors).
B = igl.grad(v, f)
# Triangle area = triangle (dual vertex) weights.
Mf = igl.doublearea(v, f) / 2
# Repeat the areas three times: B stacks the x, y, z gradient components.
Mf = sparse.diags(np.hstack(3*[Mf]))
# Sanity check: assembling div(grad) = B^T Mf B reproduces the cotan
# stiffness matrix L computed by igl above.
L1 = B.T @ Mf @ B
np.testing.assert_allclose(L1.toarray(), L.toarray(), atol=1e-10)
# Distinct non-zero counts per row of B (displayed by the notebook).
np.unique(np.asarray((B != 0).sum(1)))
# -
# Visual comparison of the kNN graph Laplacian vs the cotan mesh Laplacian.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
im = axes[0].imshow(graph.L.toarray())
fig.colorbar(im, ax=axes[0])
im = axes[1].imshow(L.toarray())
fig.colorbar(im, ax=axes[1])
im = axes[2].imshow((graph.L - L).toarray())
fig.colorbar(im, ax=axes[2])
axes[0].set_title(f'kNN graph ({graph.L.nnz} non-zeros)')
axes[1].set_title(f'cotan mesh ({L.nnz} non-zeros)');
axes[2].set_title(f'difference');
# +
# Compare the diagonals of the two Laplacians and of the two metrics.
fig, axes = plt.subplots(1, 3, figsize=(12, 4))
# Laplacian matrices.
axes[0].plot(graph.L.diagonal(), '.')
axes[0].plot(L.diagonal(), '.')
# Weight matrices / metrics.
# Looks like area indeed.
axes[1].plot(M.diagonal(), '.');
axes[2].plot(Mf.diagonal(), '.');
# +
# Spectrum is great!
# Generalized eigenproblem L u = lambda M u; the eigenvectors come out
# M-orthonormal (checked just below).
e, U = scipy.linalg.eigh(L.toarray(), M.toarray())
np.testing.assert_allclose(U.T @ M @ U, np.identity(graph.N), atol=1e-10)
# Mostly like ours without the mass matrix (vertex weights/metric).
#e, U = np.linalg.eigh(L.toarray())
# But that isn't as good either.
#e, U = scipy.linalg.eigh(graph.L.toarray(), M.toarray())
fig = plt.figure(figsize=(10, 4))
# Eigenvalues.
n = 50
ax = fig.add_subplot(1, 2, 1)
# Rescale the kNN-graph eigenvalues so both curves agree at index n.
ax.plot(graph.e[:n] / graph.e[n]*e[n], '.', label='kNN graph')
ax.plot(e[:n], '.', label='cotan mesh')
ax.legend()
# Eigenvectors.
ax = fig.add_subplot(1, 2, 2, projection=ccrs.Mollweide())
plot_mollweide(graph, U[:,5], ax=ax, title='')
# -
# Check: M^-1 L (non-symmetric) has the same spectrum as the generalized
# problem solved above.
L2 = np.diag(1/M.diagonal()) @ L
e2, U2 = scipy.linalg.eig(L2)
np.testing.assert_allclose(sorted(e2), e, atol=1e-10)
# +
# Compare the cotan-mesh spectra of several spherical samplings with
# roughly the same number of vertices.
graphs = [
    pg.graphs.SphereHealpix(16),
    pg.graphs.SphereIcosahedral(16),
    pg.graphs.SphereCubed(22),
    pg.graphs.SphereGaussLegendre(45, nlon='ecmwf-octahedral'),
    pg.graphs.SphereEquiangular(38, 76),
    pg.graphs.SphereRandom(2817),
]
n_eigenvalues = 200
fig, ax = plt.subplots(1, 1, figsize=(16, 4))
for graph in graphs:
    L, M = compute_cotan_laplacian(graph, return_mass=True)
    e, U = scipy.linalg.eigh(L.toarray(), M.toarray())
    # The sparse routine (Lanczos iteration) is much slower than the full EVD. Why?
    #e, U = sparse.linalg.eigsh(L, n_eigenvalues, M, which='SM')
    ax.plot(e[:n_eigenvalues], '.', label=f'{graph.__repr__(limit=2)}')
ax.legend()
# Vertical lines delimit the eigenspaces of the continuous sphere: degree l
# has dimension 2l+1, hence the eigenspace sizes 1, 3, 5, ...
eigenspace = 1
vline = 1
while vline <= min(n_eigenvalues, max(graph.N for graph in graphs)):
    ax.axvline(vline-0.5, linestyle='--', color='grey')
    eigenspace += 2
    vline += eigenspace
#fig.savefig('spectrum_cotan_mesh.png', facecolor='w', dpi=200)
# -
# ### Filtering with the cotan Laplacian (a non-symmetric operator)
# Without metric (or with identity metric $M=I$):
# $L = U \Lambda U^\top, \ U^\top U = I, \ U^\top L U = \Lambda, \ L U = U \Lambda$
# $$
# \newcommand\U{\tilde{U}}
# \newcommand\u{\tilde{u}}
# \newcommand\L{\tilde{L}}
# \newcommand\l{\tilde{\Lambda}}
# $$
# With a metric $M$ (for vertex-valued functions):
# * Symmetric eigendecomposition: $M^{-1/2} L M^{-1/2} = U \Lambda U^\top$
# * Generalized eigendecomposition: $\L = M^{-1} L = M^{-1/2} U \Lambda U^\top M^{1/2} = \U \Lambda \U^{-1}, \ L \U = M \U \Lambda$
# * can also be seen as rescaling before and after the operator
# * Relation between the two eigenbases: $\U = M^{-1/2} U, \ \U^{-1} = U^\top M^{1/2} = \U^\top M$
# * Inverse $\U^{-1}$ is easily calculated.
# * $\U^{-1} \U = \U^\top M \U = (U^\top M^{-1/2}) M (M^{-1/2} U) = U^\top U = I$
# * Fourier basis $\U$ is orthonormal w.r.t. the metric $M$.
# * $\L$ is a symmetric operator w.r.t. the metric $M$.
# * Function $g(\L) = g(M^{-1} L) = M^{-1/2} U g(\Lambda) U^\top M^{1/2} = \U g(\Lambda) \U^{-1} = \U g(\Lambda) \U^\top M$
#
# Notes:
# * $\tilde{L}$ is sparse (same sparsity as $L$) if $M$ is diagonal.
# * Smoothest eigenvector minimizes $\lambda_i = \u_i^{-1} \L \u_i = (\u_i^\top) M (\L \u_i) = \u_i^T L \u_i$
# * inner-products (hence norms and distances) don't depend on the vertex metric
# * $\u_0$ is constant
# +
# Numerical verification of the identities derived in the markdown above:
# the generalized eigenbasis U satisfies U^-1 = U^T M.
graph = pg.graphs.SphereEquiangular(6, 12)
#graph = pg.graphs.SphereHealpix(16)
L, M = compute_cotan_laplacian(graph, return_mass=True)
e, U = scipy.linalg.eigh(L.toarray(), M.toarray())
Uinv = U.T @ M
np.testing.assert_allclose(Uinv, np.linalg.inv(U), atol=1e-10)
np.testing.assert_allclose(Uinv @ U, np.identity(graph.N), atol=1e-10)
np.testing.assert_allclose(U @ Uinv, np.identity(graph.N), atol=1e-10)
np.testing.assert_allclose(U.T @ M @ U, np.identity(graph.N), atol=1e-10)
# Symmetric form M^-1/2 L M^-1/2 has the same eigenvalues.
# (Despite the name, Msqrt holds the inverse square root of M.)
Msqrt = sparse.diags(1 / np.sqrt(M.diagonal()))
Ln = Msqrt @ L @ Msqrt
# NOTE(review): the next two lines compute the same decomposition twice;
# the first result is immediately overwritten and could be removed.
en, Un = np.linalg.eigh(Ln.toarray())
en, Un = scipy.linalg.eigh(Ln.toarray(), overwrite_a=True)
np.testing.assert_allclose(en, e, atol=1e-10)
# -
# The non-symmetric solvers agree on the spectrum as well.
e_, U_ = scipy.linalg.eig(L.toarray(), M.toarray())
np.testing.assert_allclose(sorted(e_), e, atol=1e-10)
Minv = sparse.diags(1 / M.diagonal())
e_, U_ = scipy.linalg.eig((Minv @ L).toarray())
np.testing.assert_allclose(sorted(e_), e, atol=1e-10)
# Filter.
import sys
sys.path.append('..')
from modules import layers
import torch
# +
def delta(i, n):
    """Kronecker delta: a length-n signal that is 1 at vertex i, 0 elsewhere.

    Returned tensor has shape (1, n, 1).
    """
    impulse = torch.zeros(1, n, 1)
    impulse[0, i, 0] = 1
    return impulse
# Identity filter.
# Chebyshev coefficients shaped (in_channels, order+1, out_channels); only
# the zeroth coefficient is set, so filtering should return the input
# unchanged (verified below on a delta signal).
weights = torch.zeros((1, 5, 1))
weights[0, 0] = 1
Ltorch = layers.prepare_torch_laplacian(L, torch.float32)
xin = delta(graph.N//2, graph.N)
xout = layers.cheb_conv(Ltorch, xin, weights)
np.testing.assert_allclose(xout, xin)
# +
graph = pg.graphs.SphereEquiangular(12, 24)
#graph = pg.graphs.SphereHealpix(16)
# Manual toggle: the second assignment selects which Laplacian to use.
laplacian = 'knn-graph'
laplacian = 'cotan-mesh'
if laplacian == 'knn-graph':
    Ltorch = layers.prepare_torch_laplacian(graph.L, torch.float32)
    _G = graph
    _G.estimate_lmax()
elif laplacian == 'cotan-mesh':
    L = compute_cotan_laplacian(graph)
    Ltorch = layers.prepare_torch_laplacian(L, torch.float32)
    # Dummy 1-vertex graph used only to carry lmax for the pygsp filter.
    _G = pg.graphs.Graph([[0]])
    _G._lmax = layers.estimate_lmax(L)
g = pg.filters.Heat(_G, 400)
#g = pg.filters.Wave(_G, 10)
K = 30 # polynomial order / size of filters
weights = pg.filters.approximations.compute_cheby_coeff(g, K)
# NOTE(review): halving the zeroth coefficient presumably matches the
# convention expected by layers.cheb_conv (pygsp halves it at evaluation
# time instead) — confirm against the cheb_conv implementation.
weights[0] /= 2
weights = weights.reshape((1, -1, 1))
weights = torch.from_numpy(weights.astype(np.float32))
xin = delta(graph.N//2, graph.N)
xout = layers.cheb_conv(Ltorch, xin, weights)
# Compare with pygsp (identity metric only).
if laplacian == 'knn-graph':
    xout_pygsp = g.filter(xin.squeeze(), order=K)
    np.testing.assert_allclose(xout.squeeze(), xout_pygsp, atol=1e-5)
# Plot the filter's spectral profile, the input delta, and the filtered output.
fig = plt.figure(figsize=(12, 4))
ax = fig.add_subplot(1, 3, 1)
g.plot(ax=ax)
ax = fig.add_subplot(1, 3, 2, projection=ccrs.Mollweide())
plot_mollweide(graph, xin, ax=ax, title='')
ax = fig.add_subplot(1, 3, 3, projection=ccrs.Mollweide())
plot_mollweide(graph, xout, ax=ax, title='')
|
tutorials/spherical_grids.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.convolutional import Convolution2D, MaxPooling2D
from keras.optimizers import SGD,RMSprop,adam
from keras.utils import np_utils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import os
import theano
from PIL import Image
from numpy import *
# SKLEARN
from sklearn.utils import shuffle
from sklearn.cross_validation import train_test_split
# +
import os
from sklearn.utils import shuffle
import glob
import sys
################### DATASET HANDLING ####################
# Root folder of the dataset; each immediate subfolder is one class label.
DATASET_PATH = "/home/dic/jupyter/train_Binary" #change the path to your dataset folder here
def parseDataset():
    """Enumerate the dataset and split it into train/val/test lists.

    Subfolder names under DATASET_PATH are used as class labels. Image
    paths are shuffled with a fixed seed (74) so the split is reproducible:
    70% training, and the remaining 30% halved into validation and test.

    Returns:
        (classes, train, val, test): sorted class labels and three lists
        of image paths.
    """
    # Class labels come from the subfolder names.
    classes = sorted(os.listdir(DATASET_PATH))
    # Collect every image path, class by class.
    images = []
    for label in classes:
        class_dir = os.path.join(DATASET_PATH, label)
        images.extend(os.path.join(class_dir, name) for name in os.listdir(class_dir))
    # Deterministic shuffle so the split is reproducible across runs.
    images = shuffle(images, random_state=74)
    total_len = len(images)
    print("total:", total_len)
    # 70% of the images go to training...
    vsplit = int(total_len * 0.70)
    print("vsplit:", vsplit)
    # ...and the remaining 30% is halved into validation and test.
    tsplit = int(total_len - vsplit)
    print("tsplit:", tsplit)
    gsplit = int(tsplit / 2)
    print(gsplit)
    msplit = int(vsplit + gsplit)
    train = images[:vsplit]
    val = images[vsplit:msplit]
    test = images[msplit:]
    # Show some stats.
    print("CLASS LABELS:", classes)
    print("TRAINING IMAGES:", len(train))
    print("VALIDATION IMAGES:", len(val))
    print("TEST IMAGES:", len(test))
    return classes, train, val, test
#parse dataset
# These module-level globals are consumed by the batch-loading helpers below.
CLASSES, TRAIN, VAL,TEST = parseDataset()
# +
import cv2
import numpy as np
# Global accumulator: loadImageAndTarget appends every image it loads here.
# NOTE(review): this grows without bound as batches are generated — confirm
# it is actually needed, otherwise it is a memory leak.
img_p=[]
#################### BATCH HANDLING #####################
def loadImageAndTarget(path):
    """Load one image and build its one-hot target vector.

    Returns (img, target): img shaped (1, 3, 22, 23) in channels-first
    order, target shaped (1, 2). Also appends img to the global img_p list.
    """
    #print(path)
    # Open the image and scale it down. cv2.resize takes (width, height),
    # so the resulting array is (23, 22, 3).
    img = cv2.imread(path)
    #print(path)
    img = cv2.resize(img, (22, 23))
    #OpenCV uses BGR instead of RGB, but for now we can ignore that
    # The network wants channels first: (23, 22, 3) -> (3, 22, 23).
    img = np.transpose(img, (2, 1, 0))
    # The class label is the parent folder name. os.sep[-1] == os.sep
    # (it is a single character), so this splits on the path separator and
    # takes the second-to-last component.
    label = path.split(os.sep[-1])[-2]
    #print(label)
    # Index of the label within the global CLASSES list.
    index = CLASSES.index(label)
    # Allocate the one-hot target (binary classification: 2 classes).
    target = np.zeros((2), dtype='float32')
    # Set 1.0 at the label index; all other entries remain zero.
    # Example: if the label has index 1, target looks like [0.0, 1.0].
    target[index] = 1.0
    # Add batch dimensions: 4D for the image, 2D for the target.
    img = img.reshape(-1, 3, 22, 23)
    target = target.reshape(-1, 2)
    img_p.append(img)
    return img, target
# Always empty at this point: loadImageAndTarget has not been called yet.
print(img_p)
#a reasonable size for one batch is 128
# NOTE(review): the comment above says 128 but the constant is 200 — confirm
# which value is intended.
BATCH_SIZE = 200
def getDatasetChunk(split, batch_size=None):
    """Yield successive batch-sized chunks of *split*.

    Args:
        split: list of image paths (or any sliceable sequence).
        batch_size: chunk length; defaults to the module-level BATCH_SIZE
            (looked up lazily, so the original call sites are unaffected).

    Yields:
        Consecutive slices of *split*; the last one may be shorter.
    """
    if batch_size is None:
        batch_size = BATCH_SIZE
    for start in range(0, len(split), batch_size):
        yield split[start:start + batch_size]
def getNextImageBatch(split):
    """Yield (images, targets) batches for the given list of image paths.

    Each yielded pair is (x_b, y_b) with x_b shaped (len(chunk), 3, 22, 23)
    and y_b shaped (len(chunk), 2), matching the ConvNet's input/output.

    Bug fix: the original allocated ONE (BATCH_SIZE, ...) buffer before the
    loop and yielded slices (views) of it, so every yielded batch aliased
    the same memory — consumers that keep references (as the training code
    below does with list.append) saw earlier batches overwritten by later
    ones. Allocating a fresh, exactly-sized array pair per chunk gives each
    consumer independent data.
    """
    for chunk in getDatasetChunk(split):
        # Input shape of our ConvNet is (None, 3, 22, 23); output is (None, 2).
        x_b = np.zeros((len(chunk), 3, 22, 23), dtype='float32')
        y_b = np.zeros((len(chunk), 2), dtype='float32')
        for ib, path in enumerate(chunk):
            # Load image data and class label from path.
            x, y = loadImageAndTarget(path)
            # Pack into the batch arrays.
            x_b[ib] = x
            y_b[ib] = y
        yield x_b, y_b
#x_train= np.array()
# Materialize the generator output into per-split lists of batch arrays.
x_train=[]
y_train=[]
for image_batch, target_batch in getNextImageBatch(TRAIN):
    x_train.append(image_batch)
    # x_train.append(image_batch)
    y_train.append(target_batch)
# NOTE(review): this stacks per-batch arrays into one array of batches
# (and only for x_train, not the other splits); the last, shorter batch
# can make the result a ragged object array — confirm the intended shape.
x_train = np.array(x_train)
#print(y_train)
x_test=[]
y_test=[]
for image_batch, target_batch in getNextImageBatch(TEST):
    x_test.append(image_batch)
    y_test.append(target_batch)
x_val=[]
y_val=[]
for image_batch, target_batch in getNextImageBatch(VAL):
    x_val.append(image_batch)
    y_val.append(target_batch)
# -
import pandas as pd
# NOTE(review): x_train is a multi-dimensional array of stacked batches at
# this point, which pd.DataFrame cannot represent as a 2-D table — confirm
# this cell was meant to run on flattened data.
df_x=pd.DataFrame(x_train)
print(df_x)
# NOTE(review): broken line — `data` is not defined anywhere in this
# notebook and `.reshape` is referenced without being called, so this
# raises NameError as written.
df_x= data.iloc[:,1:].values.reshape
# +
from lasagne import layers
from lasagne.nonlinearities import softmax, tanh
import keras
from keras.models import Sequential, Model
from keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
from keras.models import Sequential, Model
from keras.layers import Dense, Dropout, Flatten, Activation, Input
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
# Image geometry and number of classes, matching loadImageAndTarget above.
img_rows=22
img_cols=23
num_classes=2
# Channels-first input, matching the (3, 22, 23) arrays produced by
# loadImageAndTarget. NOTE(review): channels-first assumes a Theano-style
# image data format (theano is imported above); with a TensorFlow backend
# this would need K.image_data_format() handling — confirm.
input_shape = (3, img_rows, img_cols)
################## BUILDING THE MODEL ###################
# The commented lasagne lines document the original network this Keras
# model was ported from.
#def buildModel():
#this is our input layer with the inputs (None, dimensions, width, height)
#l_input = layers.InputLayer((None, 3, 22, 23))
model = Sequential()
#first convolutional layer, has l_input layer as incoming and is followed by a pooling layer
#l_conv1 = layers.Conv2DLayer(l_input, num_filters=32, filter_size=3, nonlinearity=lasagne.nonlinearities.tanh)
model.add(Conv2D(32, (3, 3), padding='same', activation='relu', input_shape=input_shape))
#l_pool = layers.MaxPool2DLayer(l_conv1, pool_size=2)
#l_drop1= layers.DropoutLayer(l_conv1, p=0.1)
model.add(Dropout(0.1))
#l_dense1 = layers.DenseLayer(l_drop1, num_units=128)
#l_conv2 = layers.Conv2DLayer(l_drop1, num_filters=128, filter_size=5, nonlinearity=lasagne.nonlinearities.rectify)
#l_conv3 = layers.Conv2DLayer(l_conv2, num_filters=256, filter_size=5, nonlinearity=lasagne.nonlinearities.rectify)
model.add(Flatten())
#l_dense2 = layers.DenseLayer(l_drop1, num_units=128)
model.add(Dense(128, activation='relu'))
#l_drop2= layers.DropoutLayer(l_dense2, p=0.25)
model.add(Dropout(0.25))
#l_dense2 = layers.DenseLayer(l_drop2, num_units=128)
model.add(Dense(128, activation='relu'))
#l_output = layers.DenseLayer(l_dense2, num_units=7, nonlinearity=lasagne.nonlinearities.softmax)
model.add(Dense(num_classes,activation='softmax', name='preds'))
#let's see how many params our net has
#print ("MODEL HAS", layers.count_params(l_output), "PARAMS")
#we return the layer stack as our network by returning the last layer
#return model
#print(model.summary())
# -
#############COMPILING#############
batch_size = 256
epochs = 10
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# +
# NOTE(review): this trains on the raw, un-normalized batch lists/arrays,
# uses the deprecated `nb_epoch` argument (newer Keras spells it `epochs`),
# and the model is trained again below after normalization — confirm this
# first fit is intentional and not leftover experimentation.
model.fit(x_train, y_train,
          batch_size=32, nb_epoch=10, verbose=1)
# +
# Reshape and scale pixel values to [0, 1].
# NOTE(review): several issues to confirm here —
#  * X_train is reshaped to 1 channel although the data has 3 channels,
#    and X_train is never used afterwards;
#  * x_test and x_val are still Python lists at this point (built above),
#    so .reshape raises AttributeError;
#  * the (rows, cols, 3) channel-last order conflicts with the model's
#    channels-first input_shape (3, rows, cols).
X_train = np.asarray(x_train)
X_train = X_train.reshape(X_train.shape[0],1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
x_val = x_val.reshape(x_val.shape[0], img_rows, img_cols, 3)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_val = x_val.astype('float32')
x_train /= 255
x_test /= 255
x_val /= 255
# +
###########Training###################
# Train on the normalized data with the batch_size/epochs set at compile
# time, validating each epoch on the held-out validation split.
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_val, y_val))
|
all_codes/keras model/keras_implementation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import datetime as dt
import pandas as pd
#from arcgis.gis import GIS
import numpy as np
import seaborn as sns
import json
import requests
import matplotlib.pyplot as plt
import sklearn
from sklearn import cluster
from sklearn.preprocessing import LabelEncoder
from sklearn.cluster import KMeans
from arcgis import GIS, features, GeoAccessor, GeoSeriesAccessor, geometry
from arcgis.features import SpatialDataFrame
import arcgis
# Connect to the active ArcGIS Pro session.
gis = GIS('PRO')
# Renamed from `map`, which shadowed the Python builtin of the same name.
# Every later use of the old name in this notebook is inside commented-out
# code only, so no live code is affected by the rename.
raleigh_map = gis.map('Raleigh, NC')
# +
# Get Data from Cityworks EURL
# Can also be used for an ArcGIS REST endpoint
url = 'https://<insert your Cityworks or AGOL Feature Service>/FeatureServer/2/query?f=json&returnGeometry=true&spatialRel=esriSpatialRelIntersects&maxAllowableOffset=38&inSR=102100&outFields=*&outSR=102100'
resp = requests.get(url=url)
data = resp.json()
# NOTE(review): `dicts` is never used again — candidate for removal.
dicts = [{k:v} for k,v in data.items()]
# List Type, but it's a list of dictionaries
type(data['features'])
PRCRCityworks = pd.DataFrame(data['features'])
print(PRCRCityworks.shape)
# Converting List of Dictionaries to a pandas dataframe
# (one row per feature, columns from each feature's 'attributes' dict).
PRCR2 = pd.DataFrame(d['attributes'] for d in data['features'])
#print(PRCR2)
PRCR2.head()
# -
# Using LabelEncoder to add a column for Description as a number
# (k-means below requires purely numeric input).
labelencoder = LabelEncoder()
PRCR2['Description_Cat'] = labelencoder.fit_transform(PRCR2['Description'])
PRCR2.head()
# If you want to trim the columns, you can select out the ones you need this way
# but, it may be better to use "df.loc[:,['A','B']]" when conducting the analysis
# this will select out only those columns for analysis but leave the others
# selecting only cost and description columns
df1 = PRCR2[['Description','Description_Cat','WoCost','WoEquipCost','WoLaborCost','WoMatCost']]
df1.head()
# +
# data cleanup: keep only work orders where every cost field is non-zero.
dfnozero = PRCR2
# The original cell had two exploratory `.loc[...]` lines here whose
# results were discarded (no assignment, i.e. dead code), one of them
# using the deprecated positional `any(1)`. A single boolean mask over
# the four cost columns performs exactly the same row filtering as the
# four sequential `!= 0` filters it replaces.
cost_columns = ['WoCost', 'WoMatCost', 'WoLaborCost', 'WoEquipCost']
dfnozero = dfnozero[(dfnozero[cost_columns] != 0).all(axis=1)]
print(dfnozero.shape)
dfnozero.head()
# -
# %matplotlib inline
import seaborn as sns; sns.set()
# use dataframe.loc to analyze only those columns
# Pairwise scatter/KDE of the two cost features, colored by work type.
sns_plot = sns.pairplot(dfnozero.loc[:,['Description','WoEquipCost','WoLaborCost']], hue='Description', height=5);
sns_plot.savefig("c:/temp/output_LabMat_Pair.png")
# errors about "Data must have variance to compute a kernel density estimate" are common
# but it should still eventually plot, may take some time depending on how much data
# +
X_PRCR = dfnozero.loc[:,['WoEquipCost','WoLaborCost']]
# here is an excellent writeup on what PCA is
# https://jakevdp.github.io/PythonDataScienceHandbook/05.09-principal-component-analysis.html
# in essence, it helps you reduce the number of variables
from sklearn.decomposition import PCA
# Project the two cost features onto their two principal components and
# store them back on the frame for plotting.
model = PCA(n_components=2)
model.fit(X_PRCR)
X_2D = model.transform(X_PRCR)
dfnozero['PCA1'] = X_2D[:, 0]
dfnozero['PCA2'] = X_2D[:, 1]
# NOTE(review): newer seaborn releases require keyword arguments here
# (x=..., y=...) — confirm against the installed version.
sns.lmplot("PCA1", "PCA2", hue='Description', data=dfnozero, fit_reg=False);
# in ArcGIS Pro, you may get this message: <seaborn.axisgrid.FacetGrid object at 0x000001D6DB6BF278>
# if so, download anaconda and run this from a jupyter notebook directly instead of within Pro.
# +
# K-means analysis, unsupervised clustering
# similar to unsupervised clustering on a raster, but instead of grouping pixel values
# it is grouping your tabular data based its values
#dropping non-numeric columns
dfkmeans = dfnozero[['Description_Cat','WoCost','WoEquipCost','WoLaborCost','WoMatCost']]
# Convert DataFrame to matrix
mat = dfkmeans.values
# requesting 3 clusters
km = sklearn.cluster.KMeans(n_clusters=3)
km.fit(mat)
# Get cluster assignment labels
labels = km.labels_
# Format results as a DataFrame: column 0 holds the original row index,
# column 1 the assigned cluster label.
results = pd.DataFrame([dfkmeans.index,labels]).T
# the results go into their own dataframe that needs to be merged with the main one later, so we know
# which cluster that particular data point belongs to.
results
# +
# Re-fit k-means and visualize the clusters in the equip-vs-labor cost plane.
kmeans = KMeans(n_clusters=3).fit(dfkmeans)
centroids = kmeans.cluster_centers_
print(centroids)
plt.scatter(dfkmeans['WoEquipCost'], dfkmeans['WoLaborCost'], c= kmeans.labels_.astype(float), s=50, alpha=0.5)
# Bug fix: overlay the centroids in the SAME feature plane as the scatter
# above. dfkmeans columns are ['Description_Cat', 'WoCost', 'WoEquipCost',
# 'WoLaborCost', 'WoMatCost'], so WoEquipCost is centroid column 2 and
# WoLaborCost is column 3; the original plotted columns 0 and 1
# (Description_Cat vs WoCost), placing the red markers in the wrong plane.
plt.scatter(centroids[:, 2], centroids[:, 3], c='red', s=50)
# plt.show() doesn't seem to work in ArcGIS Pro
#plt.show()
# if needed, you can save the figure https://matplotlib.org/api/_as_gen/matplotlib.pyplot.savefig.html
# -
print(dfnozero.shape)
dfnozero.head()
# join the cluster results to the dataframe
# (results column 0 is the original row index, column 1 the cluster label)
mergedDf = dfnozero.merge(results, left_index=True, right_on=0)
mergedDf
# Rename the coordinate columns for the spatial tooling below.
mergedDf.rename(columns = {'WOXCoordinate':'x','WOYCoordinate':'y'}, inplace = True)
mergedDf.describe()
# Human-readable cluster name derived from the numeric label in column 1.
mergedDf['cluster'] = 'cluster'+mergedDf[1].apply(str)
mergedDf['cluster']
# +
# In theory this GeoAccessor should plot the points on a map that will appear inside this pandas notebook
# I couldn't get it working despite having something similar working on a similar dataset.
# plot from x, y
#mergedDf.rename(columns = {'WOXCoordinate':'x','WOYCoordinate':'y'}, inplace = True)
#mergedDf = GeoAccessor.from_xy(mergedDf,'WOXCoordinate','WOYCoordinate',sr=2264)
#mergedDf = GeoAccessor.from_xy(mergedDf,'x','y',sr=2264)
# drop the corrupt shape column?
#mergedDf.drop(columns=['SHAPE'])
#from shapely.geometry import Point
# combine lat and lon column to a shapely Point() object
#mergedDf['geometry'] = df.apply(lambda x: Point((float(x.lon), float(x.lat))), axis=1)
#sdf = pd.DataFrame.spatial.from_xy(df=mergedDf, y_column='y', x_column='x', sr=2264)
#https://community.esri.com/thread/223454-arcgis-python-api-set-geometry-column-of-spatially-enabled-data-frame
#mergedDf['SHAPE'] = mergedDf.apply(lambda row : arcgis.geometry.Geometry({'x': row['x'], 'y': row['y']}), axis=1 )
#sdf = SpatialDataFrame(mergedDf)
#layer = gis.content.import_data(sdf, title='My Data')
#sdf = pd.DataFrame.spatial.from_xy(df=mergedDf, y_column='y', x_column='x', sr=2264)
# import the data frame into a GIS Layer
CityworksPoints = gis.content.import_data(mergedDf)
# -
# dropping columns with numeric headers that aren't needed anymore
# numeric headers cause issues when importing a CSV with XYTableToPoint
# Bug fix: DataFrame.drop returns a NEW frame (it is not in-place by
# default), so the original discarded the result and the exported CSV
# still contained columns 0 and 1. Assign the result back before writing.
mergedDf = mergedDf.drop(columns=[0, 1])
# converting dataframe to csv
mergedDf.to_csv('C:/temp/DataFrame.csv',index=False)
# +
# converting csv to feature class
import arcpy
arcpy.env.workspace = r"c:\temp\Scratch.gdb"
arcpy.env.overwriteOutput = True
# Bug fix: the coordinate columns were renamed to 'x'/'y' earlier in the
# notebook (mergedDf.rename(..., inplace=True)), so the exported CSV no
# longer has 'WOXCoordinate'/'WOYCoordinate' columns. Use the renamed
# columns (the variant previously left commented out).
arcpy.management.XYTableToPoint('C:/temp/DataFrame.csv', 'cityworks',
                                "x", "y", "", arcpy.SpatialReference(2264))
# +
# add layer to Map in my current project (to be used within ArcGIS Pro)
aprx = arcpy.mp.ArcGISProject("CURRENT")
print(aprx.filePath)
m = aprx.listMaps("Map")[0]
m.addDataFromPath(r"c:\temp\Scratch.gdb\cityworks")
# https://pro.arcgis.com/en/pro-app/arcpy/mapping/symbology-class.htm
# https://pro.arcgis.com/en/pro-app/arcpy/mapping/uniquevaluerenderer-class.htm
# changing layer symbology
# Switch every simple-rendered feature layer to unique values on the
# 'cluster' field and color each group red with a per-value transparency.
for lyr in m.listLayers():
    print(lyr)
    if lyr.isFeatureLayer:
        sym = lyr.symbology
        if hasattr(sym, 'renderer'):
            if sym.renderer.type == 'SimpleRenderer':
                sym.updateRenderer('UniqueValueRenderer')
                sym.renderer.fields = ['cluster']
                for grp in sym.renderer.groups:
                    for itm in grp.items:
                        # NOTE(review): values[0][0] is assumed to be a
                        # numeric string here (it feeds int() below), but
                        # the 'cluster' field holds strings like
                        # 'cluster0' — confirm this branch actually runs.
                        transVal = itm.values[0][0] #Grab the first "percent" value in the list of potential values
                        print(transVal)
                        itm.symbol.color = {'RGB': [255, 0, 0, int(transVal)*50]}
                        itm.label = 'cluster ' + str(transVal)
        lyr.symbology = sym
# +
# add the layer to the map
# this is old, and was to be used with the GeoAccessor bit above. I'm leaving it in for reference.
# map.add_layer(CityworksPoints)
# plot the map
# mergedDf.spatial.plot(map_widget=map,
# renderer_type='u',
# col='cluster')
#Display the map
# map
# +
# one way of doing a linear regression
from sklearn import linear_model
# Predict equipment cost from labor and material cost.
reg = linear_model.LinearRegression()
reg.fit(mergedDf[['WoLaborCost', 'WoMatCost']], mergedDf['WoEquipCost'])
# -
# Fitted coefficients (one per predictor), displayed by the notebook.
reg.coef_
# +
# another way of doing a linear regression and plotting the results
# this is a predictive analysis, just to show how an unsupervised technique
# can be used as input into other analyses methods later
# great write up here:
# https://towardsdatascience.com/simple-and-multiple-linear-regression-with-python-c9ab422ec29c
# Split the merged frame by cluster name and fit/plot one regression each.
df_lowcost = mergedDf[mergedDf['cluster'] == 'cluster0']
df_highcost = mergedDf[mergedDf['cluster'] == 'cluster2']
# regression plot using seaborn
fig = plt.figure(figsize=(10, 7))
sns.regplot(x=df_lowcost.WoLaborCost, y=df_lowcost.WoEquipCost, color='blue', marker='+')
sns.regplot(x=df_highcost.WoLaborCost, y=df_highcost.WoEquipCost, color='magenta', marker='+')
# Legend, title and labels.
plt.legend(labels=['Low Cost', 'High Cost'])
plt.title('Relationship between Low and High Cost Work Order Clusters', size=24)
plt.xlabel('Labor Cost', size=18)
plt.ylabel('Equipment Cost', size=18);
plt.savefig("c:/temp/output_LabMat_Regression.png")
|
Unsupervised/CityworksToPro_clean.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # The Qubit
#
# This tutorial introduces you to one of the core concepts in quantum computing - the qubit, and its representation in mathematical notation and in Q# code.
#
# If you aren't familiar with [complex arithmetic](../ComplexArithmetic/ComplexArithmetic.ipynb) and [linear algebra](../LinearAlgebra/LinearAlgebra.ipynb), we recommend that you complete those tutorials first.
#
# This tutorial covers the following topics:
# * The concept of a qubit
# * Superposition
# * Vector representation of qubit states
# * Dirac notation
# * `Qubit` data type in Q#
# # The Concept of a Qubit
#
# The basic building block of a classical computer is the bit - a single memory cell that is either in state $0$ or in state $1$. Similarly, the basic building block of a quantum computer is the quantum bit, or **qubit**. Like the classical bit, a qubit can be in state $0$ or in state $1$. Unlike the classical bit, however, the qubit isn't limited to just those two states - it may also be in a combination, or **superposition** of those states.
#
# > A common misconception about quantum computing is that a qubit is always in one state or the other, we just don't know which one until we "measure" it. That is not the case. A qubit in a superposition is in a state between the states $0$ and $1$. When a qubit is measured, it is forced entirely into one state or the other - in other words, measuring it actually changes its state.
# # Matrix Representation
#
# The state of a qubit is represented by a complex vector of size 2:
#
# $$\begin{bmatrix} \alpha \\ \beta \end{bmatrix}$$
#
# Here $\alpha$ represents how "close" the qubit is to the state $0$, and $\beta$ represents how "close" the qubit is to the state $1$. This vector is normalized: $|\alpha|^2 + |\beta|^2 = 1$.
#
# $\alpha$ and $\beta$ are known as **amplitudes** of states $0$ and $1$, respectively.
# # Basis States
#
# A qubit in state $0$ would be represented by the following vector:
#
# $$\begin{bmatrix} 1 \\ 0 \end{bmatrix}$$
#
# Likewise, a qubit in state $1$ would be represented by this vector:
#
# $$\begin{bmatrix} 0 \\ 1 \end{bmatrix}$$
#
# Note that you can use scalar multiplication and vector addition to express any qubit state as a sum of these two vectors with certain weights (known as **linear combination**):
#
# $$\begin{bmatrix} \alpha \\ \beta \end{bmatrix} =
# \begin{bmatrix} \alpha \\ 0 \end{bmatrix} + \begin{bmatrix} 0 \\ \beta \end{bmatrix} =
# \alpha \cdot \begin{bmatrix} 1 \\ 0 \end{bmatrix} + \beta \cdot \begin{bmatrix} 0 \\ 1 \end{bmatrix}$$
#
# Because of this, these two states are known as **basis states**.
#
# These two vectors have two additional properties. First, as mentioned before, both are **normalized**:
#
# $$\langle \begin{bmatrix} 1 \\ 0 \end{bmatrix} , \begin{bmatrix} 1 \\ 0 \end{bmatrix} \rangle =
# \langle \begin{bmatrix} 0 \\ 1 \end{bmatrix} , \begin{bmatrix} 0 \\ 1 \end{bmatrix} \rangle = 1$$
#
# Second, they are **orthogonal** to each other:
#
# $$\langle \begin{bmatrix} 1 \\ 0 \end{bmatrix} , \begin{bmatrix} 0 \\ 1 \end{bmatrix} \rangle =
# \langle \begin{bmatrix} 0 \\ 1 \end{bmatrix} , \begin{bmatrix} 1 \\ 0 \end{bmatrix} \rangle = 0$$
#
# > As a reminder, $\langle V , W \rangle$ is the [inner product](../LinearAlgebra/LinearAlgebra.ipynb#Inner-Product) of $V$ and $W$.
#
# This means that these vectors form an **orthonormal basis**. The basis of $\begin{bmatrix} 1 \\ 0 \end{bmatrix}$ and $\begin{bmatrix} 0 \\ 1 \end{bmatrix}$ is called the **computational basis**, also known as the **canonical basis**.
#
# > There exist other orthonormal bases, for example, the **Hadamard basis**, formed by the vectors
# >
# > $$\begin{bmatrix} \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} \end{bmatrix} \text{ and } \begin{bmatrix} \frac{1}{\sqrt{2}} \\ -\frac{1}{\sqrt{2}} \end{bmatrix}$$
# >
# > You can check that these vectors are normalized, and orthogonal to each other. Any qubit state can be expressed as a linear combination of these vectors:
# >
# > $$\begin{bmatrix} \alpha \\ \beta \end{bmatrix} =
# \frac{\alpha + \beta}{\sqrt{2}} \begin{bmatrix} \frac{1}{\sqrt{2}} \\ \frac{1}{\sqrt{2}} \end{bmatrix} +
# \frac{\alpha - \beta}{\sqrt{2}} \begin{bmatrix} \frac{1}{\sqrt{2}} \\ -\frac{1}{\sqrt{2}} \end{bmatrix}$$
# >
# > The Hadamard basis is widely used in quantum computing, for example, in the [BB84 quantum key distribution protocol](https://en.wikipedia.org/wiki/BB84).
# # Dirac Notation
#
# Writing out each vector when doing quantum calculations takes up a lot of space, and this will get even worse once we introduce quantum gates and multi-qubit systems. **Dirac notation** is a shorthand notation that helps solve this issue. In Dirac notation, a vector is denoted by a symbol called a **ket**. For example, a qubit in state $0$ is represented by the ket $|0\rangle$, and a qubit in state $1$ is represented by the ket $|1\rangle$:
#
# <table>
# <col width=150>
# <col width=150>
# <tr>
# <td style="text-align:center; border:1px solid">$|0\rangle = \begin{bmatrix} 1 \\ 0 \end{bmatrix}$</td>
# <td style="text-align:center; border:1px solid">$|1\rangle = \begin{bmatrix} 0 \\ 1 \end{bmatrix}$</td>
# </tr>
# </table>
#
# These two kets represent basis states, so they can be used to represent any other state:
#
# $$\begin{bmatrix} \alpha \\ \beta \end{bmatrix} = \alpha|0\rangle + \beta|1\rangle$$
#
# Any symbol other than $0$ or $1$ within the ket can be used to represent arbitrary vectors, similar to how variables are used in algebra:
#
# $$|\psi\rangle = \alpha|0\rangle + \beta|1\rangle$$
#
# Several ket symbols have a generally accepted use, such as:
#
# <table>
# <col width=180>
# <col width=180>
# <tr>
# <td style="text-align:center; border:1px solid">$|+\rangle = \frac{1}{\sqrt{2}}\big(|0\rangle + |1\rangle\big)$</td>
# <td style="text-align:center; border:1px solid">$|-\rangle = \frac{1}{\sqrt{2}}\big(|0\rangle - |1\rangle\big)$</td>
# </tr>
# <tr>
# <td style="text-align:center; border:1px solid">$|i\rangle = \frac{1}{\sqrt{2}}\big(|0\rangle + i|1\rangle\big)$</td>
# <td style="text-align:center; border:1px solid">$|-i\rangle = \frac{1}{\sqrt{2}}\big(|0\rangle - i|1\rangle\big)$</td>
# </tr>
# </table>
#
# We will learn more about Dirac notation in the next tutorials, as we introduce quantum gates and multi-qubit systems.
# Q#
# ===
#
# ## Qubit data type
#
# In Q#, qubits are represented by the `Qubit` data type. On a physical quantum computer, it's impossible to directly access the state of a qubit, whether to read its exact state, or to set it to a desired state, and this data type reflects that. Instead, you can change the state of a qubit using [quantum gates](../SingleQubitGates/SingleQubitGates.ipynb), and extract information about the state of the system using measurements.
#
# That being said, when you run Q# code on a quantum simulator instead of a physical quantum computer, you can use diagnostic functions that allow you to peek at the state of the quantum system. This is very useful both for learning and for debugging small Q# programs.
#
# The qubits aren't an ordinary data type, so the variables of this type have to be declared and initialized ("allocated") a little differently:
#
# ```c#
# // This statement allocates a qubit, and binds it to the variable q
# use q = Qubit();
# // You can work with the qubit here
# // ...
#
# // The qubit is deallocated once it's not used any longer
# ```
#
# > Before Q# 0.15 the syntax for qubit allocation was different:
# ```c#
# // This statement allocates a qubit, and binds it to the variable q
# using (q = Qubit()) {
# // You can work with the qubit here
# // ...
# }
# // The qubit is no longer allocated outside of the 'using' block
# ```
#
# Freshly allocated qubits start out in state $|0\rangle$, and have to be returned to that state by the time they are released. If you attempt to release a qubit in any state other than $|0\rangle$, your program will throw a `ReleasedQubitsAreNotInZeroStateException`. We will see why it is important later, when we look at multi-qubit systems.
# ### <span style="color:blue">Demo: Examining Qubit States in Q#</span>
#
# We will be using the function [`DumpMachine`](https://docs.microsoft.com/qsharp/api/qsharp/microsoft.quantum.diagnostics.dumpmachine) to print the state of the quantum computer.
# The exact behavior of this function depends on the quantum simulator or processor you are using.
# On a full state simulator used in this demo, this function prints the information on each basis state, one basis state per row.
# This includes information about the amplitude of the state, the probability of measuring that state, and the phase of the state (more on that later).
# Each row has the following format:
#
# 
#
# For example, the state $|0\rangle$ would be represented as follows:
#
# 
#
# The state $\frac{1}{\sqrt{2}}|0\rangle - \frac{i}{\sqrt{2}}|1\rangle$ would be represented as so:
#
# 
#
#
# > It is important to note that although we reason about quantum systems in terms of their state, Q# does not have any representation of the quantum state in the language. Instead, state is an internal property of the quantum system, modified using gates. For more information, see [Q# documentation on quantum states](https://docs.microsoft.com/azure/quantum/concepts-dirac-notation#q-gate-sequences-equivalent-to-quantum-states).
#
# This demo shows how to allocate a qubit and examine its state in Q#. This demo uses quantum gates to manipulate the state of the qubit - we will explain how they work in the next tutorial, so do not worry about them for now. Run the next cell using `Ctrl+Enter` (`⌘+Enter` on Mac), then run the cell after it to see the output.
# +
// Run this cell using Ctrl+Enter (⌘+Enter on Mac)
// Then run the next cell to see the output
open Microsoft.Quantum.Diagnostics;
// Demo: allocate a single qubit, walk it through several single-qubit states,
// and call DumpMachine() after each step to inspect the simulator state.
operation QubitsDemo () : Unit {
    // This line allocates a qubit in state |0⟩
    use q = Qubit();
    Message("State |0⟩:");
    // This line prints out the state of the quantum computer
    // Since only one qubit is allocated, only its state is printed
    DumpMachine();
    // This line changes the qubit from state |0⟩ to state |1⟩
    X(q);
    Message("State |1⟩:");
    DumpMachine();
    // This line changes the qubit to state |-⟩ = (1/sqrt(2))(|0⟩ - |1⟩)
    // That is, this puts the qubit into a superposition
    // 1/sqrt(2) is approximately 0.707107
    H(q);
    Message("State |-⟩:");
    DumpMachine();
    // This line changes the qubit to state |-i⟩ = (1/sqrt(2))(|0⟩ - i|1⟩)
    S(q);
    Message("State |-i⟩:");
    DumpMachine();
    // This will put the qubit into an uneven superposition,
    // where the amplitudes of |0⟩ and |1⟩ have different moduli
    Rx(2.0, q);
    Ry(1.0, q);
    Message("Uneven superposition state:");
    DumpMachine();
    // This line returns the qubit to state |0⟩ (qubits must be |0⟩ when released)
    Reset(q);
}
# -
# %simulate QubitsDemo
# # Relative and Global Phase
#
# You may recall that a complex number has a parameter called its phase. If a complex number $x$ is written in [polar form](../ComplexArithmetic/ComplexArithmetic.ipynb#Polar-coordinates) $x = re^{i\theta}$, its phase is $\theta$.
#
# The phase of a basis state is the complex phase of the amplitude of that state. For example, a system in state $\frac{1 + i}{2}|0\rangle + \frac{1 - i}{2}|1\rangle$, the phase of $|0\rangle$ is $\frac{\pi}{4}$, and the phase of $|1\rangle$ is $-\frac{\pi}{4}$. The difference between these two phases is known as **relative phase**.
#
# Multiplying the state of the entire system by $e^{i\theta}$ doesn't affect the relative phase: $\alpha|0\rangle + \beta|1\rangle$ has the same relative phase as $e^{i\theta}\big(\alpha|0\rangle + \beta|1\rangle\big)$. In the second expression, $\theta$ is known as the system's **global phase**.
#
# The state of a qubit (or, more generally, the state of a quantum system) is defined by its relative phase - global phase arises as a consequence of using linear algebra to represent qubits, and has no physical meaning. That is, applying a phase to the entire state of a system (multiplying the entire vector by $e^{i\theta}$ for any real $\theta$) doesn't actually affect the state of the system. Because of this, global phase is sometimes known as **unobservable phase** or **hidden phase**.
# # Conclusion
#
# This should be enough for you to gain a basic understanding of qubits and qubit states. Next, you will learn how to manipulate those states in the [single-qubit gates tutorial](../SingleQubitGates/SingleQubitGates.ipynb).
|
tutorials/Qubit/Qubit.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# - [Lab 2: Ridge Regression](#6.6.1-Ridge-Regression)
# - [Lab 2: The Lasso](#6.6.2-The-Lasso)
# - [Lab 3: Principal Components Regression](#6.7.1-Principal-Components-Regression)
# - [Lab 3: Partial Least Squares](#6.7.2-Partial-Least-Squares)
# # Chapter 6 - Linear Model Selection and Regularization
# +
# # %load ../standard_import.txt
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import glmnet as gln
from sklearn.preprocessing import scale
from sklearn import cross_validation
from sklearn.linear_model import LinearRegression, Ridge, RidgeCV, Lasso, LassoCV
from sklearn.decomposition import PCA
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import mean_squared_error
pd.set_option('display.notebook_repr_html', False)
# %matplotlib inline
plt.style.use('seaborn-white')
# -
# # Lab 2
# In R, I exported the dataset from package 'ISLR' to a csv file.
# Load the Hitters data (exported from the R package 'ISLR') and drop rows
# with missing values (e.g. players without a recorded Salary).
df = pd.read_csv('Data/Hitters.csv', index_col=0).dropna()
df.index.name = 'Player'
df.info()
df.head()
# One-hot encode the three categorical predictors.
dummies = pd.get_dummies(df[['League', 'Division', 'NewLeague']])
dummies.info()
print(dummies.head())
# +
y = df.Salary
# Drop the column with the independent variable (Salary), and columns for which we created dummy variables
X_ = df.drop(['Salary', 'League', 'Division', 'NewLeague'], axis=1).astype('float64')
# Define the feature set X.
X = pd.concat([X_, dummies[['League_N', 'Division_W', 'NewLeague_N']]], axis=1)
X.info()
# -
X.head(5)
# #### I executed the R code and downloaded the exact same training/test sets used in the book.
# NOTE(review): the directory is 'Data/' above but 'data/' below — on a
# case-sensitive filesystem one of these will fail; confirm the folder name.
X_train = pd.read_csv('data/Hitters_X_train.csv', index_col=0)
y_train = pd.read_csv('data/Hitters_y_train.csv', index_col=0)
X_test = pd.read_csv('data/Hitters_X_test.csv', index_col=0)
y_test = pd.read_csv('data/Hitters_y_test.csv', index_col=0)
# ### 6.6.1 Ridge Regression
# ### Scikit-learn
# The __glmnet__ algorithms in R optimize the objective function using cyclical coordinate descent, while scikit-learn Ridge regression uses linear least squares with L2 regularization. They are rather different implementations, but the general principles are the same.
#
# For the __glmnet() function in R__ the penalty is defined as:
# ### $$ \lambda\bigg(\frac{1}{2}(1−\alpha)|\beta|^2_2 \ +\ \alpha|\beta|_1\bigg) $$
# (See R documentation and https://cran.r-project.org/web/packages/glmnet/vignettes/glmnet_beta.html)<BR>
# The function supports L1 and L2 regularization. For just Ridge regression we need to use $\alpha = 0 $. This reduces the above penalty to
# ### $$ \frac{1}{2}\lambda |\beta|^2_2 $$
# The __sklearn Ridge()__ function has the standard L2 penalty:
# ### $$ \lambda |\beta|^2_2 $$
#
# +
# Ridge coefficient paths over a log-spaced grid of regularization strengths.
alphas = 10**np.linspace(10,-2,100)*0.5
ridge = Ridge()
coefs = []
for a in alphas:
    ridge.set_params(alpha=a)
    ridge.fit(scale(X), y)
    coefs.append(ridge.coef_)
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
plt.axis('tight')
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization');
# -
# The above plot shows that the Ridge coefficients get larger when we decrease alpha.
# #### Alpha = 4
ridge2 = Ridge(alpha=4)
ridge2.fit(scale(X_train), y_train)
pred = ridge2.predict(scale(X_test))
mean_squared_error(y_test, pred)
# #### Alpha = $10^{10}$
# This big penalty shrinks the coefficients to a very large degree and makes the model more biased, resulting in a higher MSE.
ridge2.set_params(alpha=10**10)
ridge2.fit(scale(X_train), y_train)
pred = ridge2.predict(scale(X_test))
mean_squared_error(y_test, pred)
# #### Compute the regularization path using RidgeCV
# NOTE(review): scoring='mean_squared_error' is the legacy scikit-learn name;
# newer versions use 'neg_mean_squared_error'.
ridgecv = RidgeCV(alphas=alphas, scoring='mean_squared_error')
ridgecv.fit(scale(X_train), y_train)
ridgecv.alpha_
# Refit at the CV-selected alpha and evaluate on the held-out test set.
ridge2.set_params(alpha=ridgecv.alpha_)
ridge2.fit(scale(X_train), y_train)
mean_squared_error(y_test, ridge2.predict(scale(X_test)))
pd.Series(ridge2.coef_.flatten(), index=X.columns)
# ### python-glmnet (update 2016-08-29)
# This relatively new module is a wrapper for the fortran library used in the R package `glmnet`. It gives mostly the exact same results as described in the book. However, the `predict()` method does not give you the regression *coefficients* for lambda values not in the lambda_path. It only returns the predicted values.
# https://github.com/civisanalytics/python-glmnet
# +
grid = 10**np.linspace(10,-2,100)
ridge3 = gln.ElasticNet(alpha=0, lambda_path=grid)  # alpha=0 => pure ridge (L2) penalty
ridge3.fit(X, y)
# -
# #### Lambda 11498
ridge3.lambda_path_[49]
print('Intercept: {:.3f}'.format(ridge3.intercept_path_[49]))
pd.Series(np.round(ridge3.coef_path_[:,49], decimals=3), index=X.columns)
# L2 norm of the coefficient vector at this lambda.
np.sqrt(np.sum(ridge3.coef_path_[:,49]**2))
# #### Lambda 705
ridge3.lambda_path_[59]
print('Intercept: {:.3f}'.format(ridge3.intercept_path_[59]))
pd.Series(np.round(ridge3.coef_path_[:,59], decimals=3), index=X.columns)
np.sqrt(np.sum(ridge3.coef_path_[:,59]**2))
# #### Fit model using just the training set.
# NOTE(review): DataFrame.as_matrix() (used below) was removed in pandas 1.0;
# use .to_numpy() on modern pandas.
ridge4 = gln.ElasticNet(alpha=0, lambda_path=grid, scoring='mean_squared_error', tol=1e-12)
ridge4.fit(X_train, y_train.as_matrix().ravel())
# prediction using lambda = 4
pred = ridge4.predict(X_test, lamb=4)
mean_squared_error(y_test.as_matrix().ravel(), pred)
# #### Lambda chosen by cross validation
ridge5 = gln.ElasticNet(alpha=0, scoring='mean_squared_error')
ridge5.fit(X_train, y_train.as_matrix().ravel())
# Lambda with best CV performance
ridge5.lambda_max_
# Lambda larger than lambda_max_, but with a CV score that is within 1 standard deviation away from lambda_max_
ridge5.lambda_best_
# +
# Plot the CV mean squared error against log(lambda), with error bars.
plt.figure(figsize=(15,6))
plt.errorbar(np.log(ridge5.lambda_path_), -ridge5.cv_mean_score_, color='r', linestyle='None', marker='o',
             markersize=5, yerr=ridge5.cv_standard_error_, ecolor='lightgrey', capthick=2)
for ref, txt in zip([ridge5.lambda_best_, ridge5.lambda_max_], ['Lambda best', 'Lambda max']):
    plt.axvline(x=np.log(ref), linestyle='dashed', color='lightgrey')
    plt.text(np.log(ref), .95*plt.gca().get_ylim()[1], txt, ha='center')
plt.xlabel('log(Lambda)')
plt.ylabel('Mean-Squared Error');
# -
# MSE for lambda with best CV performance
pred = ridge5.predict(X_test, lamb=ridge5.lambda_max_)
mean_squared_error(y_test, pred)
# #### Fit model to full data set
ridge6= gln.ElasticNet(alpha=0, scoring='mean_squared_error', n_folds=10)
ridge6.fit(X, y)
# These are not really close to the ones in the book.
pd.Series(ridge6.coef_path_[:,ridge6.lambda_max_inx_], index=X.columns)
# ### 6.6.2 The Lasso
# ### Scikit-learn
#
#
# For both __glmnet__ in R and sklearn __Lasso()__ function the standard L1 penalty is:
# ### $$ \lambda |\beta|_1 $$
# +
# Lasso coefficient paths over the same grid of regularization strengths.
lasso = Lasso(max_iter=10000)
coefs = []
for a in alphas*2:
    lasso.set_params(alpha=a)
    lasso.fit(scale(X_train), y_train)
    coefs.append(lasso.coef_)
ax = plt.gca()
ax.plot(alphas*2, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1])  # reverse axis
plt.axis('tight')
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Lasso coefficients as a function of the regularization');
# -
# Select alpha by 10-fold cross-validation on the training set.
lassocv = LassoCV(alphas=None, cv=10, max_iter=10000)
lassocv.fit(scale(X_train), y_train.as_matrix().ravel())
lassocv.alpha_
lasso.set_params(alpha=lassocv.alpha_)
lasso.fit(scale(X_train), y_train)
mean_squared_error(y_test, lasso.predict(scale(X_test)))
# Some of the coefficients are now reduced to exactly zero.
pd.Series(lasso.coef_, index=X.columns)
# ### python-glmnet
lasso2 = gln.ElasticNet(alpha=1, lambda_path=grid, scoring='mean_squared_error', n_folds=10)  # alpha=1 => pure lasso (L1) penalty
lasso2.fit(X_train, y_train.as_matrix().ravel())
# +
# Plot the coefficient paths against the L1 norm of the coefficient vector.
l1_norm = np.sum(np.abs(lasso2.coef_path_), axis=0)
plt.figure(figsize=(10,6))
plt.plot(l1_norm, lasso2.coef_path_.T)
plt.xlabel('L1 norm')
plt.ylabel('Coefficients');
# -
# #### Let glmnet() create a grid to use in CV
lasso3 = gln.ElasticNet(alpha=1, scoring='mean_squared_error', n_folds=10)
lasso3.fit(X_train, y_train.as_matrix().ravel())
# +
# CV mean squared error against log(lambda), with error bars.
plt.figure(figsize=(15,6))
plt.errorbar(np.log(lasso3.lambda_path_), -lasso3.cv_mean_score_, color='r', linestyle='None', marker='o',
             markersize=5, yerr=lasso3.cv_standard_error_, ecolor='lightgrey', capthick=2)
for ref, txt in zip([lasso3.lambda_best_, lasso3.lambda_max_], ['Lambda best', 'Lambda max']):
    plt.axvline(x=np.log(ref), linestyle='dashed', color='lightgrey')
    plt.text(np.log(ref), .95*plt.gca().get_ylim()[1], txt, ha='center')
plt.xlabel('log(Lambda)')
plt.ylabel('Mean-Squared Error');
# -
# Test MSE at the lambda with the best CV score.
pred = lasso3.predict(X_test, lamb=lasso3.lambda_max_)
mean_squared_error(y_test, pred)
# #### Fit model on full dataset
lasso4 = gln.ElasticNet(alpha=1, lambda_path=grid, scoring='mean_squared_error', n_folds=10)
lasso4.fit(X, y)
# These are not really close to the ones in the book.
pd.Series(lasso4.coef_path_[:,lasso4.lambda_max_inx_], index=X.columns)
# # Lab 3
# ### 6.7.1 Principal Components Regression
# Scikit-learn does not have an implementation of PCA and regression combined like the 'pls' package in R.
# https://cran.r-project.org/web/packages/pls/vignettes/pls-manual.pdf
# +
# Principal components of the (scaled) full feature matrix.
# NOTE(review): sklearn.cross_validation (used below) was removed in
# scikit-learn 0.20; this notebook targets an old sklearn.
pca = PCA()
X_reduced = pca.fit_transform(scale(X))
print(pca.components_.shape)
pd.DataFrame(pca.components_.T).loc[:4,:5]
# -
# The above loadings are the same as in R.
print(X_reduced.shape)
pd.DataFrame(X_reduced).loc[:4,:5]
# The above principal components are the same as in R.
# Variance explained by the principal components
np.cumsum(np.round(pca.explained_variance_ratio_, decimals=4)*100)
# +
# 10-fold CV, with shuffle
n = len(X_reduced)
kf_10 = cross_validation.KFold(n, n_folds=10, shuffle=True, random_state=1)
regr = LinearRegression()
mse = []
# Calculate MSE with only the intercept (no principal components in regression)
score = -1*cross_validation.cross_val_score(regr, np.ones((n,1)), y.ravel(), cv=kf_10, scoring='mean_squared_error').mean()
mse.append(score)
# Calculate MSE using CV for the 19 principle components, adding one component at the time.
for i in np.arange(1, 20):
    score = -1*cross_validation.cross_val_score(regr, X_reduced[:,:i], y.ravel(), cv=kf_10, scoring='mean_squared_error').mean()
    mse.append(score)
plt.plot(mse, '-v')
plt.xlabel('Number of principal components in regression')
plt.ylabel('MSE')
plt.title('Salary')
plt.xlim(xmin=-1);
# -
# The above plot indicates that the lowest training MSE is reached when doing regression on 18 components.
regr_test = LinearRegression()
regr_test.fit(X_reduced, y)
regr_test.coef_
# #### Fitting PCA with training data
# +
# Fit the PCA on the training data only, then cross-validate the regression.
pca2 = PCA()
X_reduced_train = pca2.fit_transform(scale(X_train))
n = len(X_reduced_train)
# 10-fold CV (note: shuffle is disabled here, unlike the full-data CV above)
kf_10 = cross_validation.KFold(n, n_folds=10, shuffle=False, random_state=1)
mse = []
# Calculate MSE with only the intercept (no principal components in regression)
score = -1*cross_validation.cross_val_score(regr, np.ones((n,1)), y_train, cv=kf_10, scoring='mean_squared_error').mean()
mse.append(score)
# Calculate MSE using CV for the 19 principle components, adding one component at the time.
for i in np.arange(1, 20):
    score = -1*cross_validation.cross_val_score(regr, X_reduced_train[:,:i], y_train, cv=kf_10, scoring='mean_squared_error').mean()
    mse.append(score)
plt.plot(np.array(mse), '-v')
plt.xlabel('Number of principal components in regression')
plt.ylabel('MSE')
plt.title('Salary')
plt.xlim(xmin=-1);
# -
# The above plot indicates that the lowest training MSE is reached when doing regression on 7 components.
# #### Transform test data with PCA loadings and fit regression on 7 principal components
# +
X_reduced_test = pca2.transform(scale(X_test))[:,:7]  # keep the first 7 principal components
# Train regression model on training data
regr = LinearRegression()
regr.fit(X_reduced_train[:,:7], y_train)
# Prediction with test data
pred = regr.predict(X_reduced_test)
mean_squared_error(y_test, pred)
# -
# ### 6.7.2 Partial Least Squares
# Scikit-learn PLSRegression gives the same results as the pls package in R when using method='oscorespls'. In the LAB exercise, the standard method is used, which is 'kernelpls'.
#
# When doing a slightly different fitting in R, the result is close to the one obtained using scikit-learn.
#
# pls.fit=plsr(Salary~., data=Hitters, subset=train, scale=TRUE, validation="CV", method='oscorespls')
# validationplot(pls.fit,val.type="MSEP", intercept = FALSE)
#
# See documentation:
# http://scikit-learn.org/dev/modules/generated/sklearn.cross_decomposition.PLSRegression.html#sklearn.cross_decomposition.PLSRegression
# +
n = len(X_train)
# 10-fold CV (shuffle disabled)
kf_10 = cross_validation.KFold(n, n_folds=10, shuffle=False, random_state=1)
mse = []
# CV mean squared error for PLS with 1..19 components.
for i in np.arange(1, 20):
    pls = PLSRegression(n_components=i)
    score = cross_validation.cross_val_score(pls, scale(X_train), y_train, cv=kf_10, scoring='mean_squared_error').mean()
    mse.append(-score)
plt.plot(np.arange(1, 20), np.array(mse), '-v')
plt.xlabel('Number of principal components in regression')
plt.ylabel('MSE')
plt.title('Salary')
plt.xlim(xmin=-1);
# +
# Final PLS fit with 2 components; report test-set MSE.
pls = PLSRegression(n_components=2)
pls.fit(scale(X_train), y_train)
mean_squared_error(y_test, pls.predict(scale(X_test)))
|
regularisation/model_selection_regularisation_ISLR.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="N4USTiLhUVXD"
# ## 実習3-3
# ### CIFAR-10をCNNで分類する。
# ### 目的:Data Augmentationによる精度上昇を確認
# + id="nzk7PIdpUVXD" executionInfo={"status": "ok", "timestamp": 1613191642387, "user_tz": -540, "elapsed": 2127, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
import keras
from keras.datasets import cifar10
from keras.preprocessing.image import ImageDataGenerator #data augmentationするため
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
from keras import optimizers
import os
import matplotlib.pyplot as plt
import numpy as np
import time
# + id="6Pnivbr7UVXH" executionInfo={"status": "ok", "timestamp": 1613191642388, "user_tz": -540, "elapsed": 2123, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Training hyperparameters.
batch_size = 50
num_classes = 10
epochs = 10
data_augmentation = True  # set to True to enable data augmentation
# + id="aZ9vmm-bUVXK" executionInfo={"status": "ok", "timestamp": 1613191642389, "user_tz": -540, "elapsed": 2121, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Image size
img_rows, img_cols = 32, 32
# + id="YzzEXiaxUVXN" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191646327, "user_tz": -540, "elapsed": 6049, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="de55373e-860c-46b4-b563-58b6fbb382bc"
# Download CIFAR-10; it is already split into training and test sets.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# + id="DJm6pgKoUVXP" executionInfo={"status": "ok", "timestamp": 1613191646329, "user_tz": -540, "elapsed": 6048, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Arrange the image tensors to match the backend's expected data format.
# With "channels_first" the shape is (batch, channels, row, column);
# with "channels_last" it is (batch, row, column, channels).
# BUG FIX: CIFAR-10 images are RGB, so the channel dimension is 3 — the
# original built input_shape with 1 channel, which is only correct for
# grayscale data such as MNIST.
if K.image_data_format() == 'channels_first':
    x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
    input_shape = (3, img_rows, img_cols)
else:
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
    input_shape = (img_rows, img_cols, 3)
# + id="rVdE1JaNUVXR" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191646330, "user_tz": -540, "elapsed": 6042, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="98ff8d13-a6fa-490a-8bb9-b94cf8601c97"
# Training images and labels
print('x_train shape:', x_train.shape)
print('y_train shape:', y_train.shape)
# Test images and labels
print('x_test shape:', x_test.shape)
print('y_test shape:', y_test.shape)
# + id="1LMj6FJWUVXT" colab={"base_uri": "https://localhost:8080/", "height": 650} executionInfo={"status": "ok", "timestamp": 1613191650641, "user_tz": -540, "elapsed": 10344, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="9a108657-46c8-45e5-8343-a5060bfdafc9"
# Show the first 100 training images in a 10x10 grid with their labels.
# NOTE(review): original indentation was lost in extraction; subplots_adjust
# is idempotent, so calling it per iteration preserves the rendered output.
for i in range(100):
    plt.subplot(10,10,i + 1)
    plt.axis("off")
    plt.imshow(x_train[i,:32,:32])
    plt.text(0, -2, y_train[i], fontsize=10, color='red')  # label number styling
    plt.subplots_adjust(left=0, right=1.5, top=2, bottom=0)  # margins between images
plt.show()
# + id="FJ8iJr9dUVXW" colab={"base_uri": "https://localhost:8080/", "height": 67} executionInfo={"status": "ok", "timestamp": 1613191650642, "user_tz": -540, "elapsed": 10336, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="eb821415-6b6b-41b5-bbeb-202d9f432b66"
# Count how many samples each class has (confirms the data set is balanced)
a, b = np.unique(y_train, return_counts=True)
c, d = np.unique(y_test, return_counts=True)
print(dict(zip(a, b)))
print(dict(zip(c, d)))
"""
0:'airplane',1:'automobile',2:'bird',3:'cat',4:'deer',5:'dog',6:'frog',7:'horse',8:'ship',9:'truck'
"""
# + id="aeUBiHJSUVXY" executionInfo={"status": "ok", "timestamp": 1613191650643, "user_tz": -540, "elapsed": 10333, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Convert the labels (y_train, y_test) to one-hot vectors
Y_train = keras.utils.to_categorical(y_train, num_classes)
Y_test = keras.utils.to_categorical(y_test, num_classes)
# + id="F2t7W2hbUVXa" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191650644, "user_tz": -540, "elapsed": 10327, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="2b41aeb6-febd-44a0-e091-dd293932d2e3"
# Confirm the labels are now one-hot encoded
Y_train[0:10]
# + id="SMz8cpYZUVXb" executionInfo={"status": "ok", "timestamp": 1613191650645, "user_tz": -540, "elapsed": 10326, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Normalize pixel values from [0, 255] to [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# + id="JGtml8slUVXd" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191655332, "user_tz": -540, "elapsed": 15007, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="6dee9a0d-a0ee-45de-b19d-50cb283975a1"
# CNN architecture: two Conv-Conv-Pool-Dropout blocks followed by a dense
# classifier head with softmax over the 10 classes.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()  # print the model architecture
# + id="b-U--_iDUVXg" executionInfo={"status": "ok", "timestamp": 1613191655333, "user_tz": -540, "elapsed": 15005, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Initialize the RMSprop optimizer
# NOTE(review): `lr`/`decay` are legacy argument names; newer Keras uses
# `learning_rate` (and `decay` was removed from the optimizers).
opt = keras.optimizers.RMSprop(lr=0.0001, decay=1e-6)
# + id="GSQyl2xAUVXi" executionInfo={"status": "ok", "timestamp": 1613191655336, "user_tz": -540, "elapsed": 15006, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
# Compile the model
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# + id="Grlfjr9JUVXk" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191947637, "user_tz": -540, "elapsed": 307301, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="035c6b66-3da2-4887-8cf2-83b9945b9635"
# Start training, with or without on-the-fly data augmentation.
start = time.time()  # record the wall-clock time when training starts
if not data_augmentation:
    print('Not using data augmentation.')
    hist = model.fit(x_train, Y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     validation_data=(x_test, Y_test),
                     shuffle=True)
else:
    print('Using real-time data augmentation.')
    # Data augmentation is configured below:
    datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample's mean to 0
        featurewise_std_normalization=False,  # divide inputs by the dataset std
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # ZCA whitening
        rotation_range=0,  # randomly rotate images within this angle range
        width_shift_range=0.1,  # random horizontal shift (fraction of image width)
        height_shift_range=0.1,  # random vertical shift (fraction of image height)
        horizontal_flip=True,  # randomly flip images horizontally
        vertical_flip=False)  # randomly flip images vertically
    # Compute quantities required for featurewise normalization
    # (std, mean, and principal components if ZCA whitening is applied).
    datagen.fit(x_train)
    # Feed batches produced by datagen.flow() to model.fit.
    # BUG FIX: steps_per_epoch must be an integer; the original used the
    # float 50000/batch_size and hard-coded the training-set size.
    hist = model.fit(datagen.flow(x_train, Y_train,
                                  batch_size=batch_size),
                     steps_per_epoch=len(x_train) // batch_size,  # mini-batches per epoch
                     epochs=epochs,
                     validation_data=(x_test, Y_test),
                     workers=4,
                     )
elapsed_time = time.time() - start  # elapsed = end time - start time
print("計算時間:",elapsed_time,"秒")
# Report loss and accuracy on the test set.
scores = model.evaluate(x_test, Y_test, verbose=0)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# + id="Tm8FHhlqUVXm" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191947639, "user_tz": -540, "elapsed": 307297, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="8a561673-9b6a-4325-e307-ee105ec2a451"
# Save the trained model and its weights
save_dir = os.path.join(os.getcwd(), 'saved_models')  # os.getcwd() is the current working directory
model_name = 'keras_cifar10_trained_model.h5'
if not os.path.isdir(save_dir):  # create the 'saved_models' folder if it does not exist
    os.makedirs(save_dir)
model_path = os.path.join(save_dir, model_name)  # full destination path
model.save(model_path)  # save the model
print('Saved trained model at %s ' % model_path)
# + id="s9FNhVFbUVXo" colab={"base_uri": "https://localhost:8080/", "height": 278} executionInfo={"status": "ok", "timestamp": 1613191947640, "user_tz": -540, "elapsed": 307291, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="bfa7a37b-2516-4664-e8fa-173b361c663d"
loss = hist.history['loss']
val_loss = hist.history['val_loss']
# Plot training vs. validation loss per epoch
plt.plot(range(epochs), loss, marker='.', label='loss')
plt.plot(range(epochs), val_loss, marker='.', label='val_loss')
plt.legend(loc='best', fontsize=10)
plt.grid()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
# + id="o-UBs18aUVXr" colab={"base_uri": "https://localhost:8080/", "height": 280} executionInfo={"status": "ok", "timestamp": 1613191948115, "user_tz": -540, "elapsed": 307759, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="a2648694-b620-4124-ba76-65e6e8608b23"
acc = hist.history['accuracy']
val_acc = hist.history['val_accuracy']
# Plot training vs. validation accuracy per epoch.
for series, series_label in ((acc, 'acc'), (val_acc, 'val_acc')):
    plt.plot(range(epochs), series, marker='.', label=series_label)
plt.legend(loc='best', fontsize=10)
plt.grid()
plt.xlabel('epoch')
plt.ylabel('acc')
plt.show()
# + id="7mPAF8p8UVXt" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1613191948699, "user_tz": -540, "elapsed": 308332, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}} outputId="23627a4a-f19a-482b-840f-8dc6653ad1c7"
# Output the CNN's predictions
preds = model.predict(x_test)  # post-softmax class probabilities
cls = model.predict_classes(x_test)  # predicted class indices
# NOTE(review): Sequential.predict_classes was removed in TF 2.6; on newer
# versions use np.argmax(model.predict(x_test), axis=-1) instead.
print(preds[0:5])
print(cls[0:5])
print(y_test[0:5])  # compare the predicted classes with the ground truth
# + [markdown] id="bRzxeogoUVXv"
# ### 課題3-3-1
# #### Data Augmentationの使用前後でaccuracyが上昇することを確認してください。
# + id="727wj8aSUVXw" executionInfo={"status": "ok", "timestamp": 1613191949188, "user_tz": -540, "elapsed": 308814, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03431199727064994008"}}
|
PDL/3-3_Data_Augmentation_CIFER10_CNN.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import QUANTAXIS as QA
"""
在讨论完QA_fetch_get系列以后 我们主要考虑的是从本地获取数据并进行分析
QA_fetch_xxx_adv系列
"""
# +
# 首先进行数据获取 多个股票的日线数据
#QA.QA_fetch_stock_day_adv("000001",'2017-01-01','2017-11-30')
data=QA.QA_fetch_stock_day_adv(["000001",'000002','000004'],'2017-01-01','2017-11-30')
print(data)
# -
c=data.data
c.index.levels[0]
# +
# 一些基础的值
data.open
data.close
data.high
data.low
data.date
data.datetime
data.code
# +
# 对数据进行复权处理
#data=data.to_hfq() # 批量后复权
data=data.to_qfq() # 批量前复权
# +
# 查看数据
# 有三种方式查看数据
data.show()
data()
data.data
# 一般我们推荐直接用()的方式查看数据 这个数据是Pandas.Dataframe格式
# -
# Reverse the row order of the data with .reverse()
reversed_data=data.reverse()
print(reversed_data)
# BUG FIX: the original called print(reverse_data()) — `reverse_data` is an
# undefined name (NameError); the variable is `reversed_data`.
print(reversed_data())
# Query the data with .query()
data.query('date=="2017-01-03"')
# Filtering — select one stock's data with select_code
data.select_code('000001')
print(data.select_code('000001')())
# Filtering — select data within a time range
res=data.select_time('2017-01-01','2017-01-31')
print(res.data)
# If end is omitted, all data from start onwards is returned
res=data.select_time('2017-11-01')
print(res.data)
# Filtering — select data before/after a given point in time
# method
# lt  less than             <
# lte less than or equal    <=
# gt  greater than          >
# gte greater than or equal >=
# e   equal                 ==
res=data.select_time_with_gap(gap=5,method='lte',time='2017-01-31')
print(res.data)
# Filtering — get a single stock's bar for a specific day
# if_trade: when if_trade=False and the stock was suspended that day,
# the bar of the last trading day is returned instead
res=data.get_bar(code='000001',time='2017-01-20',if_trade=True)
print(res.data)
# +
# Cross-section of the data with pivot
res=data.pivot('open')
print(res)
# +
# Apply a function to the data block with add_func
# Usually combined with indicator functions
# QA.QA_indicator_ATR — the ATR indicator
res=data.add_func(QA.QA_indicator_ATR,10)
print(res)
# -
# Format conversion: to_dict, to_list, to_numpy
res=data.to_dict()
print(res)
res=data.to_list()
print(res)
res=data.to_numpy()
print(res)
# +
# Plotting
data.plot('000001') # plot a single stock
data.plot() # plot all stocks
# -
|
jupyterexample/DataStruct(QA_fetch_adv).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
# import utils;
# reload(utils)
# from utils import *
# + [markdown] heading_collapsed=true
# ## Setup
# + [markdown] hidden=true
# We're going to download the collected works of Nietzsche to use as our data for this class.
# + hidden=true
# Download the corpus (Nietzsche's collected works) and read it as one string.
# NOTE(review): get_file comes from the utils import near the top of the notebook.
path = get_file('nietzsche.txt', origin="https://s3.amazonaws.com/text-datasets/nietzsche.txt")
text = open(path).read()
print('corpus length:', len(text))
# + hidden=true
# Vocabulary: sorted distinct characters; +1 reserves index 0 for padding.
chars = sorted(list(set(text)))
vocab_size = len(chars)+1
print('total chars:', vocab_size)
# + [markdown] hidden=true
# Sometimes it's useful to have a zero value in the dataset, e.g. for padding
# + hidden=true
chars.insert(0, "\0")
# + hidden=true
''.join(chars[1:-6])
# + [markdown] hidden=true
# Map from chars to indices and back again
# + hidden=true
# Map each character to its vocabulary index, and each index back to its character.
char_indices = {c: i for i, c in enumerate(chars)}
indices_char = {i: c for i, c in enumerate(chars)}
# + [markdown] hidden=true
# *idx* will be the data we use from now own - it simply converts all the characters to their index (based on the mapping above)
# + hidden=true
idx = [char_indices[c] for c in text]  # the whole corpus as a list of character indices
# -
text[:1000]
# + hidden=true
idx[:10]
# + hidden=true
''.join(indices_char[i] for i in idx[:70])
# -
# ## Returning sequences
# + [markdown] heading_collapsed=true
# ### Create inputs
# + [markdown] hidden=true
# To use a sequence model, we can leave our input unchanged - but we have to change our output to a sequence (of course!)
#
# Here, c_out_dat is identical to c_in_dat, but moved across 1 character.
# + hidden=true
# Targets: the input sequence shifted one character ahead.
# FIX: `xrange` is Python 2 only and raises NameError on Python 3; this
# notebook otherwise uses Python 3 syntax, so use `range` (as the outer
# comprehension already does).
#c_in_dat = [[idx[i+n] for i in range(0, len(idx)-1-cs, cs)]
#            for n in range(cs)]
c_out_dat = [[idx[i+n] for i in range(1, len(idx)-cs, cs)]
             for n in range(cs)]
# + hidden=true
# Stack each position's column into an array, dropping the last two (possibly
# truncated) entries.
ys = [np.stack(c[:-2]) for c in c_out_dat]
# + [markdown] hidden=true
# Reading down each column shows one set of inputs and outputs.
# + hidden=true
[xs[n][:cs] for n in range(cs)]  # inputs, one list per sequence position
# + hidden=true
[ys[n][:cs] for n in range(cs)]  # targets: the same columns shifted one character
# + [markdown] heading_collapsed=true
# ### Create and train model
# + hidden=true
# Shared dense layers: embedding->hidden, hidden->hidden (identity init),
# and hidden->softmax over the vocabulary.
dense_in = Dense(n_hidden, activation='relu')
dense_hidden = Dense(n_hidden, activation='relu', init='identity')
dense_out = Dense(vocab_size, activation='softmax', name='output')
# + [markdown] hidden=true
# We're going to pass a vector of all zeros as our starting point - here's our input layers for that:
# + hidden=true
inp1 = Input(shape=(n_fac,), name='zeros')
hidden = dense_in(inp1)
# + hidden=true
# Unrolled RNN: at each of the cs steps, merge the character embedding with
# the recurrent hidden state and emit a prediction.
# NOTE(review): c_ins, xs, cs, n_hidden and n_fac are defined in earlier
# (unseen) cells of this notebook.
outs = []
for i in range(cs):
    c_dense = dense_in(c_ins[i][1])
    hidden = dense_hidden(hidden)
    hidden = merge([c_dense, hidden], mode='sum')
    # every layer now has an output
    outs.append(dense_out(hidden))
# + hidden=true
model = Model([inp1] + [c[0] for c in c_ins], outs)
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
# + hidden=true
zeros = np.tile(np.zeros(n_fac), (len(xs[0]),1))
zeros.shape
# + hidden=true
get_nexts_keras(' this is')
# + [markdown] heading_collapsed=true
# ### One-hot sequence model with keras
# + [markdown] hidden=true
# This is the keras version of the theano model that we're about to create.
# + hidden=true
# Keras one-hot sequence model: a SimpleRNN over cs one-hot characters with a
# per-timestep softmax output.
model=Sequential([
        SimpleRNN(n_hidden, return_sequences=True, input_shape=(cs, vocab_size),
                  activation='relu', inner_init='identity'),
        TimeDistributed(Dense(vocab_size, activation='softmax')),
    ])
model.compile(loss='categorical_crossentropy', optimizer=Adam())
# + hidden=true
# One-hot encode inputs and targets, stacked to shape (samples, cs, vocab_size).
oh_ys = [to_categorical(o, vocab_size) for o in ys]
oh_y_rnn=np.stack(oh_ys, axis=1)
oh_xs = [to_categorical(o, vocab_size) for o in xs]
oh_x_rnn=np.stack(oh_xs, axis=1)
oh_x_rnn.shape, oh_y_rnn.shape
# + hidden=true
model.fit(oh_x_rnn, oh_y_rnn, batch_size=64, nb_epoch=8)
# + hidden=true
def get_nexts_oh(inp):
    """Predict the next character for every position of the string `inp`.

    Relies on module-level state: `char_indices` (char -> index map),
    `chars` (index -> char list), `vocab_size`, and the trained Keras
    `model` (expects a one-hot sequence with a leading batch axis).
    Prints the input characters and returns the list of predicted
    next-characters, one per input position.
    """
    # Map characters to integer indices, then one-hot encode them.
    idxs = np.array([char_indices[c] for c in inp])
    arr = to_categorical(idxs, vocab_size)
    # Add a batch dimension of 1 and take the single prediction back out.
    p = model.predict(arr[np.newaxis,:])[0]
    print(list(inp))
    # argmax over the vocabulary gives the most likely next character.
    return [chars[np.argmax(o)] for o in p]
# + hidden=true
get_nexts_oh(' this is')
# + [markdown] heading_collapsed=true
# ## Stateful model with keras
# + hidden=true
bs=64
# + [markdown] hidden=true
# A stateful model is easy to create (just add "stateful=True") but harder to train. We had to add batchnorm and use LSTM to get reasonable results.
#
# When using stateful in keras, you have to also add 'batch_input_shape' to the first layer, and fix the batch size there.
# + hidden=true
model=Sequential([
Embedding(vocab_size, n_fac, input_length=cs, batch_input_shape=(bs,8)),
BatchNormalization(),
LSTM(n_hidden, return_sequences=True, stateful=True),
TimeDistributed(Dense(vocab_size, activation='softmax')),
])
# + hidden=true
model.compile(loss='sparse_categorical_crossentropy', optimizer=Adam())
# + [markdown] hidden=true
# Since we're using a fixed batch shape, we have to ensure our inputs and outputs are an even multiple of the batch size.
# + hidden=true
mx = len(x_rnn)//bs*bs
# + hidden=true
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
# + hidden=true
model.optimizer.lr=1e-4
# + hidden=true
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
# + hidden=true
model.fit(x_rnn[:mx], y_rnn[:mx], batch_size=bs, nb_epoch=4, shuffle=False)
# + [markdown] heading_collapsed=true
# ## Pure python RNN!
# + [markdown] heading_collapsed=true hidden=true
# ### Set up basic functions
# + [markdown] hidden=true
# Now we're going to try to repeat the above theano RNN, using just pure python (and numpy). Which means, we have to do everything ourselves, including defining the basic functions of a neural net! Below are all of the definitions, along with tests to check that they give the same answers as theano. The functions ending in `_d` are the derivatives of each function.
# + hidden=true
def sigmoid(x):
    """Logistic function: 1 / (1 + e^-x), elementwise for arrays."""
    return 1. / (1. + np.exp(-x))
def sigmoid_d(x):
    """Derivative of the logistic function: sigma(x) * (1 - sigma(x))."""
    s = sigmoid(x)
    return s * (1. - s)
# + hidden=true
def relu(x):
    """Rectified linear unit: elementwise max(0, x)."""
    return np.where(x > 0., x, 0.)
def relu_d(x):
    """Subgradient of relu: 1.0 where x > 0, else 0.0."""
    return np.greater(x, 0.).astype(float)
# + hidden=true
relu(np.array([3.,-3.])), relu_d(np.array([3.,-3.]))
# + hidden=true
def dist(a, b):
    """Squared-error distance between a and b."""
    return (a - b) ** 2
def dist_d(a, b):
    """Derivative of dist with respect to a: 2 * (a - b)."""
    return 2 * (a - b)
# + hidden=true
import pdb
# + hidden=true
# Small constant keeping log() and the gradient's division away from 0 and 1.
eps = 1e-7
def x_entropy(pred, actual):
    """Cross-entropy loss: -sum(actual * log(pred)), with pred clipped
    into [eps, 1-eps] to avoid log(0)."""
    return -np.sum(actual * np.log(np.clip(pred, eps, 1-eps)))
def x_entropy_d(pred, actual):
    """Derivative of x_entropy w.r.t. pred.

    Clips pred exactly like the forward pass, so the gradient stays finite
    when a predicted probability hits 0 (the original divided by the raw
    pred, producing inf/nan at the boundary the forward pass guards against).
    """
    return -actual / np.clip(pred, eps, 1-eps)
# + hidden=true
def softmax(x):
    """Numerically stable softmax over the whole array.

    Shifts by max(x) before exponentiating so large inputs do not overflow
    to inf (the original np.exp(x)/np.exp(x).sum() produces nan for x on the
    order of several hundred); the shift cancels in the ratio, so the result
    is mathematically unchanged.
    """
    e = np.exp(x - np.max(x))
    return e / e.sum()
# + hidden=true
def softmax_d(x):
    """Jacobian of softmax: diag(s) - outer(s, s) where s = softmax(x)."""
    sm = softmax(x)
    # Off-diagonal entries: -s_i * s_j (via broadcasting an outer product).
    res = np.expand_dims(-sm,-1)*sm
    # Diagonal entries: s_i * (1 - s_i).
    res[np.diag_indices_from(res)] = sm*(1-sm)
    return res
# + hidden=true
test_preds = np.array([0.2,0.7,0.1])
test_actuals = np.array([0.,1.,0.])
nnet.categorical_crossentropy(test_preds, test_actuals).eval()
# + hidden=true
x_entropy(test_preds, test_actuals)
# + hidden=true
test_inp = T.dvector()
test_out = nnet.categorical_crossentropy(test_inp, test_actuals)
test_grad = theano.function([test_inp], T.grad(test_out, test_inp))
# + hidden=true
test_grad(test_preds)
# + hidden=true
x_entropy_d(test_preds, test_actuals)
# + hidden=true
pre_pred = random(oh_x_rnn[0][0].shape)
preds = softmax(pre_pred)
actual = oh_x_rnn[0][0]
# + hidden=true
np.allclose(softmax_d(pre_pred).dot(loss_d(preds,actual)), preds-actual)
# + hidden=true
softmax(test_preds)
# + hidden=true
nnet.softmax(test_preds).eval()
# + hidden=true
test_out = T.flatten(nnet.softmax(test_inp))
# + hidden=true
test_grad = theano.function([test_inp], theano.gradient.jacobian(test_out, test_inp))
# + hidden=true
test_grad(test_preds)
# + hidden=true
softmax_d(test_preds)
# + hidden=true
act=relu
act_d = relu_d
# + hidden=true
loss=x_entropy
loss_d=x_entropy_d
# + [markdown] hidden=true
# We also have to define our own scan function. Since we're not worrying about running things in parallel, it's very simple to implement:
# + hidden=true
def scan(fn, start, seq):
    """Fold fn over seq, collecting every intermediate state.

    Starting from `start`, repeatedly applies fn(state, item) and records
    each new state. Returns the list of states (one per item in seq,
    excluding `start` itself) -- a sequential, single-threaded stand-in
    for theano's scan.
    """
    states = []
    state = start
    for item in seq:
        state = fn(state, item)
        states.append(state)
    return states
# + [markdown] hidden=true
# ...for instance, `scan` on `+` is the cumulative sum.
# + hidden=true
scan(lambda prev,curr: prev+curr, 0, range(5))
# + [markdown] heading_collapsed=true hidden=true
# ### Set up training
# + [markdown] hidden=true
# Let's now build the functions to do the forward and backward passes of our RNN. First, define our data and shape.
# + hidden=true
inp = oh_x_rnn
outp = oh_y_rnn
n_input = vocab_size
n_output = vocab_size
# + hidden=true
inp.shape, outp.shape
# + [markdown] hidden=true
# Here's the function to do a single forward pass of an RNN, for a single character.
# + hidden=true
def one_char(prev, item):
    """One forward RNN step for a single character.

    `prev` is the carried state tuple (tot_loss, pre_hidden, pre_pred,
    hidden, ypred); `item` is an (x, y) pair of one-hot input and target
    vectors. Reads the module-level weight matrices w_x, w_h, w_y and the
    module-level act / loss / softmax functions. Returns the updated state
    tuple in the same order, suitable for threading through scan().
    """
    # Previous state
    tot_loss, pre_hidden, pre_pred, hidden, ypred = prev
    # Current inputs and output
    x, y = item
    # Pre-activation of the hidden state: input projection + recurrent projection.
    pre_hidden = np.dot(x,w_x) + np.dot(hidden,w_h)
    hidden = act(pre_hidden)
    pre_pred = np.dot(hidden,w_y)
    ypred = softmax(pre_pred)
    return (
        # Keep track of loss so we can report it
        tot_loss+loss(ypred, y),
        # Used in backprop
        pre_hidden, pre_pred,
        # Used in next iteration
        hidden,
        # To provide predictions
        ypred)
# + [markdown] hidden=true
# We use `scan` to apply the above to a whole sequence of characters.
# + hidden=true
# Pair the n-th input sequence with its target sequence: one (x, y) per
# character. NOTE(review): under Python 3 zip() is a lazy iterator consumed
# once by scan -- fine here, but confirm if this notebook targets Python 2.
def get_chars(n): return zip(inp[n], outp[n])
# Full forward pass over sequence n: fold one_char across the characters,
# starting from zero loss and a zero hidden state of size n_hidden.
def one_fwd(n): return scan(one_char, (0,0,0,np.zeros(n_hidden),0), get_chars(n))
# + [markdown] hidden=true
# Now we can define the backward step. We use a loop to go through every element of the sequence. The derivatives are applying the chain rule to each step, and accumulating the gradients across the sequence.
# + hidden=true
# "Columnify" a vector
def col(x): return x[:,newaxis]
def one_bkwd(args, n):
global w_x,w_y,w_h
i=inp[n] # 8x86
o=outp[n] # 8x86
d_pre_hidden = np.zeros(n_hidden) # 256
for p in reversed(range(len(i))):
totloss, pre_hidden, pre_pred, hidden, ypred = args[p]
x=i[p] # 86
y=o[p] # 86
d_pre_pred = softmax_d(pre_pred).dot(loss_d(ypred,y)) # 86
d_pre_hidden = (np.dot(d_pre_hidden, w_h.T)
+ np.dot(d_pre_pred,w_y.T)) * act_d(pre_hidden) # 256
# d(loss)/d(w_y) = d(loss)/d(pre_pred) * d(pre_pred)/d(w_y)
w_y -= col(hidden) * d_pre_pred * alpha
# d(loss)/d(w_h) = d(loss)/d(pre_hidden[p-1]) * d(pre_hidden[p-1])/d(w_h)
if (p>0): w_h -= args[p-1][3].dot(d_pre_hidden) * alpha
w_x -= col(x)*d_pre_hidden * alpha
return d_pre_hidden
# + [markdown] hidden=true
# Now we can set up our initial weight matrices. Note that we're not using bias at all in this example, in order to keep things simpler.
# + hidden=true
scale=math.sqrt(2./n_input)
w_x = normal(scale=scale, size=(n_input,n_hidden))
w_y = normal(scale=scale, size=(n_hidden, n_output))
w_h = np.eye(n_hidden, dtype=np.float32)
# + [markdown] hidden=true
# Our loop looks much like the theano loop in the previous section, except that we have to call the backwards step ourselves.
# + hidden=true
overallError=0
alpha=0.0001
for n in range(10000):
res = one_fwd(n)
overallError+=res[-1][0]
deriv = one_bkwd(res, n)
if(n % 1000 == 999):
print ("Error:{:.4f}; Gradient:{:.5f}".format(
overallError/1000, np.linalg.norm(deriv)))
overallError=0
|
deeplearning1/nbs/lesson6-pure-python-Michael.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Variable
# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
state = tf.Variable(0, name='counter')
print state.name
one = tf.constant(1)
new_value = tf.add(state,one)
update = tf.assign(state, new_value)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for _ in range(3):
sess.run(update)
print sess.run(state)
# -
# # Session
# +
matrix1 = tf.constant([[3,3]])
matrix2 = tf.constant([[2],
[2]])
product = tf.matmul(matrix1, matrix2)
#sess = tf.Session()
#result=sess.run(product)
#print(result)
#sess.close()
with tf.Session() as sess:
result = sess.run(product)
print result
# -
# # Placeholder
# +
input1 = tf.placeholder(tf.float32)
input2 = tf.placeholder(tf.float32)
output = tf.multiply(input1, input2)
with tf.Session() as sess:
print sess.run(output, feed_dict={input1:[7.], input2:[2.]})
# -
# # 神经网络
# ## 添加层
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Add one fully connected layer: activation(inputs @ Weights + biases).

    inputs: tensor of shape [batch, in_size]; returns a [batch, out_size]
    tensor. When activation_function is None the raw affine output is
    returned.
    """
    Weights = tf.Variable(tf.random_normal([in_size, out_size]))
    # Initialize biases at a small positive 0.1. The constant belongs inside
    # the Variable initializer -- the original added 0.1 *outside*
    # tf.Variable, making `biases` a derived tensor rather than a variable
    # initialized at 0.1; the other add_layer variants in this file put the
    # offset inside, so this now matches them.
    biases = tf.Variable(tf.zeros([1,out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        output = Wx_plus_b
    else:
        output = activation_function(Wx_plus_b)
    return output
# ## 训练网络
# +
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
xs = tf.placeholder(tf.float32,[None,1])
ys = tf.placeholder(tf.float32,[None,1])
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
loss =tf.reduce_mean( tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]) )
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
for i in range(1000):
sess.run(train_step, feed_dict={xs:x_data, ys:y_data})
if i % 50 == 0:
print sess.run(loss, feed_dict={xs:x_data, ys:y_data})
# -
# ## 结果可视化
# * matplotlib
# +
# 在ide里面或者是直接python运行文件,可以看到一个动态的图的过程
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.scatter(x_data, y_data)
plt.ion()
plt.show()
# plt.show(block=False)
with tf.Session() as sess:
sess.run(init)
for i in range(1000):
sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
if i % 50 == 0:
# print sess.run(loss, feed_dict={xs:x_data, ys:y_data})
try:
ax.lines.remove(lines[0])
except Exception:
pass
prediction_value = sess.run(prediction, feed_dict={xs: x_data})
lines = ax.plot(x_data, prediction_value, 'r-', lw=5)
plt.pause(0.1)
# -
# * tensorboard
# +
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(inputs, in_size, out_size, activation_function=None):
    """Fully connected layer wrapped in tf.name_scope blocks.

    Identical math to the plain add_layer above (activation(inputs @ W + b),
    biases initialized at 0.1), but every op is named so the graph renders
    cleanly in TensorBoard.
    """
    with tf.name_scope('layer'):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1,out_size])+0.1, name='b')
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights) , biases)
        # No activation -> return the raw affine output (e.g. for regression).
        if activation_function is None:
            output = Wx_plus_b
        else:
            output = activation_function(Wx_plus_b)
        return output
# make some real data
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# placeholder for input:x,y
with tf.name_scope('inputs'):
xs = tf.placeholder(tf.float32,[None,1], name='x_inpiut')
ys = tf.placeholder(tf.float32,[None,1], name='y_input')
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)
with tf.name_scope('lose'):
loss =tf.reduce_mean( tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]) )
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
init = tf.global_variables_initializer()
sess = tf.Session()
writer = tf.summary.FileWriter('./', sess.graph)
# Very important step
sess.run(init)
# +
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
def add_layer(n_layer, inputs, in_size, out_size, activation_function=None):
    """Fully connected layer with TensorBoard histogram summaries.

    Same computation as the other add_layer variants, but takes an extra
    leading `n_layer` index used to name the scope ('layer1', 'layer2', ...)
    and records histograms of the weights, biases, pre-activation, and
    output for inspection in TensorBoard.
    """
    with tf.name_scope('layer%d' % n_layer):
        with tf.name_scope('weights'):
            Weights = tf.Variable(tf.random_normal([in_size, out_size]), name='W')
            tf.summary.histogram('/weights', Weights)
        with tf.name_scope('biases'):
            biases = tf.Variable(tf.zeros([1,out_size])+0.1, name='b')
            tf.summary.histogram('/biases', biases)
        with tf.name_scope('Wx_plus_b'):
            Wx_plus_b = tf.add(tf.matmul(inputs, Weights) , biases)
            tf.summary.histogram('Wx_plus_b', Wx_plus_b)
        # No activation -> raw affine output.
        if activation_function is None:
            output = Wx_plus_b
        else:
            output = activation_function(Wx_plus_b)
        tf.summary.histogram('/output', output)
        return output
# make some real data
x_data = np.linspace(-1,1,300)[:,np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise
# placeholder for input:x,y
with tf.name_scope('inputs'):
xs = tf.placeholder(tf.float32,[None,1], name='x_inpiut')
ys = tf.placeholder(tf.float32,[None,1], name='y_input')
# Add hidden layer
l1 = add_layer(1, xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(2,l1, 10, 1, activation_function=None)
# Erro / Loss
with tf.name_scope('lose'):
loss =tf.reduce_mean( tf.reduce_sum(tf.square(ys - prediction), reduction_indices=[1]) )
tf.summary.scalar('loss', loss)
with tf.name_scope('train'):
train_step = tf.train.GradientDescentOptimizer(0.1).minimize(loss)
# Session
sess = tf.Session()
# Summary
merged = tf.summary.merge_all()
writer = tf.summary.FileWriter('./logs', sess.graph)
# Very important step, init Variables
sess.run(tf.global_variables_initializer())
# Train
for i in range(1000):
feed_dict = {xs:x_data, ys:y_data}
sess.run(train_step, feed_dict)
if i % 50 == 0:
result = sess.run(merged, feed_dict)
writer.add_summary(result, i)
# -
# * 找到目录,执行 tensorboard --logdir='./logs'
# * 打开浏览器,点击Graph便可看到显示
# # 分类
|
tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# !pip install deepface
import sys
import numpy as np
import cv2
import os
from datetime import datetime
from deepface import DeepFace
from deepface.commons import functions, realtime
from tqdm import tqdm
import matplotlib.pyplot as plt
from deepface.detectors import FaceDetector
# +
# model = DeepFace.build_model("Facenet")
# -
path = '../db_path/Rishav'
# +
cap = cv2.VideoCapture(0)
faces = 0
frames = 0
max_faces = 50
max_bbox = np.zeros(4)
while faces < max_faces:
ret, frame = cap.read()
frames += 1
dtString = str(datetime.now().microsecond)
if not (os.path.exists(path)):
os.makedirs(path)
# if frames % 3 == 0:
# cv2.imwrite(os.path.join(path, "{}.jpg".format(dtString)), frame)
print(frame.shape)
cv2.imshow("Face detection", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# -
realtime.analysis(path)
length = len(os.listdir(path))
images = os.listdir(path)
for i in tqdm(range(0, length)):
image_path = os.path.join(path,images[i])
img = functions.preprocess_face(image_path, target_size= (160, 160), detector_backend= 'mtcnn')
embedding = model.predict(img)[0].tolist()
# img = DeepFace.detectFace(image_path, detector_backend= 'mtcnn')
print(len(embedding))
plt.imshow(img[0][:,:,::-1])
plt.show()
# cv2.imshow("image", img)
break
# ## Get the Face Embeddings for different models
# +
cap = cv2.VideoCapture(0)
faces = 0
frames = 0
max_faces = 10
max_bbox = np.zeros(4)
model = DeepFace.build_model("Facenet")
while faces < max_faces:
ret, frame = cap.read()
frames += 1
dtString = str(datetime.now().microsecond)
# if not (os.path.exists(path)):
# os.makedirs(path)
if frames % 3 == 0:
try:
img = functions.preprocess_face(frame, target_size= (model.input_shape[1], model.input_shape[2]), detector_backend= 'mtcnn', enforce_detection= False)
embedding = model.predict(img)[0].tolist()
# print(len(embedding))
faces += 1
except Exception as e:
print(e)
continue
cv2.imshow("Face detection", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# -
model.summary()
t = (1, 160, 160, 3)
s = t[1], t[2], t[3]
(shape[1], shape[2], shape[3])
type(img)
img1 = np.reshape(img, (shape[1], shape[2], shape[3]))
img1.shape
type(img1)
plt.imshow(img1)
embedding = model.predict(img)[0].tolist()
len(embedding)
g = cv2.imread(path + '25181.jpg')
cv2.imshow("adad", g)
print(g)
# Fixed: the original placed a positional argument (`img`) after a keyword
# argument (detector_backend='mtcnn'), which is a SyntaxError. Pass the
# backend positionally, matching the detect_faces call used later in this
# notebook: detect_faces(face_detector, detector_backend, frame, align=False).
faces = FaceDetector.detect_faces(face_detector, 'mtcnn', img, align=False)
# ## To detect and extract Face crops
detector_backend = 'mtcnn'
face_detector = FaceDetector.build_model(detector_backend)
cap = cv2.VideoCapture(0)
faces1= 0
frames = 0
max_faces = 50
max_bbox = np.zeros(4)
while faces1 < max_faces:
ret, frame = cap.read()
frames += 1
dtString = str(datetime.now().microsecond)
if not (os.path.exists(path)):
os.makedirs(path)
if frames % 3== 0:
faces = FaceDetector.detect_faces(face_detector, detector_backend, frame, align=False)
print(frame.shape)
print()
for face, (x, y, w, h) in faces:
# plt.imshow(face)
cv2.rectangle(frame, (x,y), (x+w,y+h), (67,67,67), 3)
cv2.imwrite(os.path.join(path, "{}.jpg".format(dtString)), face)
print('Face detected')
faces1 += 1
cv2.imshow("Face detection", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ## Feature age, gender, facial expression, race
from deepface import DeepFace
cap = cv2.VideoCapture(0)
faces = 0
frames = 0
max_faces = 10
max_bbox = np.zeros(4)
model = DeepFace.build_model("Facenet")
while faces < max_faces:
ret, frame = cap.read()
frames += 1
dtString = str(datetime.now().microsecond)
# if not (os.path.exists(path)):
# os.makedirs(path)
if frames % 1 == 0:
try:
img = functions.preprocess_face(frame, target_size= (model.input_shape[1], model.input_shape[2]), detector_backend= 'mtcnn', enforce_detection= False)
obj = DeepFace.analyze(img)
embedding = model.predict(img)[0].tolist()
print(len(embedding))
print(obj)
faces += 1
except Exception as e:
print(e)
continue
cv2.imshow("Face detection", frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
break
cap.release()
cv2.destroyAllWindows()
pwd
# %cd ..
# %cd ..
|
src/.ipynb_checkpoints/Untitled-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import string
import itertools
import numpy as np
import pandas as pd
from collections import *
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.utils import *
from tensorflow.keras.layers import *
from tensorflow.keras.models import *
from tensorflow.keras.callbacks import *
import tensorflow.keras.backend as K
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
# -
def plot_confusion_matrix(cm, classes, title='Confusion matrix', cmap=plt.cm.Blues):
    """Draw a row-normalized confusion matrix on the current matplotlib axes.

    cm: square confusion matrix (counts); classes: tick labels, one per row.
    The input array is not mutated (astype returns a copy). Caller is
    responsible for plt.figure()/plt.show().
    """
    # Normalize each row so cells show the fraction of true-label instances.
    cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=cmap)
    plt.title(title, fontsize=25)
    #plt.colorbar()
    tick_marks = np.arange(len(classes))
    plt.xticks(tick_marks, classes, rotation=90, fontsize=15)
    plt.yticks(tick_marks, classes, fontsize=15)
    fmt = '.2f'
    thresh = cm.max() / 2.
    # Annotate every cell, flipping text color on dark backgrounds.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black", fontsize = 14)
    plt.ylabel('True label', fontsize=20)
    plt.xlabel('Predicted label', fontsize=20)
# +
### READ DATA ###
df = pd.read_json('Sarcasm_Headlines_Dataset_v2.json.zip', lines=True)
diz_label = {0:'sarcastic', 1:'not sarcastic'}
print(df.shape)
df.head()
# +
### PLOT LABEL DISTRIBUTION ###
df.is_sarcastic.map(diz_label).value_counts().plot.pie(figsize=(6,6))
# +
### CLEAN TEXT ###
df['headline'] = df.headline.str.lower().str.replace(r'['+string.digits+string.punctuation+']', ' ')
df['headline'] = df.headline.str.split()
# +
### TRAIN TEST SPLIT ###
y = to_categorical(df.is_sarcastic.factorize()[0])
X_train, X_test, y_train, y_test = train_test_split(df['headline'], y, random_state=33, test_size=0.2)
# +
### HOW MANY WORDS WITH LESS THEN 3 OCCURENCIES? ###
c = Counter(' '.join(X_train.str.join(' ')).split())
word = np.asarray(list(c.keys()))
freq = np.asarray(list(c.values()))
print(len(word), len(word[freq > 3]))
# +
### TOKENIZE AND PAD SENTENCES ###
max_words = len(word[freq > 3]); max_len = 100
tokenizer = Tokenizer(num_words=max_words, lower=True)
tokenizer.fit_on_texts(X_train)
sequence_train = tokenizer.texts_to_sequences(X_train)
sequence_train = pad_sequences(sequence_train, maxlen=max_len)
sequence_test = tokenizer.texts_to_sequences(X_test)
sequence_test = pad_sequences(sequence_test, maxlen=max_len)
sequence_train = np.asarray(sequence_train)
sequence_test = np.asarray(sequence_test)
print(sequence_train.shape, sequence_test.shape)
# +
### DEFINE MODEL WITH MULTI-SAMPLE DROPOUT ###
def get_model(num):
    """Build and compile the GRU classifier with multi-sample dropout.

    num: number of dropout branches; their rates are spread evenly over
    [0.1, 0.5] and the branch outputs are averaged. The 32-unit FC layer is
    shared across branches, while each branch gets its own softmax output
    Dense. Reads module-level globals: max_len, tokenizer, y_train.
    """
    inp = Input(shape=(max_len,))
    emb = Embedding(len(tokenizer.word_index) + 1, 64)(inp)
    x = SpatialDropout1D(0.2)(emb)
    x = GRU(128, return_sequences=True, activation='relu')(x)
    out = GRU(32, activation='relu')(x)
    dense = []
    # Shared dense layer reused by every dropout branch (one set of weights).
    FC = Dense(32, activation='relu')
    for p in np.linspace(0.1,0.5, num):
        x = Dropout(p)(out)
        x = FC(x)
        # A fresh output layer per branch (created inside the loop).
        x = Dense(y_train.shape[1], activation='softmax')(x)
        dense.append(x)
    # Average the branch predictions into the final output.
    out = Average()(dense)
    model = Model(inp, out)
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam', metrics=['accuracy'])
    return model
# -
model = get_model(3)
model.summary()
# +
es = EarlyStopping(monitor='val_loss', mode='auto', restore_best_weights=True, verbose=1, patience=10)
model = get_model(3)
history = model.fit(sequence_train, y_train, batch_size=512, epochs=50,
verbose=2, validation_split=0.1, callbacks=[es])
# +
### GET PREDICTED CLASS ###
pred_test = np.argmax(model.predict(sequence_test), axis=1)
# -
print(classification_report([diz_label[np.argmax(label)] for label in y_test],
[diz_label[label] for label in pred_test]))
# +
cnf_matrix = confusion_matrix([diz_label[np.argmax(label)] for label in y_test],
[diz_label[label] for label in pred_test])
plt.figure(figsize=(7,7))
plot_confusion_matrix(cnf_matrix, classes=list(diz_label.values()))
plt.show()
|
Multi_Sample_Dropout/Multi_Sample_Dropout.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## [Load Library](https://academy.dqlab.id/main/livecode/288/538/2683)
# +
# memuat numpy sebagai np
import numpy as np
# memuat pandas sebagai pd
import pandas as pd
# -
# ## [Load Dataset](https://academy.dqlab.id/main/livecode/288/538/2684)
# memuat data bernama 'dataset_statistic.csv' dan memasukkan hasilnya ke dalam 'raw_data'
raw_data = pd.read_csv("dataset_statistic.csv", sep=';')
# ## [Inspeksi Data](https://academy.dqlab.id/main/livecode/288/538/2685)
# +
print (raw_data)
# melihat 10 data pada baris pertama
print (raw_data.head(10))
# melihat 5 data pada baris terakhir
print (raw_data.tail())
# -
# ## [Metode Shape](https://academy.dqlab.id/main/livecode/288/538/2703)
# +
# melihat dimensi dari raw_data
print (raw_data.shape)
# mengambil jumlah data
print (raw_data.shape[0])
# -
# ## [Melihat Kolom Dalam Dataset](https://academy.dqlab.id/main/livecode/288/538/2704)
print(raw_data)
print(raw_data.columns)
# ## [Metode Isna](https://academy.dqlab.id/main/livecode/288/538/2705)
print (raw_data.isna())
print (raw_data.isna().sum())
# ## [Metode Describe](https://academy.dqlab.id/main/livecode/288/538/2706)
# +
print (raw_data.describe())
# Mencari nilai maksimum dari tiap kolom
raw_data.max()
# Mencari nilai maksimum dari kolom 'Harga'
raw_data['Harga'].max()
# Mencari nilai minimum dari kolom 'Harga'
raw_data['Harga'].min()
# -
# ## [Metode Sum](https://academy.dqlab.id/main/livecode/288/538/2707)
# +
# menghitung jumlah dari semua kolom
print (raw_data.sum())
# menghitung jumlah dari semua kolom bertipe data numerik saja
raw_data.sum(numeric_only=True)
# -
# ## [Manipulasi Dataframe - Memilih Kolom dan Baris](https://academy.dqlab.id/main/livecode/288/538/2708)
# +
# Memilih kolom 'Pendapatan' saja
print (raw_data['Pendapatan'])
# Memilih kolom 'Jenis Kelamin' dan 'Pendapatan'
print (raw_data[['Jenis Kelamin', 'Pendapatan']])
# -
# ## [Metode Loc](https://academy.dqlab.id/main/livecode/288/538/2709)
# +
# mengambil data dari baris ke-0 sampai baris ke-(10-1) atau baris ke-9
print(raw_data[:10])
# mengambil data dari baris ke-3 sampai baris ke-(5-1) atau baris ke-4
print(raw_data[3:5])
# mengambil data pada baris ke-1, ke-3 dan ke-10
print(raw_data.loc[[1,3,10]])
# Mengambil kolom 'Jenis Kelamin' dan 'Pendapatan' dan ambil baris ke-1 sampai ke-9
print(raw_data[['Jenis Kelamin', 'Pendapatan']][1:10])
# Mengambil kolom 'Harga' dan 'Tingkat Kepuasan' dan ambil baris ke-1, ke-10 dan ke-15
print(raw_data[['Harga', 'Tingkat Kepuasan']].loc[[1,10,15]])
# -
# ## [Rata-rata (Mean)](https://academy.dqlab.id/main/livecode/288/539/2687)
# +
# mengambil hanya data untuk produk 'A'
produk_A = raw_data[raw_data['Produk'] == 'A']
# menghitung rerata pendapatan menggunakan method .mean pada objek pandas DataFrame
print (produk_A['Pendapatan'].mean())
# menghitung rerata pendapatan menggunakan method .mean pada objek pandas DataFrame dengan numpy
print (np.mean(produk_A['Pendapatan']))
# -
# ## [Median](https://academy.dqlab.id/main/livecode/288/539/2688)
# +
print (raw_data)
# Hitung median dari pendapatan menggunakan pandas
print (produk_A['Pendapatan'].median())
# Hitung median dari pendapatan menggunakan numpy
print (np.median(produk_A['Pendapatan']))
# -
# ## [Modus](https://academy.dqlab.id/main/livecode/288/539/2689)
# Melihat jumlah dari masing-masing produk
print (raw_data['Produk'].value_counts())
# ## [Kuantil](https://academy.dqlab.id/main/livecode/288/539/2712)
# +
# mencari median atau 50% dari data menggunakan pandas
print (raw_data['Pendapatan'].quantile(q = 0.5))
# mencari median atau 50% dari data menggunakan pandas
print (np.quantile(raw_data['Pendapatan'], q=0.5))
# -
# ## [Agregasi Data dengan method .agg()](https://academy.dqlab.id/main/livecode/288/539/2713)
# +
# menghitung rerata dan median Pendapatan dan Harga
print (raw_data[['Pendapatan', 'Harga']].agg([np.mean, np.median]))
# menghitung rerata dan median Pendapatan dan Harga dari tiap produk
print (raw_data[['Pendapatan', 'Harga', 'Produk']].groupby('Produk').agg([np.mean, np.median]))
# -
# ## [Proporsi Kategori](https://academy.dqlab.id/main/livecode/288/540/2691)
# cari proporsi tiap Produk
print (raw_data['Produk'].value_counts()/raw_data.shape[0])
# ## [Ukuran Sebaran pada Data Interval dan Rasio](https://academy.dqlab.id/main/livecode/288/540/2692)
# Cari nilai rentang dari kolom 'Pendapatan'
print (raw_data['Pendapatan'].max() - raw_data['Pendapatan'].min())
# ## [Variansi](https://academy.dqlab.id/main/livecode/288/540/2693)
# +
# menghitung variansi umur menggunakan method .var() dari pandas
print (raw_data['Pendapatan'].var())
# menghitung variansi umur menggunakan method .var() dari numpy
print (np.var(raw_data['Pendapatan']))
# mengatur variansi populasi dengan method `.var()` dari pandas
print (raw_data['Pendapatan'].var(ddof=0))
# -
# ## [Deviasi Baku (Standard Deviation)](https://academy.dqlab.id/main/livecode/288/540/2694)
# +
# menghitung deviasi baku sampel pendapatan menggunakan method std() dari pandas
print (raw_data['Pendapatan'].std())
# menghitung deviasi baku sampel pendapatan menggunakan method std() dari numpy
print (np.std(raw_data['Pendapatan'], ddof = 1))
# -
# ## [Menghitung Korelasi](https://academy.dqlab.id/main/livecode/288/541/2720)
# +
# menghitung korelasi dari setiap pasang variabel pada raw_data
print (raw_data.corr())
# mencari korelasi 'kendall' untuk tiap pasang variabel
print (raw_data.corr(method='kendall'))
# mencari korelasi 'spearman' untuk tiap pasang variabel
print (raw_data.corr(method='spearman'))
|
Learn/Python/Fundamental/Statistic using Python for Data Science/Statistic using Python for Data Science.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# 
#
#
# # __Titanic Survivor Analysis__
#
# #### Author: Several publc Kaggle Kernels
# #### Edits: <NAME>, <NAME>, and <NAME>
#
# **Sources:**
# * **Training + explanations**: https://www.kaggle.com/c/titanic
#
# ___
# ___
#
# -
#
# # Understanding the connections between passanger information and survival rate
#
# The sinking of the RMS Titanic is one of the most infamous shipwrecks in history. On April 15, 1912, during her maiden voyage, the Titanic sank after colliding with an iceberg, killing 1502 out of 2224 passengers and crew. This sensational tragedy shocked the international community and led to better safety regulations for ships.
#
# One of the reasons that the shipwreck led to such loss of life was that there were not enough lifeboats for the passengers and crew. Although there was some element of luck involved in surviving the sinking, some groups of people were more likely to survive than others, such as women, children, and the upper-class.
#
# <br>
# The data consists of passenger information for the maiden, and tragic, voyage of the Titanic ocean liner. The set is comprised of 1309 unique entries, each associated with information related to a passenger. The set is broken down into an 891/418 (training/testing) split for the purposes of modeling
#
# **Our task is to train a machine learning model on the training set in order to predict if the passengers in the test set survived or not.**
#
# <br>
#
# ### Table of Contents
#
# > #### [Part 0: Install Additional Required Software](#Part-0:-Install-Additional-Required-Software)
#
# > #### [Part 1: Import Packages + Libraries + Dependencies](#Part-1:-Import-Packages-+-Libraries-+-Dependencies)
#
# > #### [Part 2: Preprocessing and Exploratory Data Anlysis](#Part-2:-Preprocessing-and-Exploratory-Data-Anlysis)
#
# > #### [Part 3: Establishing a Hypothesis](#Part-3:-Establishing-a-Hypothesis)
#
# > #### [Part 4: Machine Learning!](#Part-4:-Machine-Learning!)
#
# ### Additional Material
#
# > #### [Appendix I: Why are our models maxing out at around 80%?](#Appendix-I:-Why-are-our-models-maxing-out-at-around-80%?)
#
# > #### [Appendix II: Resources and references to material we won't cover in detail](#Appendix-II:-Resources-and-references-to-material-we-won't-cover-in-detail)
#
# <br>
#
# ___
# ___
# !conda install py-xgboost --y
# <a id='sec1'></a>
#
# ## Part 0: Install Additional Required Software
#
# __Note:__To follow along with notebook, you will need to install the __xgboost__ package in your pyhton enviroment:
#
# try (in terminal):
# ```
# $ conda install py-xgboost
# ```
#
# or
#
# try (directly in notebook)
# ```
# # !conda install py-xgboost --y
# ```
# <a id='sec2'></a>
# ___
# ## Part 1: Import Packages + Libraries + Dependencies
# ### Import packages
# +
# No warnings
import warnings
warnings.filterwarnings('ignore') # Filter out warnings
# data analysis and wrangling
import pandas as pd
import numpy as np
# visualization
import matplotlib.pyplot as plt
# %matplotlib inline
import seaborn as sns
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB # Gaussian Naive Bays
from sklearn.linear_model import Perceptron
from sklearn.tree import DecisionTreeClassifier
import xgboost as xgb
# -
# ### Styling and fancy distribution plot
# +
# Plot styling and display options
sns.set(style='white', context='notebook', palette='deep')
plt.rcParams[ 'figure.figsize' ] = 10 , 6
pd.set_option('display.max_columns', 100) # Print 100 Pandas columns
# Special distribution plot (will be used later)
def plot_distribution( df , var , target , **kwargs ):
    """Plot one KDE curve of `var` per value of `target` on a seaborn FacetGrid.

    Optional keyword arguments `row` and `col` facet the grid by
    additional dataframe columns (e.g. row='Sex').
    """
    row = kwargs.get( 'row' , None )
    col = kwargs.get( 'col' , None )
    facet = sns.FacetGrid( df , hue=target , aspect=4 , row = row , col = col )
    facet.map( sns.kdeplot , var , shade= True )
    # Start the x-axis at 0: the variables plotted here (e.g. Age) are non-negative
    facet.set( xlim=( 0 , df[ var ].max() ) )
    facet.add_legend()
    plt.tight_layout()
# -
# ### Load Data
# !head data/train.csv
# +
train_df = pd.read_csv('data/train.csv')
test_df = pd.read_csv('data/test.csv')
combine = [train_df, test_df]
# combine is used to ensure whatever preprocessing is done on training data
# is also done on test data
# NOTE! When we change train_df or test_df the objects in combine
# will also change (combine is only a pointer to the objects)
# CAUTION: this aliasing only holds for in-place mutation. Re-binding the
# names (e.g. train_df = train_df.drop(...)) creates NEW objects, which is
# why the notebook rebuilds `combine = [train_df, test_df]` after each drop.
# -
# <a id='sec3'></a>
# ___
# ## Part 2: Exploring and Preprocessing the Data
# **Data descriptions**
#
# <img src="data/Titanic_Variable.png">
# +
# Features/Variable names
train_df.columns
# +
# preview the data
train_df.head(5)
# +
# General data statistics
train_df.describe()
# +
# Data Frame information (null, data type etc)
train_df.info()
# -
# ### Histograms
train_df.hist(figsize=(13,10))
plt.show()
# ### Insights into the data
train_df[['Pclass', 'Survived']].groupby(['Pclass']).mean()
# # Balanced data set?
# +
# Balanced data set?
target_count = train_df['Survived'].value_counts()
target_count
# -
# __Note:__ If the goal is Prediction, unbalanced data introduce bias into model.
#
# Balanced data are good for classification, but you lose information such as appearance frequencies -- which may affect accuracy metrics themselves, as well as production performance.
# +
# What is base line for prediction accuracy?
target_count[0]/(sum(target_count))
# -
# ___
#
# > #### __Brief Remarks Regarding the Data__
#
# > * `PassengerId` is a random number (incrementing index) and thus does not contain any valuable information.
#
# > * `Survived, Passenger Class, Age, Siblings Spouses, Parents Children` and `Fare` are numerical values (no need to transform them) -- but, we might want to group them (i.e. create categorical variables).
#
# > * `Sex, Embarked` are categorical features that we need to map to integer values. `Name, Ticket` and `Cabin` might also contain valuable information.
#
# ___
# ### Dropping Redundant Data
# __Note:__ It is important to remove variables that convey information already captured by some other variable. Doing so removes the correlation, while also diminishing potential overfit.
# +
# Check dimensions of the train and test datasets
print("Shapes Before: (train) (test) = ", \
train_df.shape, test_df.shape)
# +
# Drop columns 'Ticket', 'Cabin', need to do it for both test and training
train_df = train_df.drop(['PassengerId','Ticket', 'Cabin'], axis=1)
test_df = test_df.drop(['Ticket', 'Cabin'], axis=1)
combine = [train_df, test_df]
print("Shapes After: (train) (test) =", train_df.shape, test_df.shape)
# +
# Check if there are null values in the datasets
print(train_df.isnull().sum())
print()
print(test_df.isnull().sum())
# -
# <a id='sec4'></a>
# ____
# ## Part 3: Establishing Hypotheses
#
# ### _The Title of the person is a feature that can predict survival_
# +
# List example titles in Name column
train_df.Name[:5]
# +
# from the Name column we will extract title of each passenger
# and save that in a column in the dataset called 'Title'
# if you want to match Titles or Names with any other expression
# refer to this tutorial on regex in python:
# https://www.tutorialspoint.com/python/python_reg_expressions.htm
# Create column called Title
# Extract the honorific that follows a space and precedes a period,
# e.g. "Braund, Mr. Owen" -> "Mr".
for dataset in combine:
    # Raw string: in a plain literal '\.' is an invalid escape sequence
    # (a SyntaxWarning on modern Python); the matched pattern is unchanged.
    dataset['Title'] = dataset['Name'].str.extract(r' ([A-Za-z]+)\.',
                                                   expand=False)
# +
# Double check that our titles makes sense (by comparing to sex)
pd.crosstab(train_df['Title'], train_df['Sex'])
# -
# >___
# > #### __Jonkheer?__
#
# > Most popular during medieval times, the title of _Jonkheer_ was given to a young and unmarried child of a high-ranking knight or nobleman -- __Considered the lowest rank of nobility.__
#
# >___
# same but for test set
pd.crosstab(test_df['Title'], test_df['Sex'])
# +
# We see common titles like Miss, Mrs, Mr, Master are dominant, we will
# correct some Titles to standard forms and replace the rarest titles
# with single name 'Rare'
for dataset in combine:
    # Collapse the rare / archaic honorifics into a single 'Rare' bucket,
    # then normalise the French and alternative spellings to common forms.
    rare = ['Lady', 'Countess', 'Capt', 'Col', 'Don', 'Dr',
            'Major', 'Rev', 'Sir', 'Jonkheer', 'Dona']
    dataset['Title'] = dataset['Title'].replace(rare, 'Rare')
    dataset['Title'] = dataset['Title'].replace(
        {'Mlle': 'Miss',   # Mademoiselle
         'Ms': 'Miss',
         'Mme': 'Mrs'})    # Madame
# +
# We now have more logical (contemporary) titles, and fewer groups
train_df[['Title', 'Survived']].groupby(['Title']).mean()
# +
# We can plot the survival chance for each title
sns.countplot(x='Survived', hue="Title", data=train_df, order=[1,0])
plt.xticks(range(2),['Made it','Deceased']);
# +
# Title dummy mapping: Map titles to binary dummy columns
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Title)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
# +
# Remove unique variables for analysis (Title is generally bound to Name, so it's also dropped)
train_df = train_df.drop(['Name', 'Title'], axis=1)
test_df = test_df.drop(['Name', 'Title'], axis=1)
combine = [train_df, test_df]
# +
# sanity check
train_df.head()
# -
# ### Map Sex column to binary (male = 0, female = 1) categories
# +
# convert categorical variable to numeric
for dataset in combine:
dataset['Sex'] = dataset['Sex']. \
map( {'female': 1, 'male': 0} ).astype(int)
train_df.head()
# -
# ### Handle missing values for age
# We will now guess values of age based on sex (male / female)
# and socioeconomic class (1st, 2nd, 3rd) of the passenger.
#
# The row indicates the sex, male = 0, female = 1
#
# > __IDEA:__ Wealth (indicated by class accomodation), as well as Gender, historically are indicative of age.
#
# > This approach gives us a more refined estimate than only taking the median / mean, etc.
# +
# create empty array for later use
guess_ages = np.zeros((2,3),dtype=int)
guess_ages
# +
# Fill the NA's for the Age column with "qualified guesses":
# the median age of the matching (Sex, Pclass) group.
for idx, dataset in enumerate(combine):
    # enumerate supplies the position so we can label the printout
    if idx == 0:
        print('Working on Training Data set\n')
    else:
        print('-' * 35)
        print('Working on Test Data set\n')
    print('Guess values of age based on sex and pclass of the passenger...')
    # Dataset-wide median as a fallback: if a (Sex, Pclass) group had no
    # known ages, guess_df.median() would be NaN and int(NaN) would raise.
    overall_median = dataset['Age'].dropna().median()
    for i in range(0, 2):        # Sex: 0 = male, 1 = female (see mapping above)
        for j in range(0, 3):    # Pclass: j+1 = 1st, 2nd, 3rd class
            guess_df = dataset[(dataset['Sex'] == i)
                               & (dataset['Pclass'] == j + 1)]['Age'].dropna()
            # Median is less sensitive to outliers than the mean
            age_guess = guess_df.median()
            if pd.isna(age_guess):
                age_guess = overall_median
            guess_ages[i, j] = int(age_guess)
    print('Guess_Age table:\n', guess_ages)
    print('\nAssigning age values to NAN age values in the dataset...')
    for i in range(0, 2):
        for j in range(0, 3):
            dataset.loc[(dataset.Age.isnull()) & (dataset.Sex == i)
                        & (dataset.Pclass == j + 1), 'Age'] = guess_ages[i, j]
    dataset['Age'] = dataset['Age'].astype(int)
    print()
    print('Done! \n\n\n')
train_df.head()
# -
# ### Split age into bands / categorical ranges and look at survival rates
# Age bands
train_df['AgeBand'] = pd.cut(train_df['Age'], 5)
train_df[['AgeBand', 'Survived']].groupby(['AgeBand'], as_index=False)\
.mean().sort_values(by='AgeBand', ascending=True)
# ### Distribution of suvival relative to age
# +
# Plot distributions of Age of passangers who survived
# or did not survive
plot_distribution( train_df , var = 'Age' , target = 'Survived' ,\
row = 'Sex' )
# Recall: {'male': 0, 'female': 1}
# +
# Change Age column to
# map Age ranges (AgeBands) to integer values of categorical type
for dataset in combine:
dataset.loc[ dataset['Age'] <= 16, 'Age'] = 0
dataset.loc[(dataset['Age'] > 16) & (dataset['Age'] <= 32), 'Age'] = 1
dataset.loc[(dataset['Age'] > 32) & (dataset['Age'] <= 48), 'Age'] = 2
dataset.loc[(dataset['Age'] > 48) & (dataset['Age'] <= 64), 'Age'] = 3
dataset.loc[ dataset['Age'] > 64, 'Age']=4
train_df.head()
# Note we could just run
# dataset['Age'] = pd.cut(dataset['Age'], 5,labels=[0,1,2,3,4])
# +
# remove AgeBand column
train_df = train_df.drop(['AgeBand'], axis=1)
combine = [train_df, test_df]
train_df.head()
# -
# ### Travel Party Size
#
# How did the number of people the person traveled with impact the chance of survival?
# +
# SibSp = Number of Sibling / Spouses
# Parch = Parents / Children
for dataset in combine:
dataset['FamilySize'] = dataset['SibSp'] + dataset['Parch'] + 1
# Survival chance against FamilySize
train_df[['FamilySize', 'Survived']].groupby(['FamilySize'], as_index=True) \
.mean().sort_values(by='Survived', ascending=False)
# +
# Plot it, 1 is survived
sns.countplot(x='Survived', hue="FamilySize", data=train_df, order=[1,0]);
# -
# +
# Create binary variable if the person was alone or not
for dataset in combine:
dataset['IsAlone'] = 0
dataset.loc[dataset['FamilySize'] == 1, 'IsAlone'] = 1
train_df[['IsAlone', 'Survived']].groupby(['IsAlone'], as_index=True).mean()
# +
# We will only use the binary IsAlone feature for further analysis
for df in combine:
df.drop(['Parch', 'SibSp', 'FamilySize'], axis=1, inplace=True)
train_df.head()
# -
# # Feature construction
# +
# We can also create new features based on intuitive combinations
# Here is an example when we say that the age times socioclass is a determinant factor
for dataset in combine:
dataset['Age*Class'] = dataset.Age * dataset.Pclass
train_df.loc[:, ['Age*Class', 'Age', 'Pclass']].head()
# -
train_df[['Age*Class', 'Survived']].groupby(['Age*Class'], as_index=True).mean()
# ### Port the person embarked from
# Let's see how that influences chance of survival
# <img src= "data/images/titanic_voyage_map.png">
# >___
#
# > #### __Interesting Fact:__
#
# > Third Class passengers were the first to board, with First and Second Class passengers following up to an hour before departure.
#
# > Third Class passengers were inspected for ailments and physical impairments that might lead to their being refused entry to the United States, while First Class passengers were personally greeted by Captain Smith.
# +
# To replace Nan value in 'Embarked', we will use the mode
# in 'Embaraked'. This will give us the most frequent port
# the passengers embarked from
freq_port = train_df['Embarked'].dropna().mode()[0]
print('Most frequent port of Embarkation:',freq_port)
# +
# Fill NaN 'Embarked' Values in the datasets
for dataset in combine:
dataset['Embarked'] = dataset['Embarked'].fillna(freq_port)
train_df[['Embarked', 'Survived']].groupby(['Embarked'], as_index=True) \
.mean().sort_values(by='Survived', ascending=False)
# -
# ### Survival relative to port of origin
# +
# Plot of relationship between survival and origin
sns.countplot(x='Survived', hue="Embarked", data=train_df, order=[1,0])
plt.xticks(range(2),['Made it!', 'Deceased']);
# +
# Create categorical dummy variables for Embarked values
for dataset in combine:
binary_encoded = pd.get_dummies(dataset.Embarked)
newcols = binary_encoded.columns
dataset[newcols] = binary_encoded
train_df.head()
# +
# Drop Embarked
for dataset in combine:
dataset.drop('Embarked', axis=1, inplace=True)
# -
# ### Fare Amount
# What is the relationship between _Fare_ and survival
# +
# Fill the NA values in the Fares column with the median
test_df['Fare'].fillna(test_df['Fare'].dropna().median(), inplace=True)
test_df.head()
# +
# q cut will find ranges equal to the quartile of the data
train_df['FareBand'] = pd.qcut(train_df['Fare'], 4)
train_df[['FareBand', 'Survived']].groupby(['FareBand'], as_index=False).mean().sort_values(by='FareBand', ascending=True)
# +
# Encode Fare as quartile categories 0-3. The quartile bin edges are
# learned from the TRAINING fares only and then applied to both frames,
# so train and test share a single encoding.
# (The previous code ran pd.qcut(train_df['Fare']) inside the loop; when
# dataset was test_df, pandas index alignment wrote the *training* rows'
# codes into the test frame instead of binning the test fares.)
_, fare_edges = pd.qcut(train_df['Fare'], 4, retbins=True)
fare_edges[0], fare_edges[-1] = -np.inf, np.inf  # tolerate out-of-range test fares
for dataset in combine:
    dataset['Fare'] = pd.cut(dataset['Fare'], bins=fare_edges,
                             labels=np.arange(4), include_lowest=True)
    dataset['Fare'] = dataset['Fare'].astype(int)
train_df[['Fare','FareBand']].head()
# +
# Drop FareBand
train_df = train_df.drop(['FareBand'], axis=1)
combine = [train_df, test_df]
# -
# ### Finished -- Preprocessing Complete!
# +
# All features are approximately on the same scale
# no need for feature engineering / normalization
train_df.head(7)
# -
test_df.head(7)
# ### Sanity Check: View the correlation between features in our processed dataset
# +
# Uncorrelated features are generally more powerful predictors
colormap = plt.cm.viridis
plt.figure(figsize=(12,12))
plt.title('Pearson Correlation of Features', y=1.05, size=15)
sns.heatmap(train_df.corr().round(2)\
,linewidths=0.1,vmax=1.0, square=True, cmap=colormap, \
linecolor='white', annot=True);
# -
# <a id='sec5'></a>
# ___
# ## Part 4: Machine Learning!
# Now we will Model, Predict, and Choose from algorithms for classification.
# We will try using different classifiers to model and predict.
#
# We ultimately will choose the best model from:
# 1. Logistic Regression
# 2. K-Nearest Neighbors (KNN)
# 3. Support Vector Machines (SVM)
# 4. Perceptron
# 5. XGBoost
# 6. Random Forest
# 7. Dense Neural Network
# ### Setup Training and Validation Sets
# +
X = train_df.drop("Survived", axis=1) # Training & Validation data
Y = train_df["Survived"] # Response / Target Variable
X_submission = test_df.drop("PassengerId", axis=1).copy()
print(X.shape, Y.shape)
# +
# Split training set so that we validate on 20% of the data
# Note that our algorithms will never have seen the validation
# data during training. This is to evaluate how good our estimators are.
np.random.seed(1337) # set random seed for reproducibility
# With random_state=None, train_test_split draws from numpy's global RNG,
# so the np.random.seed call above does make this split reproducible.
from sklearn.model_selection import train_test_split
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2)
print(X_train.shape, Y_train.shape)
print(X_val.shape, Y_val.shape)
# -
# ___
# > ### Scikit-Learn general ML workflow
# > 1. __Instantiate__ model object
# > 2. __Fit__ model to training data
# > 3. __Predict & Evaluate__ predict output for data not used during training and compare predictions against true output values to form an accuracy measure.
#
# ___
# ### Comparing Modeling Approaches (Algorithms)
# #### 1. Logistic Regression
# +
logreg = LogisticRegression() # instantiate
logreg.fit(X_train, Y_train) # fit
Y_pred = logreg.predict(X_val) # predict
acc_logreg = sum(Y_pred == Y_val)/len(Y_val)*100 # evaluate
print('Logistic Regression labeling accuracy:', str(round(acc_logreg,2)),'%')
# +
# we could also use scikit learn's method score
# that predicts and then compares to validation set labels
acc_log_2 = logreg.score(X_val, Y_val) # evaluate
print('Logistic Regression using built-in method:', str(round(acc_log_2*100,2)),'%')
# -
# #### 2. KNN
# +
knn = KNeighborsClassifier(n_neighbors = 3) # instantiate
knn.fit(X_train, Y_train) # fit
acc_knn = knn.score(X_val, Y_val) # predict + evaluate
print('K-Nearest Neighbors labeling accuracy:', str(round(acc_knn*100,2)),'%')
# -
# #### 3. SVM
# +
# Support Vector Machines Classifier (non-linear kernel)
svc = SVC() # instantiate
svc.fit(X_train, Y_train) # fit
acc_svc = svc.score(X_val, Y_val) # predict + evaluate
print('Support Vector Machines labeling accuracy:', str(round(acc_svc*100,2)),'%')
# -
# #### 4. Perceptron
# +
perceptron = Perceptron() # instantiate
perceptron.fit(X_train, Y_train) # fit
acc_perceptron = perceptron.score(X_val, Y_val) # predict + evalaute
print('Perceptron labeling accuracy:', str(round(acc_perceptron*100,2)),'%')
# -
# #### 5. XGBoost
# +
# XGBoost, same API as scikit-learn
gradboost = xgb.XGBClassifier(n_estimators=1000) # instantiate
gradboost.fit(X_train, Y_train) # fit
acc_xgboost = gradboost.score(X_val, Y_val) # predict + evalute
print('XGBoost labeling accuracy:', str(round(acc_xgboost*100,2)),'%')
# -
# #### 6. Random Forest
# +
# Random Forest: bagged ensemble of decision trees
random_forest = RandomForestClassifier(n_estimators=500) # instantiate
random_forest.fit(X_train, Y_train)                      # fit
acc_rf = random_forest.score(X_val, Y_val)               # predict + evaluate
# Label fixed: this cell evaluates the Random Forest (the old message
# said "K-Nearest Neighbors", copy-pasted from the KNN cell)
print('Random Forest labeling accuracy:', str(round(acc_rf*100,2)),'%')
# -
Y_submission = gradboost.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False)
# ## Dense Neural Network
# +
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
# +
# instantiate
# Fully-connected binary classifier: input -> 300 -> 100 -> 50 -> 1 (sigmoid)
# NOTE(review): input_shape=(14,) hard-codes the feature count; confirm it
# matches X_train.shape[1] after the preprocessing above.
model = Sequential()
model.add( Dense(units=300, activation='relu', input_shape=(14,) ))
model.add( Dense(units=100, activation='relu'))
model.add( Dense(units=50, activation='relu'))
model.add( Dense(units=1, activation='sigmoid') )
# +
# fit
# Sigmoid output + binary cross-entropy suits the 0/1 Survived target
model.compile(loss = 'binary_crossentropy', optimizer = 'adam',\
              metrics = ['accuracy'])
model.fit(X_train.values, Y_train.values, epochs = 50, batch_size= 50)
# -
# Evaluate the model Accuracy on test set
print('Neural Network accuracy:',str(round(model.evaluate(X_val.values, \
Y_val.values, batch_size=50,verbose=False)[1]*100,2)),'%')
# ### Importance scores in the random forest model
# +
# Plot the most important features of the fitted random forest
def plot_model_var_imp(model, X, y):
    """Bar-plot the 10 most important features of a fitted tree ensemble.

    model : fitted estimator exposing `feature_importances_`
    X, y  : data used to report the (training) accuracy
    """
    imp = pd.DataFrame(
        model.feature_importances_,
        columns=['Importance'],
        index=X.columns,
    )
    # Keep the 10 MOST important features (the previous code sorted
    # ascending and took the head, which plotted the 10 LEAST important
    # ones). Ascending order puts the largest bar at the top of the chart.
    imp = imp.sort_values(['Importance'], ascending=True)[-10:]
    imp.plot(kind='barh')
    print('Training accuracy Random Forest:', model.score(X, y))
plot_model_var_imp(random_forest, X_train, Y_train)
# -
# # Compete on Kaggle!
# How to create a Kaggle submission file (here using the fitted SVC model)
Y_submission = svc.predict(X_submission)
submission = pd.DataFrame({
"PassengerId": test_df["PassengerId"],
"Survived": Y_submission
})
submission.to_csv('titanic.csv', index=False)
# <a id='sec6'></a>
# ___
# ___
#
# ## Appendix I: Why are our models maxing out at around 80%?
#
# #### __<NAME>__
#
# <img src= "data/images/john-jacob-astor.jpg">
#
# <NAME> perished in the disaster even though our model predicted he would survive. Astor was the wealthiest person on the Titanic -- his ticket fare was valued at over 35,000 USD in 2016 -- it seems likely that he would have been among of the approximatelly 35 percent of men in first class to survive. However, this was not the case: although his pregnant wife survived, <NAME>’s body was recovered a week later, along with a gold watch, a diamond ring with three stones, and no less than 92,481 USD (2016 value) in cash.
#
# <br >
#
#
# #### __<NAME>__
#
# <img src= "data/images/olaus-jorgensen-abelseth.jpg">
#
# Avelseth was a 25-year-old Norwegian sailor, a man in 3rd class, and not expected to survive by classifier. However, once the ship sank, he survived by swimming for 20 minutes in the frigid North Atlantic water before joining other survivors on a waterlogged collapsible boat.
#
# Abelseth got married three years later, settled down as a farmer in North Dakota, had 4 kids, and died in 1980 at the age of 94.
#
# <br >
#
# ### __Key Takeaway__
#
# As engineers and business professionals, we are trained to ask ourselves, what could we do to improve on an 80 percent average. As is often the case, it’s easy to forget that these data points represent real people. Each time our model was wrong we should be glad -- in such misclassifications we will likely find incredible stories of human nature and courage triumphing over extremely difficult odds.
#
# __It is important to never lose sight of the human element when analyzing data that deals with people.__
#
# In the case of this dataset, the moment we are disappointed that our accuracy was not higher, we are disappointed that more people did not die.
# <a id='sec7'></a>
# ___
# ## Appendix II: Resources and references to material we won't cover in detail
# > * **Gradient Boosting:** http://blog.kaggle.com/2017/01/23/a-kaggle-master-explains-gradient-boosting/
#
# > * **Jupyter Notebook (tutorial):** https://www.datacamp.com/community/tutorials/tutorial-jupyter-notebook
#
# > * **K-Nearest Neighbors (KNN):** https://towardsdatascience.com/introduction-to-k-nearest-neighbors-3b534bb11d26
#
# > * **Logistic Regression:** https://towardsdatascience.com/5-reasons-logistic-regression-should-be-the-first-thing-you-learn-when-become-a-data-scientist-fcaae46605c4
#
# > * **Naive Bayes:** http://scikit-learn.org/stable/modules/naive_bayes.html
#
# > * **Perceptron:** http://aass.oru.se/~lilien/ml/seminars/2007_02_01b-Janecek-Perceptron.pdf
#
# > * **Random Forest:** https://medium.com/@williamkoehrsen/random-forest-simple-explanation-377895a60d2d
#
# > * **Support Vector Machines (SVM):** https://towardsdatascience.com/https-medium-com-pupalerushikesh-svm-f4b42800e989
#
#
# <br>
# ___
# ___
# 
|
05a-tools-titanic/titanic.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <a href="https://www.bigdatauniversity.com"><img src="https://ibm.box.com/shared/static/cw2c7r3o20w9zn8gkecaeyjhgw3xdgbj.png" width="400" align="center"></a>
#
# <h1><center>Polynomial Regression</center></h1>
#
# <h4>About this Notebook</h4>
# In this notebook, we learn how to use scikit-learn for Polynomial regression. We download a dataset that is related to fuel consumption and Carbon dioxide emission of cars. Then, we split our data into training and test sets, create a model using training set, evaluate our model using test set, and finally use model to predict unknown value.
# -
# <h1>Table of contents</h1>
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <ol>
# <li><a href="#download_data">Downloading Data</a></li>
# <li><a href="#polynomial_regression">Polynomial regression</a></li>
# <li><a href="#evaluation">Evaluation</a></li>
# <li><a href="#practice">Practice</a></li>
# </ol>
# </div>
# <br>
# <hr>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Importing Needed packages
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
import matplotlib.pyplot as plt
import pandas as pd
import pylab as pl
import numpy as np
# %matplotlib inline
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <h2 id="download_data">Downloading Data</h2>
# To download the data, we will use !wget to download it from IBM Object Storage.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# !wget -O FuelConsumption.csv https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/ML0101ENv3/labs/FuelConsumptionCo2.csv
# -
# __Did you know?__ When it comes to Machine Learning, you will likely be working with large datasets. As a business, where can you host your data? IBM is offering a unique opportunity for businesses, with 10 Tb of IBM Cloud Object Storage: [Sign up now for free](http://cocl.us/ML0101EN-IBM-Offer-CC)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
#
# ## Understanding the Data
#
# ### `FuelConsumption.csv`:
# We have downloaded a fuel consumption dataset, **`FuelConsumption.csv`**, which contains model-specific fuel consumption ratings and estimated carbon dioxide emissions for new light-duty vehicles for retail sale in Canada. [Dataset source](http://open.canada.ca/data/en/dataset/98f1a129-f628-4ce4-b24d-6f16bf24dd64)
#
# - **MODELYEAR** e.g. 2014
# - **MAKE** e.g. Acura
# - **MODEL** e.g. ILX
# - **VEHICLE CLASS** e.g. SUV
# - **ENGINE SIZE** e.g. 4.7
# - **CYLINDERS** e.g 6
# - **TRANSMISSION** e.g. A6
# - **FUEL CONSUMPTION in CITY(L/100 km)** e.g. 9.9
# - **FUEL CONSUMPTION in HWY (L/100 km)** e.g. 8.9
# - **FUEL CONSUMPTION COMB (L/100 km)** e.g. 9.2
# - **CO2 EMISSIONS (g/km)** e.g. 182 --> low --> 0
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Reading the data in
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
df = pd.read_csv("FuelConsumption.csv")
# take a look at the dataset
df.head()
# -
# Lets select some features that we want to use for regression.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
cdf = df[['ENGINESIZE','CYLINDERS','FUELCONSUMPTION_COMB','CO2EMISSIONS']]
cdf.head(9)
# -
# Lets plot Emission values with respect to Engine size:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
plt.scatter(cdf.ENGINESIZE, cdf.CO2EMISSIONS, color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# #### Creating train and test dataset
# Train/Test Split involves splitting the dataset into training and testing sets respectively, which are mutually exclusive. After which, you train with the training set and test with the testing set.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
msk = np.random.rand(len(df)) < 0.8
train = cdf[msk]
test = cdf[~msk]
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <h2 id="polynomial_regression">Polynomial regression</h2>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Sometimes, the trend of data is not really linear, and looks curvy. In this case we can use Polynomial regression methods. In fact, many different regressions exist that can be used to fit whatever the dataset looks like, such as quadratic, cubic, and so on, and it can go on and on to infinite degrees.
#
# In essence, we can call all of these, polynomial regression, where the relationship between the independent variable x and the dependent variable y is modeled as an nth degree polynomial in x. Lets say you want to have a polynomial regression (let's make 2 degree polynomial):
#
#
# $y = b + \theta_1 x + \theta_2 x^2$
#
# Now, the question is: how we can fit our data on this equation while we have only x values, such as __Engine Size__?
# Well, we can create a few additional features: 1, $x$, and $x^2$.
#
#
#
# __PolynomialFeatures()__ function in Scikit-learn library, derives a new feature set from the original feature set. That is, a matrix will be generated consisting of all polynomial combinations of the features with degree less than or equal to the specified degree. For example, let's say the original feature set has only one feature, _ENGINESIZE_. Now, if we select the degree of the polynomial to be 2, then it generates 3 features, degree=0, degree=1 and degree=2:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
test_x = np.asanyarray(test[['ENGINESIZE']])
test_y = np.asanyarray(test[['CO2EMISSIONS']])
poly = PolynomialFeatures(degree=2)
train_x_poly = poly.fit_transform(train_x)
train_x_poly
# -
# **fit_transform** takes our x values, and output a list of our data raised from power of 0 to power of 2 (since we set the degree of our polynomial to 2).
#
# $
# \begin{bmatrix}
# v_1\\
# v_2\\
# \vdots\\
# v_n
# \end{bmatrix}
# $
# $\longrightarrow$
# $
# \begin{bmatrix}
# [ 1 & v_1 & v_1^2]\\
# [ 1 & v_2 & v_2^2]\\
# \vdots & \vdots & \vdots\\
# [ 1 & v_n & v_n^2]
# \end{bmatrix}
# $
#
# in our example
#
# $
# \begin{bmatrix}
# 2.\\
# 2.4\\
# 1.5\\
# \vdots
# \end{bmatrix}
# $
# $\longrightarrow$
# $
# \begin{bmatrix}
# [ 1 & 2. & 4.]\\
# [ 1 & 2.4 & 5.76]\\
# [ 1 & 1.5 & 2.25]\\
# \vdots & \vdots & \vdots\\
# \end{bmatrix}
# $
# It looks like feature sets for multiple linear regression analysis, right? Yes. It Does.
# Indeed, Polynomial regression is a special case of linear regression, with the main idea of how do you select your features. Just consider replacing the $x$ with $x_1$, $x_1^2$ with $x_2$, and so on. Then the degree 2 equation would be turn into:
#
# $y = b + \theta_1 x_1 + \theta_2 x_2$
#
# Now, we can deal with it as 'linear regression' problem. Therefore, this polynomial regression is considered to be a special case of traditional multiple linear regression. So, you can use the same mechanism as linear regression to solve such a problems.
#
#
#
# so we can use __LinearRegression()__ function to solve it:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
clf = linear_model.LinearRegression()
train_y_ = clf.fit(train_x_poly, train_y)
# The coefficients
print ('Coefficients: ', clf.coef_)
print ('Intercept: ',clf.intercept_)
# -
# As mentioned before, __Coefficient__ and __Intercept__ , are the parameters of the fit curvy line.
# Given that it is a typical multiple linear regression, with 3 parameters, and knowing that the parameters are the intercept and coefficients of hyperplane, sklearn has estimated them from our new set of feature sets. Lets plot it:
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
XX = np.arange(0.0, 10.0, 0.1)
yy = clf.intercept_[0]+ clf.coef_[0][1]*XX+ clf.coef_[0][2]*np.power(XX, 2)
plt.plot(XX, yy, '-r' )
plt.xlabel("Engine size")
plt.ylabel("Emission")
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <h2 id="evaluation">Evaluation</h2>
# +
from sklearn.metrics import r2_score
# Use transform (not fit_transform): the feature encoder was already fitted
# on the training data and must only be applied to the test data.
test_x_poly = poly.transform(test_x)
test_y_ = clf.predict(test_x_poly)
print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_ - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_ - test_y) ** 2))
# r2_score expects (y_true, y_pred); R^2 is not symmetric in its arguments,
# so passing them swapped reports a wrong score.
print("R2-score: %.2f" % r2_score(test_y , test_y_) )
print(test_y_[0],test_y[0])
# -
# <h2 id="practice">Practice</h2>
# Try to use a polynomial regression with the dataset but this time with degree three (cubic). Does it result in better accuracy?
# +
# write your code here
# Degree-3 (cubic) polynomial regression on engine size vs CO2 emissions.
poly_ = PolynomialFeatures(degree=3)
train_x_poly_ = poly_.fit_transform(train_x)
train_x_poly_
res = linear_model.LinearRegression()
train_yy = res.fit(train_x_poly_, train_y)
# The coefficients
print ('Coefficients: ', res.coef_)
print ('Intercept: ',res.intercept_)
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
XX = np.arange(0.0, 10.0, 0.1)
# Cubic model: include all three fitted terms. (The x^3 term was missing
# before, so the plotted curve did not correspond to the degree-3 fit.)
yy = res.intercept_[0] + res.coef_[0][1]*XX \
     + res.coef_[0][2]*np.power(XX, 2) \
     + res.coef_[0][3]*np.power(XX, 3)
plt.plot(XX, yy, '-r' )
plt.xlabel("Engine size")
plt.ylabel("Emission")
# Evaluate on the held-out split; transform (not fit) the fitted encoder.
test_x_poly_ = poly_.transform(test_x)
test_yy = res.predict(test_x_poly_)
print("Mean absolute error: %.2f" % np.mean(np.absolute(test_yy - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_yy - test_y) ** 2))
# r2_score signature is (y_true, y_pred)
print("R2-score: %.2f" % r2_score(test_y , test_yy) )
print(test_yy[0],test_y[0])
# -
# Double-click __here__ for the solution.
#
# <!-- Your answer is below:
#
# poly3 = PolynomialFeatures(degree=3)
# train_x_poly3 = poly3.fit_transform(train_x)
# clf3 = linear_model.LinearRegression()
# train_y3_ = clf3.fit(train_x_poly3, train_y)
# # The coefficients
# print ('Coefficients: ', clf3.coef_)
# print ('Intercept: ',clf3.intercept_)
# plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS, color='blue')
# XX = np.arange(0.0, 10.0, 0.1)
# yy = clf3.intercept_[0]+ clf3.coef_[0][1]*XX + clf3.coef_[0][2]*np.power(XX, 2) + clf3.coef_[0][3]*np.power(XX, 3)
# plt.plot(XX, yy, '-r' )
# plt.xlabel("Engine size")
# plt.ylabel("Emission")
# test_x_poly3 = poly3.fit_transform(test_x)
# test_y3_ = clf3.predict(test_x_poly3)
# print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y3_ - test_y)))
# print("Residual sum of squares (MSE): %.2f" % np.mean((test_y3_ - test_y) ** 2))
# print("R2-score: %.2f" % r2_score(test_y3_ , test_y) )
#
#
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <h2>Want to learn more?</h2>
#
# IBM SPSS Modeler is a comprehensive analytics platform that has many machine learning algorithms. It has been designed to bring predictive intelligence to decisions made by individuals, by groups, by systems – by your enterprise as a whole. A free trial is available through this course, available here: <a href="http://cocl.us/ML0101EN-SPSSModeler">SPSS Modeler</a>
#
# Also, you can use Watson Studio to run these notebooks faster with bigger datasets. Watson Studio is IBM's leading cloud solution for data scientists, built by data scientists. With Jupyter notebooks, RStudio, Apache Spark and popular libraries pre-packaged in the cloud, Watson Studio enables data scientists to collaborate on their projects without having to install anything. Join the fast-growing community of Watson Studio users today with a free account at <a href="https://cocl.us/ML0101EN_DSX">Watson Studio</a>
#
# <h3>Thanks for completing this lesson!</h3>
#
# <h4>Author: <a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a></h4>
# <p><a href="https://ca.linkedin.com/in/saeedaghabozorgi"><NAME></a>, PhD is a Data Scientist in IBM with a track record of developing enterprise level applications that substantially increases clients’ ability to turn data into actionable knowledge. He is a researcher in data mining field and expert in developing advanced analytic methods like machine learning and statistical modelling on large datasets.</p>
#
# <hr>
#
# <p>Copyright © 2018 <a href="https://cocl.us/DX0108EN_CC">Cognitive Class</a>. This notebook and its source code are released under the terms of the <a href="https://bigdatauniversity.com/mit-license/">MIT License</a>.</p>
|
ML0101EN-Reg-Polynomial-Regression-Co2-py-v1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# **Time operations**
# -
# (readapted from http://www.saltycrane.com/blog/2008/11/python-datetime-time-conversions/)
from datetime import datetime
import time
import calendar
datetime.today()
# + [markdown] slideshow={"slide_type": "-"}
# # conversions to strings
# + slideshow={"slide_type": "-"}
# datetime object to string
dt_obj = datetime.now()
date_str = dt_obj.strftime("%Y-%m-%d %H:%M:%S")
print(date_str)
# -
# time tuple to string
time_tuple = (2008, 11, 12, 13, 51, 18, 2, 317, 0)
date_str = time.strftime("%Y-%m-%d %H:%M:%S", time_tuple)
print(date_str)
# # conversions to datetime objects
# time tuple to datetime object
time_tuple = (2008, 11, 12, 13, 51, 18, 2, 317, 0)
dt_obj = datetime(*time_tuple[0:6])
print(repr(dt_obj))
# date string to datetime object
date_str = "2008-11-10 17:53:59"
dt_obj = datetime.strptime(date_str, "%Y-%m-%d %H:%M:%S")
print(repr(dt_obj))
# timestamp to datetime object in local time
timestamp = 1226527167.595983
dt_obj = datetime.fromtimestamp(timestamp)
print(repr(dt_obj))
# timestamp to datetime object in UTC
timestamp = 1226527167.595983
dt_obj = datetime.utcfromtimestamp(timestamp)
print(repr(dt_obj))
# # conversions to time tuples
# datetime object to time tuple
dt_obj = datetime.now()
time_tuple = dt_obj.timetuple()
print(repr(time_tuple))
# string to time tuple
date_str = "2008-11-10 17:53:59"
time_tuple = time.strptime(date_str, "%Y-%m-%d %H:%M:%S")
print(repr(time_tuple))
# timestamp to time tuple in UTC
timestamp = 1226527167.595983
time_tuple = time.gmtime(timestamp)
print(repr(time_tuple))
# timestamp to time tuple in local time
timestamp = 1226527167.595983
time_tuple = time.localtime(timestamp)
print(repr(time_tuple))
# # conversions to timestamps
# time tuple in local time to timestamp
time_tuple = (2008, 11, 12, 13, 59, 27, 2, 317, 0)
timestamp = time.mktime(time_tuple)
print(repr(timestamp))
# time tuple in utc time to timestamp
time_tuple_utc = (2008, 11, 12, 13, 59, 27, 2, 317, 0)
timestamp_utc = calendar.timegm(time_tuple_utc)
print(repr(timestamp_utc))
# # But also
import numpy as np
date = np.array('2015-07-04', dtype=np.datetime64)
date + np.arange(12)
import pandas as pd
date + pd.to_timedelta(np.arange(12), 'D')
# Please find more tools and examples
# * https://jakevdp.github.io/PythonDataScienceHandbook/03.11-working-with-time-series.html
# <div class="alert alert-success">
# <b>Exercise:</b> Create your own time series with dates as the index and floats as the data
# </div>
|
notebooks/4_Time_operations.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
import scipy.io
import random, math
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
# %matplotlib inline
# -
def Plot2D(T, title, x, y, num_to_plot=40):
    """Scatter the projected data T on components (x, y) and overlay
    `num_to_plot` randomly chosen face thumbnails at their projected positions.

    Relies on the module-level globals `df` (one flattened image per row),
    `num_images` and `num_pixels` set up in the data-loading cell below.
    """
    # This method picks a bunch of random samples (images in your case)
    # to plot onto the chart:
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.set_title(title)
    ax.set_xlabel('Component: {0}'.format(x))
    ax.set_ylabel('Component: {0}'.format(y))
    # Thumbnail extent: 8% of the data range along each plotted component.
    x_size = (max(T[:,x]) - min(T[:,x])) * 0.08
    y_size = (max(T[:,y]) - min(T[:,y])) * 0.08
    for i in range(num_to_plot):
        img_num = int(random.random() * num_images)
        x0, y0 = T[img_num,x]-x_size/2., T[img_num,y]-y_size/2.
        x1, y1 = T[img_num,x]+x_size/2., T[img_num,y]+y_size/2.
        # FIX: Series.reshape() was removed from pandas (deprecated 0.19,
        # gone in later releases) -- reshape the underlying ndarray instead.
        img = df.iloc[img_num,:].values.reshape(num_pixels, num_pixels)
        ax.imshow(img, aspect='auto', cmap=plt.cm.gray, interpolation='nearest', zorder=100000, extent=(x0, x1, y0, y1))
    # It also plots the full scatter:
    ax.scatter(T[:,x],T[:,y], marker='.',alpha=0.7)
    plt.show()
# +
# A .MAT file is a MATLAB file. The faces dataset could have come
# in through .png images, but we'll show you how to do that in
# another lab. For now, you'll see how to import .mats:
mat = scipy.io.loadmat('Datasets/face_data.mat')
df = pd.DataFrame(mat['images']).T
num_images, num_pixels = df.shape
num_pixels = int(math.sqrt(num_pixels))  # images are square: pixels = side**2
# Rotate the pictures, so we don't have to crane our necks:
for i in range(num_images):
    # FIX: Series.reshape() was removed from pandas -- go through .values.
    df.loc[i,:] = df.loc[i,:].values.reshape(num_pixels, num_pixels).T.reshape(-1)
# -
# TODO: Implement PCA here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D.
#
# The format is: Plot2D(T, title, x, y, num_to_plot=40):
# T is your transformed data, NDArray.
# title is your chart title
# x is the principal component you want displayed on the x-axis, Can be 0 or 1
# y is the principal component you want displayed on the y-axis, Can be 1 or 2
from sklearn.decomposition import PCA
# Linear projection of the faces onto their first three principal
# components; visualise components 0 and 1 with thumbnail overlays.
pca = PCA(n_components=3, svd_solver='full')
Tpca = pca.fit(df).transform(df)
Plot2D(Tpca, 'PCA', 0, 1, num_to_plot=40)
# TODO: Implement Isomap here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D using
# the first two components.
from sklearn import manifold
# Non-linear (Isomap, 4 neighbours) embedding into three components;
# visualise components 0 and 1.
iso = manifold.Isomap(n_neighbors=4, n_components=3)
Tiso = iso.fit(df).transform(df)
Plot2D(Tiso, 'Isomap', 0, 1, num_to_plot=40)
# TODO: Implement PCA here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D.
#
# The format is: Plot2D(T, title, x, y, num_to_plot=40):
# T is your transformed data, NDArray.
# title is your chart title
# x is the principal component you want displayed on the x-axis, Can be 0 or 1
# y is the principal component you want displayed on the y-axis, Can be 1 or 2
from sklearn.decomposition import PCA
# Same PCA projection as above, but now showing components 1 and 2.
pca = PCA(n_components=3, svd_solver='full')
Tpca = pca.fit(df).transform(df)
Plot2D(Tpca, 'PCA', 1, 2, num_to_plot=40)
# TODO: Implement Isomap here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D using
# the first two components.
from sklearn import manifold
# Isomap with a tighter neighbourhood (3 neighbours); components 1 and 2.
iso = manifold.Isomap(n_neighbors=3, n_components=3)
Tiso = iso.fit(df).transform(df)
Plot2D(Tiso, 'Isomap', 1, 2, num_to_plot=40)
# TODO: Implement Isomap here. Reduce the dataframe df down
# to THREE components. Once you've done that, call Plot2D using
# the first two components.
from sklearn import manifold
# Isomap with a wider neighbourhood (8 neighbours); components 1 and 2.
iso = manifold.Isomap(n_neighbors=8, n_components=3)
Tiso = iso.fit(df).transform(df)
Plot2D(Tiso, 'Isomap', 1, 2, num_to_plot=40)
|
Module4/assignment4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# # Step 1: Building List and Labels
# ## Collecting instances from 311 calls, crimes, blight violations, and demolition permits.
#
# Data already cleaned by [this notebook](./Cleaning_data.ipynb)
# The collection of data was saved at __../data/events.csv__
# Events collected from 311 calls, crimes, blight violations and
# demolition permits (cleaned in the Cleaning_data notebook).
data_events = pd.read_csv('../data/events.csv')
data_events.head(10)
data_events.shape
# To get rid of duplicates with same coordinates and possibly different address names
building_pool = data_events.drop_duplicates(subset=['lon','lat'])
building_pool.shape  # one candidate building per unique (lon, lat) pair
# +
# 1. sort data according to longitude
# init new_data
# 2. for each record:
# if record[lon] - prev[lon] > length:
# add new record into new_data
# else:
# find previous coords that are close
# if no coords in bbox:
# add new record into new_data
# else:
# for each of these coords:
# if record in bbox:
# append event_id
#
# At the same time, if building is assigned one permit or more for demolition, blighted will be assigned to one.
#
def gen_buildings(data):
    """Collapse event coordinates into a list of distinct buildings.

    Events are sorted by longitude; an event either falls inside an already
    known building's bounding box (its event_id is appended there, and a
    demolition permit flags that building blighted) or it seeds a new
    building record.  Returns a DataFrame with columns addr/lon/lat/
    event_id_list/blighted.
    """
    from assign_bbox import nearest_pos, is_in_bbox, raw_dist  # defined in assign_bbox.py in current dir
    new_data = {'addr': [], 'lon': [], 'lat': [], 'event_id_list': [], 'blighted': []}
    data_sorted = data.sort_values(by='lon', inplace=False)
    length = 4.11e-4  # building bbox extent in longitude degrees (from step 0)
    width = 2.04e-4   # building bbox extent in latitude degrees (from step 0)
    max_distX = abs(length/2)
    max_distY = abs(width/2)
    prev_lon = 0
    prev_lat = 0  # NOTE(review): assigned but never read; kept for parity

    def _append_building(entry, lon, lat, event_type):
        # Start a brand-new building record seeded with this single event.
        new_data['addr'].append(entry['addr'])
        new_data['lon'].append(lon)
        new_data['lat'].append(lat)
        new_data['event_id_list'].append([entry['event_id']])
        # A demolition permit (type 4) marks the building as blighted.
        new_data['blighted'].append(1 if event_type == 4 else 0)

    for i, entry in data_sorted.iterrows():
        lon = entry['lon']
        lat = entry['lat']
        b = entry['type']
        if abs(lon - prev_lon) > length:
            # Far (in longitude) from the previous event: must be new.
            _append_building(entry, lon, lat, b)
            prev_lon = lon
            prev_lat = lat
        else:
            # Possibly overlapping an existing building: search candidates.
            listX = np.array(new_data['lon'])
            listY = np.array(new_data['lat'])
            poses = nearest_pos((lon, lat), listX, listY, length, width)
            if poses.size > 0:
                has_pos = False
                for pos in poses:
                    temp_lon = new_data['lon'][pos]
                    temp_lat = new_data['lat'][pos]
                    if (abs(temp_lon - lon) < max_distX) & (abs(temp_lat - lat) < max_distY):
                        # Event lies inside this building's bbox: attach it.
                        new_data['event_id_list'][pos] += [entry['event_id']]
                        if b == 4:
                            new_data['blighted'][pos] = 1
                        has_pos = True
                if has_pos:
                    continue
            # No candidate matched: treat as a new building.
            _append_building(entry, lon, lat, b)
            prev_lon = lon
            prev_lat = lat
    return pd.DataFrame(new_data)
# -
# Collapse the coordinate pool into distinct buildings (see gen_buildings above).
buildings_concise = gen_buildings(building_pool)
buildings_concise.shape# shorter than before
buildings_concise.tail()
buildings = buildings_concise
# ### Get rid of void coordinates
# Keep only coordinates inside Detroit's rough bounding box; anything
# outside is a geocoding failure.
buildings = buildings[(buildings['lat']>42.25) & (buildings['lat']<42.5) & (buildings['lon']>-83.3) & (buildings['lon']<-82.9)]
buildings.shape
buildings['blighted'].value_counts()  # class balance of the blight label
# ## Recap of [step 0](./Building_size_estimation.ipynb)
# ## Adopting building coordinates
# It turns out that there is a slight mismatch between real world building coordinates w.r.t given data. So that only median building dimension info is reserved from the building info we got from online open data at data.detroitmi.gov.
data_dir = '../data/'
# Building footprints from data.detroitmi.gov (produced by step 0) plus
# the demolition permits, trimmed to the columns used below.
buildings_step_0 = pd.read_csv(data_dir+'buildings_step_0.csv')
permits = pd.read_csv(data_dir+'permits.csv')
permits = permits[['PARCEL_NO', 'BLD_PERMIT_TYPE', 'addr', 'lon', 'lat']]
permits['BLD_PERMIT_TYPE'].unique()
# ### For example: the very first entry of permit has coordinate:
demo01 = permits.loc[0,['PARCEL_NO','addr','lon','lat']]
print(demo01)
# ### In real world data, this corresponds to:
# Match the first permit's address against the open-data building list.
c = buildings_step_0['addr'].apply(lambda x: x == permits.loc[0,'addr'])
buildings_step_0[c][['PARCELNO','lon','lat','addr']]
# The coordinate of this building from data.detroitmi.gov is slightly different from data given in our course material.
# ### Only building dimension info is adopted for our analysis.
# Median building footprint in degrees of longitude / latitude.
length = 0.000411
width = 0.000204 # These results come from step 0.
# +
# Derive each building's bounding-box corners from its centre point and
# the median footprint, then assign a sequential building id.
buildings.loc[:,'llcrnrlon'] = buildings.loc[:,'lon'] - length/2
buildings.loc[:,'llcrnrlat'] = buildings.loc[:,'lat'] - width/2
buildings.loc[:,'urcrnrlon'] = buildings.loc[:,'lon'] + length/2
buildings.loc[:,'urcrnrlat'] = buildings.loc[:,'lat'] + width/2
buildings.loc[:,'building_id'] = np.arange(0,buildings.shape[0])
# FIX: reindex() with no arguments is a no-op; after the row filtering
# above the intent was to renumber rows.  The index is never serialized
# (to_csv uses index=False), so this only tidies the in-memory frame.
buildings = buildings.reset_index(drop=True)
# -
buildings.tail()
buildings.to_csv('../data/buildings.csv', index=False)
# ### Visualization
from bbox import draw_screen_bbox
from matplotlib import pyplot as plt
# %matplotlib inline
# Reload the persisted building list and pull the bbox corner columns
# out as a plain NumPy array for plotting.
buildings = pd.read_csv('../data/buildings.csv')
bboxes = buildings.loc[:,['llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat']]
# FIX: DataFrame.as_matrix() was removed in pandas 1.0; to_numpy() is the
# supported replacement and returns the same ndarray.
bboxes = bboxes.to_numpy()
# +
# Draw every building's bounding box over the Detroit extent and save the figure.
fig = plt.figure(figsize=(8,6), dpi=2000)
for box in bboxes:
    draw_screen_bbox(box, fig)
plt.xlim(-83.3,-82.9)
plt.ylim(42.25,42.45)
plt.savefig('../data/buildings_distribution.png')
plt.show()
# -
# ### Distribution of blighted buildings
# Select only the buildings flagged blighted and extract their bbox corners.
blighted_buildings = buildings[buildings.loc[:,'blighted'] == 1]
blighted_bboxes = blighted_buildings.loc[:,['llcrnrlon','llcrnrlat','urcrnrlon','urcrnrlat']]
# FIX: as_matrix() was removed in pandas 1.0 -> use to_numpy().
blighted_bboxes = blighted_bboxes.to_numpy()
# +
# Same plot as above, restricted to the blighted buildings only.
fig = plt.figure(figsize=(8,6), dpi=2000)
for box in blighted_bboxes:
    draw_screen_bbox(box, fig)
plt.xlim(-83.3,-82.9)
plt.ylim(42.25,42.46)
plt.title("Distribution of Blighted Buildings in Detroit")
plt.savefig('../data/blighted_buildings_distribution.png')
plt.show()
# -
|
src/Building_List_and_Label.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import ipywidgets as widgets
import pandas as pd
from IPython.display import display
from localpackage.game import game
from localpackage.utils import returnFreq
from datetime import datetime
from bqplot import *
import bqplot.pyplot as plt
from bqplot.interacts import (
PanZoom
)
import numpy as np
import math
regions=['UK','EW','EN','SC','WA','NI','GB']
lbeq=widgets.Label(value='=',layout=widgets.Layout(display="flex", justify_content="flex-start", width="10px"))
# -
# <h1><center>Ogden Multipliers</center></h1>
# # Main parameters
# +
#Game
# Main parameter widgets: discount rate (entered as a percentage),
# Ogden edition, trial date, option checkboxes and the reset button.
dr=widgets.FloatSlider(value=-0.25,min=-3, max=3,step=0.05, description='Disc. Rate:')
#td=widgets.DatePicker(description='Trial Date:',disabled=False, visible=False)
og=widgets.Dropdown(
    options=['Ogden 7', 'Ogden 8'],
    value='Ogden 8',
    description='Ogden:',
    disabled=False,
)
fatal=widgets.Checkbox(value=False,description='Fatal',disabled=False,indent=False,layout=widgets.Layout(width='100px'))
yrattained=widgets.Checkbox(value=False,description='Auto Year Attained',disabled=False,indent=False,layout=widgets.Layout(width='200px'))
projection=widgets.Checkbox(value=True,description='Principal Projection',disabled=False,indent=False,layout=widgets.Layout(width='200px'))
button = widgets.Button(
    description='RESET',
    disabled=False,
    button_style='danger', # 'success', 'info', 'warning', 'danger' or ''
    tooltip='Click me',
    icon='check' # (FontAwesome names without the `fa-` prefix)
)
dt=widgets.DatePicker(
    description='Trial date:',
    disabled=False,
    value=datetime.now(),
    continuous_update=False
)
display(widgets.HBox([dt,dr,og]))
# NOTE(review): `g` appears unused -- getGame() builds its own config dict;
# candidate for removal.
g={'discountRate':dr.value,'Ogden':7, 'projection':projection.value}
# -
# # Other options
# Option checkboxes and the reset button, shown on one row.
display(widgets.HBox([fatal, yrattained,projection, button]))
def on_value_change(change):
    # Generic observer: any widget edit triggers a full recompute/redraw.
    fillResults()
# # People
# +
#Person
def persondetails(person):
label=widgets.Label(value=person.capitalize()+":",layout=widgets.Layout(width='100px'))
age=widgets.FloatSlider(value=30,min=0,max=125, step=1,description='Age at trial:', layout=widgets.Layout(width='400px'))
sex=widgets.Dropdown(
options=['Male', 'Female'],
value='Male',
description='Sex:',
disabled=False,
layout=widgets.Layout(width='175px')
)
rg=widgets.Dropdown(
options=regions,
value='UK',
description='Region:',
disabled=False,
layout=widgets.Layout(width='175px'),
)
deltale=widgets.FloatText(value=0.0, description='deltaLE:', disabled=False, layout=widgets.Layout(width='150px'))
ageatdeath=widgets.FloatSlider(value=age.value-5,min=0,max=age.value, step=1,description='Age at death:', layout=widgets.Layout(width='400px'))
i2=[label,age,sex,rg,deltale,ageatdeath]
h2=widgets.HBox(i2)
display(h2)
return age, sex, rg, deltale, ageatdeath, label, h2
def getvaluesPerson(person):
    """Snapshot the first six widgets' current values for *person*:
    (age, sex, region, deltaLE, age_at_death, label_text)."""
    widget_row = persons[person]
    return tuple(widget.value for widget in widget_row[:6])
# +
#Row person
def rowPerson(person):
    """Build and display one calculation row (age range, frequency,
    discount options and a result output box) for *person*."""
    age, sex, region, deltale, ageatdeath, label=getvaluesPerson(person)
    label=widgets.Label(value=person.capitalize()+":",layout=widgets.Layout(width='100px'))
    r=widgets.FloatRangeSlider(
        value=[age, 125],
        min=0,
        max=125,
        step=1,
        description='Age Range:',
        disabled=False,
        continuous_update=True,
        orientation='horizontal',
        readout=True,
        readout_format='.1f',
        layout=widgets.Layout(width='400px')
    )
    fr=widgets.Text(value='Y', description='Freq:',layout=widgets.Layout(width='175px'))
    op=widgets.Text(value='AMI', description='Discounts:',layout=widgets.Layout(width='150px'))
    # The bordered output box shows the computed multiplier after the '=' sign.
    out=widgets.Output(layout=widgets.Layout(display="flex", justify_content="flex-start", width="60px", border='solid'))
    i4=[label,r,fr,op,lbeq,out]
    h4=widgets.HBox(i4)
    display(h4)
    # Return order is relied on by getvaluesRow() and fillResults() -- do not reorder.
    return r, fr, op, label, h4, out
def getvaluesRow(person):
    """Return (from_age, to_age, freq, options, label_text) for one row."""
    age_range, freq, options, label = rows[person][:4]
    lo, hi = age_range.value
    return lo, hi, freq.value, options.value, label.value
# +
def getdataSet(og, region):
    """Return the mortality data-set descriptor for an Ogden edition.

    og     -- 'Ogden 7' or 'Ogden 8'
    region -- one of the codes in `regions` (e.g. 'UK', 'EW')

    Raises ValueError for an unknown edition.  (Previously an unknown
    edition merely printed 'Wrong Ogden' and fell through to return None,
    which only surfaced later as an obscure failure inside game().)
    """
    editions = {
        'Ogden 7': {'year': 2008, 'yrAttainedIn': 2011},
        'Ogden 8': {'year': 2018, 'yrAttainedIn': 2022},
    }
    if og not in editions:
        raise ValueError('Wrong Ogden: {!r}'.format(og))
    base = editions[og]
    return {'year': base['year'], 'region': region, 'yrAttainedIn': base['yrAttainedIn']}
def getGame():
    """Assemble the game configuration from the current widget state and
    return a computed `game` object.

    The claimant dict gains an 'aad' (age at death) entry only for fatal
    claims; the dependent is tied to the claimant via 'dependenton'.
    """
    claimants=[]
    age, sex, region, deltale, ageatdeath, label=getvaluesPerson('CLAIMANT')
    if fatal.value:
        claimant = {'name': 'CLAIMANT', 'age': age,'aad':ageatdeath, 'sex': sex, 'dataSet': getdataSet(og.value,region), 'deltaLE': deltale, 'cont':1}
    else:
        claimant = {'name': 'CLAIMANT', 'age': age, 'sex': sex, 'dataSet': getdataSet(og.value,region), 'deltaLE': deltale, 'cont':1}
    age, sex, region, deltale, ageatdeath,label=getvaluesPerson('DEPENDENT')
    dependent= {'name': 'DEPENDENT', 'age': age, 'sex': sex, 'dependenton':'CLAIMANT', 'dataSet':getdataSet(og.value,region),'deltaLE': deltale, 'cont':1}
    claimants.append(claimant)
    claimants.append(dependent)
    # Discount rate is entered as a percentage -> divide by 100.
    # NOTE(review): 'Ogden' is hard-coded to 7 here even when the dropdown
    # selects Ogden 8; the edition otherwise flows in via dataSet -- confirm intended.
    eg={'game':{'trialDate': dt.value.strftime("%d/%m/%Y"), 'projection': projection.value, 'autoYrAttained':yrattained.value,'discountRate':dr.value/100, 'Ogden':7, 'claimants':claimants}}
    return game(eg)
def stringifyResult(result):
    """Format a (past, interest, future, total) result as one summary line."""
    past = result[0]
    interest = result[1]
    future = result[2]
    total = result[3]
    return "Past (" + past + ") + Int(" + interest + ") + Future(" + future + ") = " + total
def getResult(person, thisGame):
    """Compute the multiplier result tuple for one person's row settings."""
    fromAge, toAge, frequency, options, label = getvaluesRow(person)
    claimant = thisGame.getClaimant(person)
    if fromAge == toAge:
        # Single-age request: pass one age only.
        return claimant.M(fromAge, freq=frequency, options=options)
    return claimant.M(fromAge, toAge, freq=frequency, options=options)
def agegap():
    """Age difference (claimant minus dependent) at trial."""
    claimant_age = getvaluesPerson('CLAIMANT')[0]
    dependent_age = getvaluesPerson('DEPENDENT')[0]
    return claimant_age - dependent_age
def getLeftX(fat):
    """Left x-axis limit: one year below the smallest relevant claimant age.

    In a fatal claim the deceased's age at death is also considered.
    (Dependent values are fetched for symmetry but do not affect the limit.)
    """
    ageC, _, _, _, ageatdeathC, _ = getvaluesPerson('CLAIMANT')
    getvaluesPerson('DEPENDENT')  # kept: mirrors the original call order
    fromAgeC = getvaluesRow('CLAIMANT')[0]
    getvaluesRow('DEPENDENT')     # kept: mirrors the original call order
    candidates = [ageC, fromAgeC]
    if fat:
        candidates.append(ageatdeathC)
    return min(candidates) - 1
def getageatDeath():
    """Claimant's current 'age at death' slider value."""
    return getvaluesPerson('CLAIMANT')[4]
def updateBQplot(person, thisGame,Res):
    """Refresh *person*'s bqplot figure in place: title, curve, trial-age and
    age-at-death markers, and the shaded fill over the selected age range.

    The dependent's x-axis is shifted left by the claimant/dependent age gap
    so both plots line up against the claimant's timeline.
    """
    agap=0
    if person=='DEPENDENT': agap=agegap()
    # Normalise the discount-option letters to upper case as a side effect.
    rows['CLAIMANT'][2].value=rows['CLAIMANT'][2].value.upper()
    rows['DEPENDENT'][2].value=rows['DEPENDENT'][2].value.upper()
    age, sex, region, deltale, ageatdeath, label=getvaluesPerson(person)
    fromAge,toAge, frequency, options, label=getvaluesRow(person)
    pdf_fig, pdf_line, pdf_age, pdf_aad, pdf_fill, pdf_start, pdf_end= pdf[person]
    curve=thisGame.getClaimant(person).getCurve()
    if not fromAge==toAge:
        title=curve.getTitle(result=Res,fromAge=fromAge,toAge=toAge,freq=frequency,cont=1,options=options)
    else:
        title=curve.getTitle(result=Res,fromAge=fromAge,freq=frequency,cont=1,options=options)
    #The plot
    LxNoI,Lx,Rng=curve.getCurve(options=options,cont=1)
    pdf_fig.title=title
    #scale
    leftX=getLeftX(fatal.value)-agap
    pdf_line.scales['x'].min=leftX
    pdf_line.scales['x'].max=125-agap
    pdf_line.scales['y'].min=0
    pdf_line.scales['y'].max=2
    pdf_line.x=Rng
    pdf_line.y=Lx
    #age at trial
    pdf_age.x=[age, age]
    pdf_age.y=[0, 1]
    #Age at death
    pdf_aad.x=[getageatDeath()-agap,getageatDeath()-agap]
    pdf_aad.y=[0,1]
    #Sort out fill range
    mask=(Rng>=fromAge) & (Rng<=toAge)
    R=Rng[mask]
    L=Lx[mask]
    #add interp points so the fill meets the curve exactly at both ends
    Ll=np.interp(fromAge,Rng,Lx)
    Ul=np.interp(toAge,Rng,Lx)
    R=np.insert(R,0,fromAge)
    L=np.insert(L,0,Ll)
    R=np.append(R,toAge)
    L=np.append(L,Ul)
    #single point: vertical boundary lines at fromAge and toAge
    y1=np.interp(fromAge,Rng,Lx)
    pdf_start.x=[fromAge,fromAge]
    pdf_start.y=[0,y1]
    y2=np.interp(toAge,Rng,Lx)
    pdf_end.x=[toAge,toAge]
    pdf_end.y=[0,y2]
    #the fill
    pdf_fill.x=R
    pdf_fill.y=L
def fillResults():
    """Recompute both people's multipliers, print the totals into their
    output boxes, and refresh the plots (dependent only for fatal claims)."""
    thisGame=getGame()
    rC, frC, opC, labelRC, h4C, outC=rows['CLAIMANT']
    rD, frD, opD, labelRD, h4D, outD=rows['DEPENDENT']
    outC.clear_output()
    outD.clear_output()
    with outC:
        cRes=getResult('CLAIMANT',thisGame)
        cResStr=["{:2.2f}".format(f) for f in cRes]
        # Index 3 is the grand total (past + interest + future).
        print(cResStr[3])
        outC.clear_output(wait=True)
    with outD:
        dRes=getResult('DEPENDENT',thisGame)
        dResStr=["{:2.2f}".format(f) for f in dRes]
        print (dResStr[3])
        outD.clear_output(wait=True)
    updateBQplot('CLAIMANT',thisGame,cRes)
    if fatal.value: updateBQplot('DEPENDENT',thisGame,dRes)
def setFatal():
    """Switch the UI between a normal claim and a fatal-accident claim.

    Fatal: relabel the claimant as 'Deceased', reveal the dependent's row
    and figure, and show the age-at-death slider/markers.
    Non-fatal: clear the dependent's figure and hide its widgets again.
    """
    ageC, sexC, regionC, deltaleC, ageatdeathC, labelPC, hPC=persons['CLAIMANT']
    ageD, sexD, regionD, deltaleD, ageatdeathD, labelPD, hPD=persons['DEPENDENT']
    rC, frC, opC, labelRC, h4C, outC=rows['CLAIMANT']
    rD, frD, opD, labelRD, h4D, outD=rows['DEPENDENT']
    pdf_figC, pdf_lineC, pdf_ageC, pdf_aadC, pdf_fillC, pdf_startC, pdf_endC= pdf['CLAIMANT']
    if fatal.value:#it's a fatal game
        pdf['DEPENDENT']=drawFigure('DEPENDENT')
        pdf_figD, pdf_lineD, pdf_ageD, pdf_aadD, pdf_fillD, pdf_startD, pdf_endD=pdf['DEPENDENT']
        labelPC.value='Deceased:'
        labelRC.value='Deceased:'
        # layout.display=None restores the default (visible) display mode.
        hPD.layout.display=None
        h4D.layout.display=None
        ageatdeathD.layout.display='none' #hide slider for age at death of D
        ageatdeathC.layout.display=None #show slider for age at death of C
        pdf_aadC.display_legend=True
        pdf_aadD.display_legend=True
        pdf_aadC.visible=True
        pdf_aadD.visible=True
    else:
        plt.figure('DEPENDENT')
        plt.figure('DEPENDENT').title=''
        plt.clear()
        labelPC.value='Claimant:'
        labelRC.value='Claimant:'
        ageatdeathC.layout.display='none' #hide slider for age at death of C
        pdf_aadC.display_legend=False
        pdf_aadC.visible=False
        hPD.layout.display='none'
        h4D.layout.display='none'
def on_fatal_change(change):
    # Observer for the 'Fatal' checkbox: rebuild the UI for the new mode.
    setFatal()
def on_value_change_age(change):
    """Handler for claimant age / age-at-death edits.

    Clamps the age-at-death slider so it can never exceed the age at
    trial, then recomputes all results.  (A leftover debug
    `print(dt.value)` has been removed.)
    """
    ageC, sex, region, deltale, ageatdeathC, label=getvaluesPerson('CLAIMANT')
    a,s,r,d,aad,lbl,h=persons['CLAIMANT']
    if ageatdeathC>ageC:
        aad.value=ageC
        aad.max=ageC
    fillResults()
def delete():
    """Close (dispose of) every widget created for both people."""
    # Plain loops instead of list comprehensions used purely for their
    # side effects (which built and discarded throwaway lists).
    for group in (persons['CLAIMANT'], persons['DEPENDENT'],
                  rows['CLAIMANT'], rows['DEPENDENT']):
        for widget in group:
            widget.close()
def setEventHandlers():
    """Wire widget 'value' observers so any edit recomputes the results.

    Claimant detail indices 1,2,3,5,6 get the generic handler; the
    claimant's age (0) and age-at-death (4) get the dedicated clamping
    handler instead.  The last element of each row tuple is the Output
    box, which has no 'value' to observe.
    """
    for idx in (1, 2, 3, 5, 6):
        persons['CLAIMANT'][idx].observe(on_value_change, names='value')
    for widget in persons['DEPENDENT']:
        widget.observe(on_value_change, names='value')
    for widget in rows['CLAIMANT'][:-1]:
        widget.observe(on_value_change, names='value')
    for widget in rows['DEPENDENT'][:-1]:
        widget.observe(on_value_change, names='value')
    persons['CLAIMANT'][4].observe(on_value_change_age, names='value') #claimant's age at death
    persons['CLAIMANT'][0].observe(on_value_change_age, names='value') #claimant's age
    # FIX: og.observe was registered twice, making the handler fire twice
    # (two full recomputes) per edition change; register it once.
    og.observe(on_value_change, names='value')
    dr.observe(on_value_change, names='value')
    dt.observe(on_value_change, names='value')
    yrattained.observe(on_value_change, names='value')
    projection.observe(on_value_change, names='value')
    fatal.observe(on_fatal_change, names='value')
# -
def drawFigure(person):
    """Create and display *person*'s bqplot figure from scratch.

    Returns the tuple (figure, curve line, trial-age marker, age-at-death
    marker, fill, range-start line, range-end line) that updateBQplot()
    later mutates in place.  Uses the module-level `thisGame`.
    """
    curve=thisGame.getClaimant(person).getCurve()
    age, sex, region, deltale, ageatdeath, label=getvaluesPerson(person)
    fromAge,toAge, frequency, options, label=getvaluesRow(person)
    if not fromAge==toAge:
        title=curve.getTitle(result=[0,0,0,0],fromAge=fromAge,toAge=toAge,freq=frequency,cont=1,options=options)
    else:
        title=curve.getTitle(result=[0,0,0,0],fromAge=fromAge,freq=frequency,cont=1,options=options)
    st,en,factor,timeInterval=returnFreq(frequency)
    #The plot
    LxNoI,Lx,Rng=curve.getCurve(options=options,cont=1)
    axes_options={'x':dict(label='Age'),'y':dict(label='Multiplier')}
    panzoom = PanZoom(scales={'x': [LinearScale()], 'y': [LinearScale(allow_padding=False)]})
    pdf_fig=plt.figure(person, interaction=panzoom)
    pdf_fig.title=''
    plt.scales(scales={'x':LinearScale(),'y':LinearScale(allow_padding=False)})
    #scale: shift the dependent's axis left by the claimant/dependent age gap
    agap=0
    if person=='DEPENDENT': agap=agegap()
    leftX=getLeftX(fatal.value)-agap
    pdf_fig.layout.height='400px'
    pdf_line=plt.plot(Rng,Lx, axes_options=axes_options)
    pdf_line.scales['x'].min=leftX
    pdf_line.scales['x'].max=125-agap
    pdf_line.scales['y'].min=0
    pdf_line.scales['y'].max=2
    pdf_age=plt.vline(age,colors=['green'],labels=['Age at trial'], display_legend=True)
    pdf_aad=plt.vline(ageatdeath,colors=['black'],labels=['Age at death'],display_legend=fatal.value,visible=True)
    mask=(Rng>=fromAge) & (Rng<=toAge)#Boolean mask
    R=Rng[mask]
    L=Lx[mask]
    pdf_fill=plt.plot(R,L,fill='bottom',preserve_domain={'x':True,'y':True},colors=['red'],fill_opacities=[0.5])
    #single point: vertical boundary lines at fromAge and toAge
    y1=np.interp(fromAge,Rng,Lx)
    pdf_start=plt.plot(colors=['red'] ,preserve_domain={'x': True, 'y': False}, x=[fromAge, fromAge], y=[0, y1])
    y2=np.interp(toAge,Rng,Lx)
    pdf_end=plt.plot(colors=['red'] ,preserve_domain={'x': True, 'y': False}, x=[toAge, toAge], y=[0, y2])
    tb=Toolbar(figure=pdf_fig)
    display(pdf_fig,tb)
    return pdf_fig, pdf_line, pdf_age, pdf_aad, pdf_fill, pdf_start, pdf_end
pdf={}  # person -> bqplot artefacts (fig, line, age/aad markers, fill, start, end)
rows={}  # person -> calculation-row widgets (range, freq, options, label, hbox, out)
persons={}  # person -> detail widgets (age, sex, region, deltaLE, aad, label, hbox)
# +
thisGame=None  # module-level game object shared with drawFigure()
def create():
    """Build all widgets, wire the observers, and draw both figures once."""
    persons['CLAIMANT']=persondetails('CLAIMANT')
    persons['DEPENDENT']=persondetails('DEPENDENT')
    rows['CLAIMANT']=rowPerson('CLAIMANT')
    rows['DEPENDENT']=rowPerson('DEPENDENT')
    # Dependent rows default to the extra 'D' (deceased mortality) discount.
    rows['DEPENDENT'][2].value='AMID'
    setEventHandlers()
    global thisGame
    thisGame=getGame()
    pdf['CLAIMANT']=drawFigure('CLAIMANT')
    pdf['DEPENDENT']=drawFigure('DEPENDENT')
    on_value_change(None)
    setFatal()
def reset(e):
    """Restore every widget to its default value (RESET button handler)."""
    dt.value=datetime.now()
    dr.value=-0.25
    og.value='Ogden 8'
    fatal.value=False
    yrattained.value=False
    # Claimant defaults: indices are (0 age, 1 sex, 2 region, 3 deltaLE, 4 aad).
    pC=persons['CLAIMANT']
    pC[0].value=30
    pC[1].value='Male'
    pC[2].value='UK'
    pC[3].value=0
    pC[4].value=25
    rC=rows['CLAIMANT']
    rC[0].value=[30,125]
    rC[1].value='Y'
    rC[2].value="AMI"
    pD=persons['DEPENDENT']
    pD[0].value=40
    pD[1].value='Male'
    pD[2].value='UK'
    pD[3].value=0
    pD[4].value=25
    rD=rows['DEPENDENT']
    rD[0].value=[30,125]
    rD[1].value='Y'
    rD[2].value="AMID"
button.on_click(reset)
create()  # build the UI once at import time
reset(None)  # then normalise all widgets to their defaults
# -
# # Notes
# * **Date of trial**:
# * The date of trial is assumed to be today for the purposes of calculating interest and the year attained by the person (see below).
# * **Discounts**:
# * 'A' means discount for accelerated receipt
# * 'M' means discount for mortality
# * 'I' means add interest on past losses
# * 'D' means discount for the mortality of the deceased since death
# * e.g. 'AM' means discount for accelerated receipt and mortality only
# * **Freq**: i.e. frequency of the loss
# * e.g. '3Y' means loss every 3 years
# * e.g. '6M' means loss every 6 months
# * e.g. '3.5W' means loss every 3.5 weeks
# * e.g. '4D' means loss every 4 days
# * e.g. '<4Y' means loss every 4 years with first loss at the start of the period
# * e.g. '4Y>' means loss every 4 years with first loss after 4 years
# * **Auto year attained**:
# * By default the Ogden Tables assume the person has survived to a given year (Ogden 7: 2011, Ogden 8: 2022).
# * This may not be true. For example: a Claimant alive today has survived until now; a deceased person who died in 2015 only survived until 2015.
# * If this option is selected, the correct year to which the person has survived is used. This can make a significant difference for dependency claims.
# * **Principal Projection**
# * When selected, the principal projection is used. The principal projection is the middle of a range of ONS estimates for improvements in mortality from the actual observed mortality in 2008 (Ogden 7) or 2018 (Ogden 8).
# * When not selected, mortality is assumed to be that projected in the year attained but with no improvements thereafter.
#
# # Contact
# * **Author**: <NAME>, <EMAIL>
# * **Open source**: code, including ONS data tables, are available to view on github, https://github.com/chapmanwilliam/Ogden8
# * **Credit**: With thanks to <NAME> of the Government Actuarial Service for his help with the Ogden 8 source data.
|
Ogden.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python
# language: python
# name: conda-env-python-py
# ---
# <center>
# <img src="https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-PY0101EN-SkillsNetwork/IDSNlogo.png" width="300" alt="cognitiveclass.ai logo" />
# </center>
#
# <h3 align=center>For Loops </h3>
#
# Use loops to print out the elements in the list <code>A</code>
#
# +
# Iterate directly over the list's elements -- no index needed.
A = [3, 4, 5]
for element in A:
    print(element)
# -
# <details><summary>Click here for the solution</summary>
#
# ```python
# for i in A:
# print(i)
#
# ```
#
# </details>
#
#
# <h3 align=center>While Loops</h3>
#
# Find the value of <code> x </code> that will print out the sequence <code> 1,2,..,10 </code>
#
# With x = 11 the loop prints 1,2,...,10 and stops once y reaches x.
x = 11
y = 1
while y < x:
    print(y)
    y += 1
# <details><summary>Click here for the solution</summary>
#
# ```python
# x = 11
#
# ```
#
# </details>
#
# <hr>
#
# <h3 align="center"> © IBM Corporation 2020. All rights reserved. <h3/>
#
|
notebooks/Module3-Pyhton-Programming-Fundamentals/PY0101EN-3.2_notebook_quizz_Loops.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# AWS Glue ETL job: reads the NYC TLC "green" taxi table from the Glue Data
# Catalog, pins column types, resolves ambiguous types, drops null-only fields
# and writes the result to S3 as Parquet. The "## @..." annotations are
# Glue Studio / code-generator metadata and are kept as-is.
import sys
from awsglue.transforms import *
from awsglue.utils import getResolvedOptions
from pyspark.context import SparkContext
from awsglue.context import GlueContext
from awsglue.job import Job
## @params: [JOB_NAME]
# JOB_NAME is supplied by the Glue runtime on the command line.
args = getResolvedOptions(sys.argv, ['JOB_NAME'])
sc = SparkContext()
glueContext = GlueContext(sc)
spark = glueContext.spark_session
job = Job(glueContext)
job.init(args['JOB_NAME'], args)
## @type: DataSource
## @args: [database = "db_tlc_etl_green_car", table_name = "green", transformation_ctx = "datasource0"]
## @return: datasource0
## @inputs: []
# Source: catalog table db_tlc_etl_green_car.green (a DynamicFrame).
datasource0 = glueContext.create_dynamic_frame.from_catalog(database = "db_tlc_etl_green_car", table_name = "green", transformation_ctx = "datasource0")
## @type: ApplyMapping
## @args: [mapping = [("vendorid", "long", "vendorid", "long"), ("lpep_pickup_datetime", "string", "lpep_pickup_datetime", "string"), ("lpep_dropoff_datetime", "string", "lpep_dropoff_datetime", "string"), ("ratecodeid", "long", "ratecodeid", "long"), ("pulocationid", "long", "pulocationid", "long"), ("dolocationid", "long", "dolocationid", "long"), ("passenger_count", "long", "passenger_count", "long"), ("trip_distance", "double", "trip_distance", "double"), ("fare_amount", "double", "fare_amount", "double"), ("extra", "double", "extra", "double"), ("mta_tax", "double", "mta_tax", "double"), ("tip_amount", "double", "tip_amount", "double"), ("tolls_amount", "double", "tolls_amount", "double"), ("improvement_surcharge", "double", "improvement_surcharge", "double"), ("total_amount", "double", "total_amount", "double"), ("payment_type", "long", "payment_type", "long"), ("trip_type", "long", "trip_type", "long")], transformation_ctx = "applymapping1"]
## @return: applymapping1
## @inputs: [frame = datasource0]
# Identity mapping that pins each column name to an explicit source/target type.
applymapping1 = ApplyMapping.apply(frame = datasource0, mappings = [("vendorid", "long", "vendorid", "long"), ("lpep_pickup_datetime", "string", "lpep_pickup_datetime", "string"), ("lpep_dropoff_datetime", "string", "lpep_dropoff_datetime", "string"), ("ratecodeid", "long", "ratecodeid", "long"), ("pulocationid", "long", "pulocationid", "long"), ("dolocationid", "long", "dolocationid", "long"), ("passenger_count", "long", "passenger_count", "long"), ("trip_distance", "double", "trip_distance", "double"), ("fare_amount", "double", "fare_amount", "double"), ("extra", "double", "extra", "double"), ("mta_tax", "double", "mta_tax", "double"), ("tip_amount", "double", "tip_amount", "double"), ("tolls_amount", "double", "tolls_amount", "double"), ("improvement_surcharge", "double", "improvement_surcharge", "double"), ("total_amount", "double", "total_amount", "double"), ("payment_type", "long", "payment_type", "long"), ("trip_type", "long", "trip_type", "long")], transformation_ctx = "applymapping1")
## @type: ResolveChoice
## @args: [choice = "make_struct", transformation_ctx = "resolvechoice2"]
## @return: resolvechoice2
## @inputs: [frame = applymapping1]
# Columns with ambiguous types are wrapped in a struct of the candidate types.
resolvechoice2 = ResolveChoice.apply(frame = applymapping1, choice = "make_struct", transformation_ctx = "resolvechoice2")
## @type: DropNullFields
## @args: [transformation_ctx = "dropnullfields3"]
## @return: dropnullfields3
## @inputs: [frame = resolvechoice2]
dropnullfields3 = DropNullFields.apply(frame = resolvechoice2, transformation_ctx = "dropnullfields3")
## @type: DataSink
## @args: [connection_type = "s3", connection_options = {"path": "s3://data-etl-o-target-0/result"}, format = "parquet", transformation_ctx = "datasink4"]
## @return: datasink4
## @inputs: [frame = dropnullfields3]
# Sink: Parquet files under s3://data-etl-o-target-0/result
datasink4 = glueContext.write_dynamic_frame.from_options(frame = dropnullfields3, connection_type = "s3", connection_options = {"path": "s3://data-etl-o-target-0/result"}, format = "parquet", transformation_ctx = "datasink4")
job.commit()
# -
result:
waiting for the job to finish, which will produce the target files
s3://data-etl-o-target-0/result/*.parquet
|
glue-scripts_simple.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.1 64-bit (''ex_design_analysis'': pipenv)'
# metadata:
# interpreter:
# hash: d93d3809a412eeca67f3d81705e284a9fa16a5e112e379b94b99b867ad05122c
# name: python3
# ---
# # Friedman test
# +
import numpy as np
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
import seaborn as sns
import os
# # %matplotlib inline
plt.style.use('seaborn-darkgrid')
# -
# Path to the example data: one row per participant, one column per condition.
CSV_PATH = '../../../../../data/statistical_test/friedman_ex_data.csv'
ALPHA = 0.05  # significance level used by the Friedman test below
NUM_OF_PARTICIPANTS = 8  # sample size, used for the standard-error computation
# OUTPUT_PATH = 'output/'
# if not os.path.isdir(OUTPUT_PATH):
# os.makedirs(OUTPUT_PATH)
data = pd.read_csv(CSV_PATH, index_col=0)
# NOTE(review): display() is an IPython builtin; this cell only runs inside a notebook.
display(data)
# + tags=[]
# Compare groups
# Friedman test: non-parametric repeated-measures comparison of the three conditions.
_, p = stats.friedmanchisquare(data['Standard'], data['Prediction'], data['Speech'])
print('p={:.5f}'.format(p))
if p > ALPHA:
    print('Same distributions')
    # NOTE(review): exit() stops the interpreter/kernel; abrupt inside a notebook — confirm intended.
    exit()
else:
    print('Different distributions. You can do a post-hoc test.')# Compare groups
# -
# ## Post-hoc test (Wilcoxon test)
# P-value needs to be corrected to avoid multiplicity of statistical tests.
#
# I use Bonferroni correction here.
# + tags=[]
# Pairwise post-hoc Wilcoxon signed-rank tests. Each p-value is multiplied by 3
# (Bonferroni correction for the three comparisons).
pairs = [('Standard', 'Prediction'), ('Prediction', 'Speech'), ('Speech', 'Standard')]
for first, second in pairs:
    _, p = stats.wilcoxon(data[first], data[second])
    print('{} vs {}: p={:.5f}'.format(first, second, p * 3))
# -
# ## Visualization
# 3 different data visualizations (Bar plot, Box plot, Violin plot)
# ### Bar plot (average & standard error)
# pros: easy to compare multiple data (average)
#
# cons: less informative (average and standard error)
# +
# Bar plot of the mean performance per condition with standard-error bars.
# Mean
standard_mu = data['Standard'].mean()
prediction_mu = data['Prediction'].mean()
speech_mu = data['Speech'].mean()
# Standard deviation
standard_sd = data['Standard'].std()
prediction_sd = data['Prediction'].std()
speech_sd = data['Speech'].std()
# Standard error = sd / sqrt(sample size)
standard_se = standard_sd / np.sqrt(NUM_OF_PARTICIPANTS)
prediction_se = prediction_sd / np.sqrt(NUM_OF_PARTICIPANTS)
speech_se = speech_sd / np.sqrt(NUM_OF_PARTICIPANTS)
y = np.array([standard_mu, prediction_mu, speech_mu])
e = np.array([standard_se, prediction_se, speech_se])
x = np.array(["Standard", 'Prediction', 'Speech'])
x_position = np.arange(len(x))
# BUG FIX: matplotlib's error-bar keyword is 'capthick'; the original 'capthik'
# typo was never applied (and is rejected outright by recent matplotlib versions).
error_bar_set = dict(lw=1, capthick=1, capsize=10)
fig = plt.figure(figsize=(10, 6))
ax = fig.add_subplot(1, 1, 1)
ax.bar(x_position, y, yerr=e, tick_label=x, error_kw=error_bar_set, color=['salmon', 'palegreen', 'aqua'])
ax.set_xlabel('Conditions', fontsize=14)
ax.set_ylabel('Performance', fontsize=14)
ax.set_ylim(1, 5)
# plt.savefig(os.path.join(OUTPUT_PATH, 'friedman_bar.pdf'))
plt.show()
# -
# ### Boxplot
# pros:
# more informative than bar plot
#
# cons:
# unable to understand the data distribution (box plot only show summary statistics)
# +
# error bar: min/max
# box: 25/50(median)/75 percentile
# circle: outlier (1.5 times bigger/smaller than box)
# Box plot per condition: box = 25/50(median)/75 percentiles, whiskers = min/max,
# circles = outliers beyond 1.5x the box height.
fig, ax = plt.subplots(figsize=(10, 6))
ax.boxplot(
    [data['Standard'], data['Prediction'], data['Speech']],
    labels=['Standard', 'Prediction', 'Speech'],
)
ax.set_xlabel('Conditions', fontsize=14)
ax.set_ylabel('Performance', fontsize=14)
ax.set_ylim(1, 5)
# plt.savefig(os.path.join(OUTPUT_PATH, 'friedmanfriedman_box.pdf'))
plt.show()
# -
# ### Violin plot
# pros: more informative than box plot (because violin plot represents data distribution)
#
# cons:less popular (their meaning can be harder to grasp for many readers not familiar with the violin plot representation)
# +
# Similar to box plot, but also represents kernel density estimation (estimated distribution of data)
# Violin plot: like the box plot but also shows a kernel density estimate
# of each condition's distribution.
fig, ax = plt.subplots(figsize=(10, 6))
sns.violinplot(
    data=[data['Standard'], data['Prediction'], data['Speech']],
    palette=['salmon', 'palegreen', 'aqua'],
)
ax.set_xticklabels(['Standard', 'Prediction', 'Speech'])
ax.set_xlabel('Conditions', fontsize=14)
ax.set_ylabel('Performance', fontsize=14)
ax.set_ylim(0, 5)
# plt.savefig(os.path.join(OUTPUT_PATH, 'friedmanfriedman_violin.pdf'))
plt.show()
# -
|
src/statistical_test/mean/multiple/non-parametric/friedman.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Critères d'évaluation en apprentissage supervisé
#
# ## Théorie
#
# ### Introduction
#
# Tout au long de ce cours nous avons étudié différentes méthodes d'apprentissage supervisé, et à chaque fois nous avons cherché à optimiser l'_accuracy_, c'est-à-dire à minimiser l'erreur de généralisation. Mais cette approche pour évaluer la pertinence de notre modèle et sa robustesse est-elle suffisante ? En particulier dans ce notebook nous allons nous intéresser au cas de la classification binaire (deux classes), nous allons voir que dans le cas de la classification, plusieurs métriques interviennent, elles peuvent être contradictoires (dans le sens où elles varient de manière opposée) et il sera donc nécessaire de faire un compromis sur certaines grandeurs en fonction de l'application que l'on considère.
#
# Considérons un exemple de classification binaire que nous déroulerons tout au long de cette partie théorique pour illustrer nos propos. Nous allons considérer une tâche de classification simple consistant à identifier des chiens dans des images, notre système prend donc en entrée des images et prédit en sortie la présence ou l'absence de chien dans cette image.
#
# ### Matrice de confusion
#
# Une métrique très souvent utilisée en classification est la matrice de confusion $\mathbf{M_c}$ qui résume de façon compacte les résultats de la classification:
#
# $$ \mathbf{M_c} = \left[ \begin{matrix}
# TP & FP \\
# FN & TN
# \end{matrix} \right]$$
#
# $TP$ : _True positive_, le nombre d'échantillons qui ont été labélisés comme contenant des chiens et qui contiennent effectivement des chiens.
#
# $FP$ : _False positive_, ou erreur de type I, le nombre d'échantillons qui ont été labelisés comme contenant des chiens alors qu'ils n'en contiennent pas.
#
# $FN$ : _False negative_, ou erreur de type II, le nombre d'échantillons qui ont été labelisés comme ne contenant pas de chiens alors qu'ils en contiennent.
#
# $TN$ : _True negative_, le nombre d'échantillons qui ont été labélisés comme ne contenant pas de chiens et qui n'en contiennent effectivement pas.
#
# Nous pouvons ensuite définir le nombre $P$ d'échantillons positifs et le nombre $N$ d'échantillons négatifs (en réalité, pas après prédiction). La métrique que nous avons utilisé tout au long du cours et qui est souvent utilisé est l'**accuracy**, celle-ci est définie par:
#
# $$ \mathbf{accuracy} = \frac{TP + TN}{P + N} = \frac{TP + TN}{TP + TN + FP + FN} = \frac{tr(\mathbf{M_c})}{TOTAL}$$
#
# Concrètement, elle représente le ratio entre le nombre d'échantillons bien classés (positifs ou négatifs) et le nombre total d'échantillons. Elle ne donne cependant aucune information sur les échantillons qui sont mal classés. Elle est souvent mal utilisée car elle n'est pertinente que dans le cas où nous avons autant d'échantillons positifs que négatifs à classer. Il faut aussi que les prédictions et les erreurs de prédiction soient de même importance, ce qui est rarement le cas.
#
# On utilise également deux autres grandeurs, la _**precision**_ et le _**recall**_, celles-ci sont définies par:
#
# $$ \mathbf{precision} = \frac{TP}{TP + FP}$$
#
# $$ \mathbf{recall} = \frac{TP}{TP + FN} = \frac{TP}{P} = TPR $$
#
# $TPR$ est le _True Positive Rate_, on peut également définir le _False Positive Rate_, $FPR = 1 - TPR = \frac{FP}{N}$.
#
# L'image suivante résume assez bien ce que nous venons de voir, les éléments bien classés sont sur la partie gauche de l'image.
#
# <img src="img/Precisionrecall.svg" title="Source: https://en.wikipedia.org/wiki/Precision_and_recall">
#
#
# Enfin, pour essayer de concilier _recall_ et _precision_, il est possible d'utiliser le _F-score_, il s'agit de la moyenne harmonique des deux :
#
# $$ \textrm{F-score} = \mathbf{F_1} = 2\cdot \frac{\mathbf{precision} \cdot \mathbf{recall}}{\mathbf{precision} + \mathbf{recall}}$$
#
# On définit plus généralement la mesure $\mathbf{F_\beta}$ qui permet de donner plus de poids à l'un ou à l'autre suivant la valeur de $\beta$ (pour $\beta \in \mathbb{R}^+$) :
# $$\mathbf{F_\beta} = (1+\beta^2)\cdot \frac{\mathbf{precision} \cdot \mathbf{recall}}{\beta^2 \cdot \mathbf{precision} + \mathbf{recall}} $$
# Un des points les plus importants de ce notebook et qu'il faut retenir absolument est qu'il faut choisir un critère d'évaluation qui soit cohérent avec la tâche que l'on souhaite accomplir. Aucune de ces mesures n'est meilleure que les autres, cela dépend du contexte.
#
# Par exemple, changeons de cas d'étude et prenons le cas d'un algorithme de détection de tumeurs dans des images médicales (scanner, IRM ou autre), ce que nous souhaitons dans ce cas c'est minimiser le nombre de faux négatifs, $FN$, c'est-à-dire que nous voulons minimiser le nombre de cas où l'algorithme prédit l'absence de tumeur dans l'image alors qu'il y en a une, les conséquences de telles erreurs sont évidentes. Dans ce cas, on ne se focalisera donc pas sur le $FP$, le cas où l'algorithme prédit la présence d'une tumeur alors qu'il n'y en a pas, cela donnera plus de travail aux médecins mais minimisera le risque de non-détection.
#
# #### Un exemple
#
# Commençons par nous convaincre que l'_accuracy_ n'est pas une bonne mesure [1], nous considérons un jeu de données sur le cancer du sein, il contient des données sur 286 femmes atteintes d'un cancer, parmi elles, 201 n'ont pas eu de récidive et 85 ont eu une récidive. Nous voulons construire un classifieur binaire utilisant 9 _features_ pour prédire la présence ou l'absence de récidive. Imaginons que nous avons construit trois modèles M1, M2 et M3, le modèle M1 prédit l'absence de récidive dans tous les cas, le modèle M2 prédit la présence de récidive dans tous les cas, et le modèle M3 est un peu moins radical et prédit 23 récidives (10 sont correctes) et 263 non récidives (188 sont correctes). Regardons leur _accuracy_ :
#
# $$\mathbf{accuracy(M_1)} = 201/286 \approx 70 \%$$
# $$\mathbf{accuracy(M_2)} = 85/286 \approx 30 \%$$
# $$\mathbf{accuracy(M_3)} = \frac{10+188}{286} \approx 69.23 \%$$
#
# En utilisant seulement l'_accuracy_, nous aurions tendance à dire que les modèle M1 et M3 sont assez performants. Pourtant, un coup d'oeil rapide aux matrices de confusion suffit à nous montrer qu'ils sont très différents:
#
# $$ \mathbf{M_{C1}} = \left[ \begin{matrix}
# 0 & 0 \\
# 85 & 201
# \end{matrix} \right]
# $$
#
# $$ \mathbf{M_{C2}} = \left[ \begin{matrix}
# 85 & 201 \\
# 0 & 0
# \end{matrix} \right]
# $$
#
# $$ \mathbf{M_{C3}} = \left[ \begin{matrix}
# 10 & 13 \\
# 75 & 188
# \end{matrix} \right]
# $$
#
# M3 est le seul à prédire à la fois de vrais positifs et de vrais négatifs. Regardons maintenant les autres grandeurs:
#
# * Precision :
# $$\mathbf{precision(M1)} = \frac{0}{0} = NaN $$
# $$\mathbf{precision(M2)} = \frac{85}{286} \approx 0.30 $$
# $$\mathbf{precision(M3)} = \frac{10}{23} \approx 0.43 $$
#
# * Recall
#
# $$\mathbf{recall(M1)} = \frac{0}{0+85} = 0 $$
# $$\mathbf{recall(M2)} = \frac{85}{0+85} = 1$$
# $$\mathbf{recall(M3)} = \frac{10}{10+75} \approx 0.12 $$
#
# * F-score
#
# $$ \mathbf{F1(M1)} = 0 $$
# $$ \mathbf{F1(M2)} \approx 0.46 $$
# $$ \mathbf{F1(M3)} \approx 0.19 $$
#
# Dans notre exemple, nous voulions maximiser le _recall_, ce qui revient à minimiser $FN$.
#
#
#
# ## AUC - Area Under the Curve
#
#
# Pour mesurer la performance d'un classifieur binaire, on peut tracer la courbe ROC (Receiver Operating Characteristic), celle-ci représente la variation de 'performance' du classifieur lorsque le seuil de décision varie. Concrètement, c'est la courbe qui relie les points dans le plan _FPR_ et _TPR_ (ou _recall_) lorsqu'on fait varier le seuil.
#
# <img src="img/Roccurves.png" title="Source: https://en.wikipedia.org/wiki/Receiver_operating_characteristic">
#
# La diagonale représente un classifieur qui tirerait au hasard sa prédiction avec une probabilité 0.5. Si la courbe est au dessus de la diagonale, le classifieur fait mieux qu'un tirage aléatoire, si elle est en dessous il fait moins bien (dans ce cas il suffit d'inverser les prédictions pour en faire un meilleur). Mais pour comparer plusieurs classifieurs entre-eux, comparer les courbes entre-elles n'est pas la méthode la plus précise comme on peut le voir sur la figure ci-dessus [3]. Il faut utiliser une grandeur quantitative, l'aire sous la courbe (AUC), dont la valeur varie entre 0.5 et 1.0 pour un classifieur performant.
#
# Il est possible d'utiliser des tests statistiques pour vérifier que les performances d'un classifieurs sont meilleures, en terme d'AUC.
# ## La pratique
import numpy as np
from keras.models import Sequential
from keras.layers import Input, Dense, Activation
from keras.optimizers import SGD, Adam, RMSprop
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping
# %matplotlib inline
import matplotlib.pyplot as plt
from math import inf
from sklearn.svm import SVC
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
# Considérons un exemple simple de classification binaire, nous avons deux nuages de points générés suivant des loi normales : $\mathcal{N}(\mu_1,cov_1)$ et $\mathcal{N}(\mu_2,cov_2)$
# +
# Two 2-D Gaussian point clouds used as the binary-classification toy dataset.
N = 1000     # points per class
test = 400   # size of the held-out test split used further below
mu1 = [1,1]
cov1 = [[4,3],[3,3]]
X1 = np.random.multivariate_normal(mu1, cov1, N)  # class 0 samples
#mu2 = [4,7]
mu2 = [4,5]
cov2 = [[1,0.5],[0.5,4]]
X2 = np.random.multivariate_normal(mu2, cov2, N)  # class 1 samples
# -
# Scatter plot of the two clouds (class 0 in default blue, class 1 in red).
plt.plot(X1[:,0],X1[:,1],marker='.',linestyle='')
plt.plot(X2[:,0],X2[:,1],marker='.',linestyle='',color='r')
def zscore(X):
    """Standardize each column of X to zero mean and unit variance (population std)."""
    column_means = np.mean(X, axis=0)
    column_stds = np.std(X, axis=0)
    return (X - column_means) / column_stds
# +
# Creation labels
y1 = np.zeros(N)  # class 0 labels for X1
y2 = np.ones(N)   # class 1 labels for X2
# Concatenation
X = np.concatenate((X1,X2))
X = zscore(X)  # standardize features before training
y = np.concatenate((y1,y2))
# Shuffle samples and labels with the same permutation.
s = np.arange(2*N)
np.random.shuffle(s)
X = X[s]
y = y[s]
# Last `test` samples form the test set, the rest the training set.
X_test = X[-test:,:]
X_train = X[:-test,:]
y_test = y[-test:]
y_train = y[:-test]
# -
plt.plot(X_train[:,0],X_train[:,1],marker='.',linestyle='')
# +
# Small fully-connected binary classifier: 2 inputs -> 5 ReLU units -> 1 sigmoid output.
nn = Sequential()
nn.add(Dense(5, input_shape=(2,), kernel_initializer='uniform'))
nn.add(Activation('relu'))
#nn.add(Dense(15, kernel_initializer='uniform'))
#nn.add(Activation('relu'))
nn.add(Dense(1, kernel_initializer='uniform'))
nn.add(Activation('sigmoid'))
#print(nn.summary())
# NOTE(review): recent Keras versions renamed `lr` to `learning_rate` — confirm against the installed version.
nn.compile(optimizer=RMSprop(lr=0.01), loss='binary_crossentropy', metrics=['binary_accuracy'])
history = nn.fit(X_train,y_train, epochs=15, batch_size=100)
# -
plt.plot(history.history['binary_accuracy'])  # training accuracy per epoch
# get the classifier's predicted scores on the test set
y_pred = nn.predict(X_test)
res = nn.evaluate(X_test,y_test,batch_size=test)
# evaluate returns [loss, binary_accuracy] on the test set
print("Test binary accuracy: {}%".format(round(res[1]*100,4)))
# ### Calcul de la matrice de confusion
#
# <div class="alert alert-block alert-warning">
# Question : Implémentez les fonctions ci-dessous qui calculent les 4 coefficients de la matrice de confusion étant donnés $y_{pred}$ le vecteur des classes prédites par le classifieur et $y_{test}$ les vrais labels.
# +
# Exercise stubs: each should count one cell of the confusion matrix given the
# predicted scores y_pred, true labels y_test, and a decision threshold.
# They deliberately return None until implemented by the student.
# (Assumes "predicted positive" means score above threshold — define the strict
# vs. inclusive comparison consistently across the four functions.)
def true_positive(y_pred, y_test, threshold):
    """TODO (exercise): count samples predicted positive whose true label is 1."""
    return None
def false_positive(y_pred, y_test, threshold):
    """TODO (exercise): count samples predicted positive whose true label is 0."""
    return None
def false_negative(y_pred, y_test, threshold):
    """TODO (exercise): count samples predicted negative whose true label is 1."""
    return None
def true_negative(y_pred, y_test, threshold):
    """TODO (exercise): count samples predicted negative whose true label is 0."""
    return None
# -
def ConfusionMatrix(y_pred, y_test, threshold):
    """Compute the confusion-matrix cells [TP, FP, FN, TN] at the given threshold,
    print them as a 2x2 layout (TP FP / FN TN) and return them as a float array."""
    tp = true_positive(y_pred, y_test, threshold)
    fp = false_positive(y_pred, y_test, threshold)
    fn = false_negative(y_pred, y_test, threshold)
    tn = true_negative(y_pred, y_test, threshold)
    mat_conf = np.array([tp, fp, fn, tn], dtype=float)
    print(mat_conf[0], mat_conf[1])
    print(mat_conf[2], mat_conf[3])
    return mat_conf
# Decision threshold applied to the sigmoid scores in y_pred (0.5 = balanced cut-off).
threshold = 0.5
mat_conf = ConfusionMatrix(y_pred, y_test, threshold)
# ### Precision, Recall and F-Factor
#
# <div class="alert alert-block alert-warning">
# Question : Implémentez les fonctions $precision$, $recall$ et $f\_measure$ pour qu'elles retournent respectivement la métrique correspondante.
# +
def precision(TP,FP):
    """TODO (exercise): return TP / (TP + FP)."""
    return None
def recall(TP,P):
    """TODO (exercise): return TP / P, where P = TP + FN is the number of real positives."""
    return None
def f_measure(precision,recall):
    """TODO (exercise): return the harmonic mean 2*precision*recall / (precision+recall).

    NOTE(review): the parameter names shadow the precision()/recall() functions above.
    """
    return None
def measure(y_pred, y_test, threshold):
    """Return the confusion-matrix cells (TP, FP, FN, TN) at `threshold`,
    plus the true counts of positive (P) and negative (N) samples in y_test."""
    tp = true_positive(y_pred, y_test, threshold)
    fp = false_positive(y_pred, y_test, threshold)
    fn = false_negative(y_pred, y_test, threshold)
    tn = true_negative(y_pred, y_test, threshold)
    positives = sum(y_test == 1)
    negatives = sum(y_test == 0)
    return tp, fp, fn, tn, positives, negatives
# -
# Compute the confusion-matrix cells and the derived metrics on the test set
# (all None until the exercise stubs above are implemented).
[TP,FP,FN,TN,P,N] = measure(y_pred, y_test, threshold)
prec = precision(TP,FP)
rec = recall (TP,P)
F_factor = f_measure(prec,rec)
print(TP,FP,FN,TN,P,N)
print("precision = ",prec)
print("recall = ",rec)
print("F_factor = ",F_factor)
# ### Tracé de la courbe ROC
#
# Le pseudo-code de l'algorithme pour extraire les coordonnées des points de la courbe ROC est présenté ci-dessous.
# <img src="img/algo_roc" style="width: 450px;">
#
# <div class="alert alert-block alert-warning">
# Question : écrire le code de cet algorithme dans la fonction ci-dessous pour retourner une matrice ($N_{test},2$), avec $N_{test}$ la taille de la liste retournée par le pseudo-code.
# </div>
def generateROCpoints(y_pred, y_test):
    """TODO (exercise): implement the ROC-point extraction pseudo-code and return
    an (N_test, 2) matrix of (FPR, TPR) points."""
    ## Put your code here ###
    R = [[None]] ### Remove this line (it only avoids errors until the code is filled in)
    ########################
    # At this point of the algorithm, R should have the form R = [[x0,y0],[x1,y1],...,[xN,yN]]
    # The pseudo-code returns a list of lists, R, but to plot the curve it is
    # easier to use a matrix R_mat.
    R_mat = np.empty((len(R),len(R[0])))
    for i in range(len(R)):
        R_mat[i,:] = R[i]
    return R_mat
R_mat = generateROCpoints(y_pred, y_test)
## Uncomment the next line once generateROCpoints is implemented correctly
#plt.plot(R_mat[:,0], R_mat[:,1]) #### TO UNCOMMENT
plt.plot([0,1],[0,1])  # chance diagonal: the ROC of a random classifier
plt.title('ROC curve')
plt.xlabel('False positive rate')
plt.ylabel('True positive rate')
plt.show()
# ### Aire sous la courbe ROC
#
# <img src="img/algo_auc" style="width: 450px">
#
# <div class="alert alert-block alert-warning">
# Question : écrire le code de cet algorithme dans la fonction AreaUnderCurve($y_{pred}, y_{test}$) ci-dessous.
# </div>
# +
def trap_area(X1, X2, Y1, Y2):
    """Area of the trapezoid with parallel sides Y1 and Y2 and width |X1 - X2|
    (one panel of the trapezoidal rule used for the AUC)."""
    width = abs(X1 - X2)
    mean_height = (Y1 + Y2) / 2
    return width * mean_height
def AreaUnderCurve(y_pred, y_test):
    """TODO (exercise): integrate the ROC curve with the trapezoidal rule
    and return the AUC as a float. Deliberately returns None until implemented."""
    return None
# -
Area = AreaUnderCurve(y_pred, y_test)  # None until the exercise above is implemented
Area
# ## À vous de jouer !
#
# ### Un exemple dans le domaine médical
# <div class="alert alert-block alert-warning">
# Pour le jeu de données suivant sur le cancer du sein (label 0 = tumeur bénigne, 1 = tumeur maligne) implémenter différents algorithmes de classification binaire (SVM, arbre, réseau de neurones) et comparez leur performance pour les grandeurs introduites précédemment.
# +
## Chargement des données
names = ['id', 'clumpThick', 'unifCellSize', 'unifCellShape', 'margAdh', 'SECS', 'bareNuclei', 'blandChrom', 'normalNucl','mistoses','class']
dataframe = pd.read_csv('data/breast-cancer-wisconsin.data', names=names, na_filter='?')
data = dataframe.values
X = data[:,1:-1]
y = data[:,-1]
# Les labels dans le dataset sont 2 et 4 au lieu des traditionnels 0 et 1, on les remplace.
y[y == 2] = 0
y[y == 4] = 1
size_test = 200 # doit être plus petit que la taille du dataset
X_train = X[:-size_test,:]
y_train = y[:-size_test]
X_test = X[-size_test:,:]
y_test = y[-size_test:]
# Pour plotter un plt.bar afin de comparer les trois classifieurs
prec_list = np.zeros(3)
rec_list = np.zeros(3)
fScrore_list = np.zeros(3)
# -
# <div class="alert alert-block alert-warning">
# 1er indice : commencez par regarder la répartition des classes. Que peut-on en dire ?
# <div class="alert alert-block alert-success">
# Réponse :
# ## Avec une SVM
# ## Avec une forêt aléatoire
# ## Avec un réseau de neurones
# +
# 2x2 grid comparing the three classifiers on precision, recall and F-score.
fig, ax = plt.subplots(2,2)
ax[0,0].bar([1,2,3],prec_list,color=['r','b','g'])
ax[0,0].get_xaxis().set_visible(False)
ax[0,0].set_title('Precision')
ax[0,1].bar([1,2,3],rec_list,color=['r','b','g'])
ax[0,1].get_xaxis().set_visible(False)
ax[0,1].set_title('Recall')
ax[1,0].bar([1,2,3],fScrore_list,color=['r','b','g'])
ax[1,0].set_title('F-Score')
ax[1,0].get_xaxis().set_visible(False)
# The bottom-right panel is intentionally left blank.
ax[1,1].get_xaxis().set_visible(False)
ax[1,1].get_yaxis().set_visible(False)
# Red = SVM, blue = random forest, green = neural network.
# -
# <div class="alert alert-block alert-warning">
# Quelle conclusion tirez-vous de vos différentes expériences sur ce jeu de données?
# <div class="alert alert-block alert-success">
# Réponse :
# # Régression
#
# Le cas de la régression est moins problématique, pour mesurer la performance d'un modèle de régression, il suffit de mesurer son l'écart entre la prédiction et la vraie valeur (Mean Squared Error, Mean Absolute Error, qui donnent un ordre de grandeur de la magnitude de l'écart mais pas de signe). On peut également calculer le coefficient de détermination $R^2$.
#
#
#
# ## References
#
# [1] https://machinelearningmastery.com/classification-accuracy-is-not-enough-more-performance-measures-you-can-use/
#
# [2] https://machinelearningmastery.com/assessing-comparing-classifier-performance-roc-curves-2/
#
# [3] https://en.wikipedia.org/wiki/Receiver_operating_characteristic
#
# [4] Les fichiers pdf dans l'archive
#
# [5] <NAME> and <NAME>: "Cancer diagnosis via linear
# programming", SIAM News, Volume 23, Number 5, September 1990, pp 1 & 18.
#
# <img src="img/img.jpg" style="width: 450px;" title="Machine learning memes for convolutional teens">
#
# <img src="img/headache.png" style="width: 450px;" title="Machine learning memes for convolutional teens">
|
Evaluation criteria for supervized learning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
s1 = u'my string'  # the u prefix is redundant on Python 3, where every str is unicode
type(s1)
# NOTE(review): __future__ imports are normally required at the top of a module;
# this works in a notebook cell but is a Python 2 compatibility no-op on Python 3.
from __future__ import unicode_literals
heights = {'Fred': 175, 'Anne': 166, 'Joe': 192}
heights.values()  # a dict view of the heights
|
Untitled.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # How to Access the `items` of an `Object`
# ## The `list`
# > - [ ] Create a `list` of your best friends
lista_bf = ['maria', 'pepe', 'alberto']  # list: indexed by position
# > - [ ] Access the 2nd element ↓
lista_bf[1]
# ## The `dict`
# > - [ ] Create a `dict` of your best friends
diccionario_bf = {'primera': 'maria', 'segundo': 'pepe', 'tercero': 'alberto'}
# > - [ ] Access the 2nd element ↓
# NOTE: raises KeyError — dicts are indexed by key, not by position (teaching example).
diccionario_bf[1]
diccionario_bf.keys()
diccionario_bf['segundo']  # the correct way: access by key
# ## The `DataFrame`
# > - [ ] Create a `dict` with your best **friend's personal data**
maria = {
'altura': 1.78,
'peso': 54,
'edad': 18
}
maria['edad']
# > - [ ] Create a `dict` with your second best **friend's personal data**
juan = {
'altura': 1.87,
'peso': 76,
'edad': 24
}
juan['edad']
# > - [ ] Create a `nested dict` with your **3 best friends' personal data**
diccionario_bf = {
'maria': {
'altura': 1.78,
'peso': 54,
'edad': 18
},
'juan': {
'altura': 1.87,
'peso': 76,
'edad': 24
},
'pepe': {
'altura': 1.84,
'peso': 94,
'edad': 33
}
}
diccionario_bf
# > - [ ] Access the `age` of your `2nd best friend`
diccionario_bf['pepe']
diccionario_bf['pepe']['peso']
# > - [ ] Convert the `dict` to a `DataFrame`
diccionario_bf
# NOTE(review): `pd` is presumably pandas imported earlier in the notebook session — confirm.
# Columns become the friend names, rows the attributes.
df = pd.DataFrame(diccionario_bf)
df
# - [ ] Access the `age` of your `2nd best friend`
df['pepe']
df['pepe']['edad']
# > - What would have happened if the `DataFrame` looks like this ↓
df = df.transpose() #!
df
# After the transpose, 'pepe' is a row label, so this raises KeyError (teaching example).
df['pepe']
# > - [ ] Is your best friends' name a `key` of the `DataFrame`?
df.keys()
# > - [ ] How then can you access your second best friend's age?
df['edad']
df['edad']['pepe']
# ## Recap
# ### The `list`
lista_bf
lista_bf[1]
# ### The `dictionary`
diccionario_bf
diccionario_bf['pepe']
diccionario_bf['pepe']['edad']
# ### The `DataFrame`
df
df['edad']
df['edad']['pepe']
# ## What the heck is a `key`?
#
# > - A `key` that opens a door to get the `values`
# > - [ ] For example, get the values contained in the `age` key
df = pd.read_excel('internet_usage_spain.xlsx', sheet_name=1, index_col=0)
df.head()
# The name `age` is undefined, so this raises NameError (teaching example).
df[age]
age
# `age = ?`
df['age']
# > - [ ] Access the `name` of the people
# NOTE(review): presumably raises KeyError because the names are the index, not a column — confirm.
df['name']
# > - What is the error saying?
# > - [ ] Could you ask which are the `keys` for the `df`?
df.head() #!
df.keys()
# > - [ ] Which `keys` could you access then?
df.columns
df['age']
# > - [ ] How could you then access the `names`?
df.head() #!
df.index
# # Disecting the Objects to Understand the Elements of Programming
# > - Objects are **data structures** that store information.
# > - [ ] Which **syntax** do we use to access the information?
# ## Dot Notation `.`
# > - Show just the `age` column from `df`
df
df.age
# > - [ ] Could we **access to more information** than just the columns?
# The bare trailing dots below (df. / pandas. / pandas.read_csv.) are deliberately
# incomplete: they demo tab-completion in a notebook and are syntax errors if run.
df.
# ### The `function()`
df.hist
df.hist()
df.describe()
df.boxplot()
# ### The `instance`
df.size
df.shape
df.axes
# ### Recap
# #### The `instance`
# The **instance** (object) may contain:
# - `function`
# - more `instance`s
df.
df.describe
df.shape
# #### The `function()`
# The **function** contains nothing
# - ` `; it's the endpoint of programming
pandas.read_csv
# NOTE(review): calling read_csv with no path presumably raises TypeError — teaching example.
pandas.read_csv()
pandas.read_csv.
# #### Library
# The **library** may contain:
# - `module` (subfolder)
# - `function`
# - object `class` **to be created**
# - object `instance` **(object) already created**
import pandas
pandas.
pandas.api
pandas.array
pandas.DataFrame
pandas.describe_option
# # Masking & Filtering the `DataFrame`
#
# > - [ ] **Select** elements of the `object` **based on a Condition**
df.head() #!
# ## Filter people older than 70
# ## Filter people without studies
# ## Filter people older than 70 and without studies
# ## Filter people older than 70 or without studies
|
I Resolving Python with Data Science/02_Disecting the Object Like an Onion/script.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd

# ### 1- Check the path
# !ls

# ### 2- Load and Create a DataFrame
# #### Load the Geospatial Coordinates
# Rename 'Postal Code' -> 'Postcode' so both tables share the same join key.
coords = pd.read_csv('datasets/Geospatial_Coordinates.csv').rename(
    columns={'Postal Code': 'Postcode'})
coords.head()

# #### Load the Neighborhood data
neighborhoods = pd.read_csv('datasets/neighbor.csv')
neighborhoods.head()

# ### 3- Merge the two DataFrames on the shared 'Postcode' column
result = neighborhoods.merge(coords, on='Postcode')

# #### Display the final result
result.style
# NOTE(review): the output path has no .csv extension and the index is kept —
# confirm downstream readers expect that.
result.to_csv('datasets/final_neighborhoods')
|
.ipynb_checkpoints/geo_coord-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R [conda env:py3_physeq]
# language: R
# name: conda-env-py3_physeq-r
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Goal" data-toc-modified-id="Goal-1"><span class="toc-item-num">1 </span>Goal</a></span></li><li><span><a href="#Var" data-toc-modified-id="Var-2"><span class="toc-item-num">2 </span>Var</a></span></li><li><span><a href="#Init" data-toc-modified-id="Init-3"><span class="toc-item-num">3 </span>Init</a></span></li><li><span><a href="#LLMGA" data-toc-modified-id="LLMGA-4"><span class="toc-item-num">4 </span>LLMGA</a></span><ul class="toc-item"><li><span><a href="#Setup" data-toc-modified-id="Setup-4.1"><span class="toc-item-num">4.1 </span>Setup</a></span></li><li><span><a href="#config" data-toc-modified-id="config-4.2"><span class="toc-item-num">4.2 </span>config</a></span></li><li><span><a href="#Run" data-toc-modified-id="Run-4.3"><span class="toc-item-num">4.3 </span>Run</a></span></li></ul></li><li><span><a href="#--WAITING--" data-toc-modified-id="--WAITING---5"><span class="toc-item-num">5 </span>--WAITING--</a></span></li><li><span><a href="#Summary" data-toc-modified-id="Summary-6"><span class="toc-item-num">6 </span>Summary</a></span><ul class="toc-item"><li><span><a href="#No.-of-genomes" data-toc-modified-id="No.-of-genomes-6.1"><span class="toc-item-num">6.1 </span>No. 
of genomes</a></span></li><li><span><a href="#CheckM" data-toc-modified-id="CheckM-6.2"><span class="toc-item-num">6.2 </span>CheckM</a></span></li><li><span><a href="#Taxonomy" data-toc-modified-id="Taxonomy-6.3"><span class="toc-item-num">6.3 </span>Taxonomy</a></span><ul class="toc-item"><li><span><a href="#Taxonomic-novelty" data-toc-modified-id="Taxonomic-novelty-6.3.1"><span class="toc-item-num">6.3.1 </span>Taxonomic novelty</a></span></li><li><span><a href="#Quality-~-taxonomy" data-toc-modified-id="Quality-~-taxonomy-6.3.2"><span class="toc-item-num">6.3.2 </span>Quality ~ taxonomy</a></span></li></ul></li></ul></li><li><span><a href="#sessionInfo" data-toc-modified-id="sessionInfo-7"><span class="toc-item-num">7 </span>sessionInfo</a></span></li></ul></div>
# -
# # Goal
#
# * running `LLMGA` on metagenome datasets
# * studyID = PRJEB23642
# * host = Minke whale
# # Var
# +
# Per-study input/output locations and compute settings.
work_dir = '/ebio/abt3_projects/Georg_animal_feces/data/metagenome/multi-study/BioProjects/'
tmp_out_dir = '/ebio/abt3_projects/databases_no-backup/animal_gut_metagenomes/multi-study_MG-asmbl/'
pipeline_dir = '/ebio/abt3_projects/methanogen_host_evo/bin/llmga-find-refs/'
studyID = 'PRJEB23642'  # BioProject accession for this dataset (Minke whale host)
threads = 24            # CPUs requested for the pipeline run
# -
# # Init
# +
library(dplyr)
library(tidyr)
library(ggplot2)
library(data.table)
set.seed(8304)
# init.R provides the project helpers used below (make_dir, dfhead, dims,
# cat_file, pipelineInfo, as.Num, ...).
# NOTE(review): helper semantics inferred from usage — confirm in init.R.
source('/ebio/abt3_projects/Georg_animal_feces/code/misc_r_functions/init.R')
# -
# # LLMGA
# ## Setup
# Create <tmp_out_dir>/<studyID>/LLMGA as the assembly output directory.
out_dir = file.path(tmp_out_dir, studyID)
make_dir(out_dir)
out_dir = file.path(out_dir, 'LLMGA')
make_dir(out_dir)
# Reference genomes produced earlier by the LLMGA-find-refs step for this study.
ref_genomes = file.path(work_dir, studyID, 'LLMGA-find-refs/references/ref_genomes.fna')
cat(ref_genomes)
# ## config
cat_file(file.path(out_dir, 'config.yaml'))
# ## Run
# ```
# (snakemake_dev) @ rick:/ebio/abt3_projects/methanogen_host_evo/bin/llmga
# $ screen -L -S llmga-PRJEB23642 ./snakemake_sge.sh /ebio/abt3_projects/databases_no-backup/animal_gut_metagenomes/multi-study_MG-asmbl/PRJEB23642/LLMGA/config.yaml cluster.json /ebio/abt3_projects/databases_no-backup/animal_gut_metagenomes/multi-study_MG-asmbl/PRJEB23642/LLMGA/SGE_log 24
# ```
pipelineInfo('/ebio/abt3_projects/methanogen_host_evo/bin/llmga')
# # Summary
# +
# Output file locations produced by the LLMGA run.
asmbl_dir = out_dir = file.path(tmp_out_dir, studyID, 'LLMGA')
checkm_markers_file = file.path(asmbl_dir, 'checkm', 'markers_qa_summary.tsv')
gtdbtk_bac_sum_file = file.path(asmbl_dir, 'gtdbtk', 'gtdbtk_bac_summary.tsv')
gtdbtk_arc_sum_file = file.path(asmbl_dir, 'gtdbtk', 'gtdbtk_ar_summary.tsv')
bin_dir = file.path(asmbl_dir, 'bin')
das_tool_dir = file.path(asmbl_dir, 'bin_refine', 'DAS_Tool')
drep_dir = file.path(asmbl_dir, 'drep', 'drep')
# +
# bin genomes
# Build a table of all binned genomes; binner and sample are recovered from
# the directory layout (<sample>/<binner>/<bin file>).
## maxbin2 (writes *.fasta)
bin_files = list.files(bin_dir, '*.fasta$', full.names=TRUE, recursive=TRUE)
bin = data.frame(binID = gsub('\\.fasta$', '', basename(bin_files)),
                 fasta = bin_files,
                 binner = bin_files %>% dirname %>% basename,
                 sample = bin_files %>% dirname %>% dirname %>% basename)
## metabat2 (writes *.fa)
bin_files = list.files(bin_dir, '*.fa$', full.names=TRUE, recursive=TRUE)
X = data.frame(binID = gsub('\\.fa$', '', basename(bin_files)),
               fasta = bin_files,
               binner = bin_files %>% dirname %>% basename,
               sample = bin_files %>% dirname %>% dirname %>% basename)
## combine both binners into one table
bin = rbind(bin, X)
X = NULL
bin %>% dfhead
# +
# DAS-tool genomes (bins that passed bin refinement)
dastool_files = list.files(das_tool_dir, '*.fa$', full.names=TRUE, recursive=TRUE)
dastool = data.frame(binID = gsub('\\.fa$', '', basename(dastool_files)),
                     fasta = dastool_files)
dastool %>% dfhead
# +
# drep genome files (de-replicated genome set)
P = file.path(drep_dir, 'dereplicated_genomes')
drep_files = list.files(P, '*.fa$', full.names=TRUE)
drep = data.frame(binID = gsub('\\.fa$', '', basename(drep_files)),
                  fasta = drep_files)
drep %>% dfhead
# +
# checkm info: join completeness/contamination stats onto the de-rep genomes.
markers_sum = read.delim(checkm_markers_file, sep='\t')
markers_sum %>% nrow %>% print
drep_j = drep %>%
    inner_join(markers_sum, c('binID'='Bin.Id'))
drep_j %>% dfhead
# +
# gtdb: GTDB-Tk taxonomy, split into one column per rank.
## bacteria
X = read.delim(gtdbtk_bac_sum_file, sep='\t') %>%
    dplyr::select(-other_related_references.genome_id.species_name.radius.ANI.AF.) %>%
    separate(classification, c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'), sep=';')
X %>% nrow %>% print
## archaea (summary file may be empty if no archaeal MAGs were recovered)
if(file.size(gtdbtk_arc_sum_file) > 0){
    Y = read.delim(gtdbtk_arc_sum_file, sep='\t') %>%
        dplyr::select(-other_related_references.genome_id.species_name.radius.ANI.AF.) %>%
        separate(classification, c('Domain', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species'), sep=';')
    Y %>% nrow %>% print
    X = rbind(X,Y)
}
## combined: left join so genomes without a taxonomy assignment are kept
drep_j = drep_j %>%
    left_join(X, c('binID'='user_genome'))
## status
X = Y = NULL
drep_j %>% dfhead
# -
# ## No. of genomes
# Genome counts at each stage of the binning -> refinement -> de-rep funnel.
cat('Number of binned genomes:', bin$fasta %>% unique %>% length)
cat('Number of DAS-Tool passed genomes:', dastool$binID %>% unique %>% length)
cat('Number of 99% ANI de-rep genomes:', drep_j$binID %>% unique %>% length)
# ## CheckM
# +
# checkm stats: histograms of completeness and contamination for the
# de-replicated MAGs.
p = drep_j %>%
    dplyr::select(binID, Completeness, Contamination) %>%
    gather(Metric, Value, -binID) %>%
    ggplot(aes(Value)) +
    geom_histogram(bins=30) +
    labs(y='No. of MAGs\n(>=99% ANI derep.)') +
    facet_grid(Metric ~ ., scales='free_y') +
    theme_bw()
dims(4,3)
plot(p)
# -
# ## Taxonomy
# +
# MAG counts per Phylum;Class taxonomy string.
p = drep_j %>%
    unite(Taxonomy, Phylum, Class, sep=';', remove=FALSE) %>%
    group_by(Taxonomy, Phylum) %>%
    summarize(n = n()) %>%
    ungroup() %>%
    ggplot(aes(Taxonomy, n, fill=Phylum)) +
    geom_bar(stat='identity') +
    coord_flip() +
    labs(y='No. of MAGs\n(>=99% ANI derep.)') +
    theme_bw()
dims(7,5)
plot(p)
# +
# Same summary at finer resolution: Phylum;Class;Family.
p = drep_j %>%
    unite(Taxonomy, Phylum, Class, Family, sep=';', remove=FALSE) %>%
    group_by(Taxonomy, Phylum) %>%
    summarize(n = n()) %>%
    ungroup() %>%
    ggplot(aes(Taxonomy, n, fill=Phylum)) +
    geom_bar(stat='identity') +
    coord_flip() +
    labs(y='No. of MAGs\n(>=99% ANI derep.)') +
    theme_bw()
dims(7,7.5)
plot(p)
# -
# ### Taxonomic novelty
# +
# no close ANI matches: treat 'N/A'/NA placement ANI as 0, then split MAGs
# into those with a species-level placement (ANI >= 95%) vs. no match.
p = drep_j %>%
    unite(Taxonomy, Phylum, Class, sep=';', remove=FALSE) %>%
    mutate(closest_placement_ani = closest_placement_ani %>% as.character,
           closest_placement_ani = ifelse(closest_placement_ani == 'N/A',
                                          0, closest_placement_ani),
           closest_placement_ani = ifelse(is.na(closest_placement_ani),
                                          0, closest_placement_ani),
           closest_placement_ani = closest_placement_ani %>% as.Num) %>%
    mutate(has_species_placement = ifelse(closest_placement_ani >= 95,
                                          'ANI >= 95%', 'No match')) %>%
    ggplot(aes(Taxonomy, fill=Phylum)) +
    geom_bar() +
    facet_grid(. ~ has_species_placement) +
    coord_flip() +
    labs(y='Closest placement ANI') +
    theme_bw()
dims(7,4)
plot(p)
# +
# MAGs that GTDB-Tk could not classify at the genus level ('g__' empty),
# summarized by their higher-rank taxonomy (wrapped for readable axis labels).
p = drep_j %>%
    filter(Genus == 'g__') %>%
    unite(Taxonomy, Phylum, Class, Order, Family, sep='; ', remove=FALSE) %>%
    mutate(Taxonomy = stringr::str_wrap(Taxonomy, 45),
           Taxonomy = gsub(' ', '', Taxonomy)) %>%
    group_by(Taxonomy, Phylum) %>%
    summarize(n = n()) %>%
    ungroup() %>%
    ggplot(aes(Taxonomy, n, fill=Phylum)) +
    geom_bar(stat='identity') +
    coord_flip() +
    labs(y='No. of MAGs lacking a\ngenus-level classification') +
    theme_bw() +
    theme(
        axis.text.y = element_text(size=8)
    )
dims(6.5,7)
plot(p)
# -
# ### Quality ~ taxonomy
# +
# CheckM completeness/contamination distributions per Phylum;Class group.
p = drep_j %>%
    unite(Taxonomy, Phylum, Class, sep='; ', remove=FALSE) %>%
    dplyr::select(Taxonomy, Phylum, Completeness, Contamination) %>%
    gather(Metric, Value, -Taxonomy, -Phylum) %>%
    ggplot(aes(Taxonomy, Value, color=Phylum)) +
    geom_boxplot() +
    facet_grid(. ~ Metric, scales='free_x') +
    coord_flip() +
    labs(y='CheckM quality') +
    theme_bw()
dims(7,5)
plot(p)
# +
# Same plot restricted to taxonomically novel MAGs: unclassified at the
# genus or species level.
p = drep_j %>%
    filter(Genus == 'g__' | Species == 's__') %>%
    unite(Taxonomy, Phylum, Class, sep='; ', remove=FALSE) %>%
    dplyr::select(Taxonomy, Phylum, Completeness, Contamination) %>%
    gather(Metric, Value, -Taxonomy, -Phylum) %>%
    ggplot(aes(Taxonomy, Value, color=Phylum)) +
    geom_boxplot() +
    facet_grid(. ~ Metric, scales='free_x') +
    coord_flip() +
    labs(y='CheckM quality') +
    theme_bw()
dims(7,5)
plot(p)
# -
# # sessionInfo
# Record package versions for reproducibility.
sessionInfo()
|
multi-study/05m_LLMGA_PRJEB23642.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 07. Adjusting timeouts
# ## Overview
# In this notebook, you will learn how to:
#
# - Adjust deployment timeouts, if your deployments fail too often.
# ## Import idact
# It's recommended that *idact* is installed with *pip*.
# Alternatively, make sure the dependencies are installed: `pip install -r requirements.txt`, and add *idact* to path, for example:
# `import sys`
# `sys.path.append('<YOUR_IDACT_PATH>')`
# We will use a wildcard import for convenience:
from idact import *
import bitmath
# ## Load the cluster
# Let's load the environment and the cluster. Make sure to use your cluster name.
# Load the saved idact environment and fetch the configured cluster by name.
load_environment()
cluster = show_cluster("test")
cluster
# ## Adjust deployment timeouts
# Sometimes a timeout occurs during a deployment, and may even cause it to fail.
# If you find this happens too often, you may need to adjust the timeouts for your cluster.
#
# In order to do that, copy the retry name from the info message preceding the failure that looks similar to this:
#
# `2018-11-12 22:14:00 INFO: Retried and failed: config.retries[Retry.PORT_INFO].{count=5, seconds_between=5}
# `
#
# First, you can look up what the current config is for this retry:
cluster.config.retries[Retry.PORT_INFO]
# Then adjust the retry count and/or seconds between retries by replacing the
# whole retry entry:
cluster.config.retries[Retry.PORT_INFO] = set_retry(count=6,
                                                    seconds_between=10)
cluster.config.retries[Retry.PORT_INFO]
# Alternatively, mutate the existing retry entry in place (same end state):
cluster.config.retries[Retry.PORT_INFO].count = 6
cluster.config.retries[Retry.PORT_INFO].seconds_between = 10
cluster.config.retries[Retry.PORT_INFO]
# Remember to save the environment after making any real changes
# (left commented out so re-running the notebook has no side effects):
# +
# save_environment()
# -
# ### Defaults
# You can view the default values for retries by calling:
get_default_retries()
# ## Next notebook
# In the next notebook, we will look at quick Jupyter deployment from a command line script: `idact-notebook`.
|
notebooks/07-Adjusting_timeouts.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 2928, "status": "ok", "timestamp": 1568892773296, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="H9EU0e8yzFOm" outputId="24401aa9-8cc6-47f5-c239-4b3ec24e641c"
# Credits: https://github.com/keras-team/keras/blob/master/examples/mnist_cnn.py
from __future__ import print_function
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D,BatchNormalization
from keras import backend as K
import matplotlib.pyplot as plt
from keras.initializers import glorot_normal , he_normal
# Training hyper-parameters and input geometry shared by all MNIST experiments
# in this notebook.
batch_size = 128     # samples per gradient update
num_classes = 10     # one class per digit, 0-9
epochs = 12          # full passes over the training set

# input image dimensions (MNIST digits are 28x28 grayscale)
img_rows = img_cols = 28
# the data, split between train and test sets
# + colab={} colab_type="code" id="abJshbiEwlF_"
# %matplotlib notebook
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import time
# https://gist.github.com/greydanus/f6eee59eaf1d90fcb3b534a25362cea4
# https://stackoverflow.com/a/14434334
# this function is used to update the plots for each epoch and error
def plt_dynamic(x, vy, ty, ax, colors=('b',)):
    """Plot validation vs. training loss per epoch on *ax* and redraw its figure.

    Parameters
    ----------
    x : sequence of epoch numbers (shared x-axis for both curves).
    vy : per-epoch validation-loss values (drawn in blue).
    ty : per-epoch training-loss values (drawn in red).
    ax : matplotlib Axes to draw on.
    colors : unused; kept (as an immutable default instead of the original
        mutable ``['b']``) for backward compatibility with existing callers.
    """
    ax.plot(x, vy, 'b', label="Validation Loss")
    ax.plot(x, ty, 'r', label="Train Loss")
    # Operate on `ax` and its own figure rather than pyplot's "current axes"
    # and the module-level global `fig`, so the helper works for any Axes it
    # is handed (in this notebook the two were always the same object).
    ax.legend()
    ax.grid(True)
    ax.figure.canvas.draw()
# + colab={"base_uri": "https://localhost:8080/", "height": 52} colab_type="code" executionInfo={"elapsed": 3836, "status": "ok", "timestamp": 1568892774236, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="9ll8LXc6-Vm9" outputId="64950b51-efb6-45fe-b383-69d7c69e6c68"
# Load MNIST (60k train / 10k test, 28x28 grayscale) and insert the single
# channel axis in the position the active Keras backend expects.
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
    # Theano-style layout: (batch, channels, rows, cols)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
    input_shape = (1, img_rows, img_cols)
else:
    # TensorFlow-style layout: (batch, rows, cols, channels)
    x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
    x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
    input_shape = (img_rows, img_cols, 1)
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" executionInfo={"elapsed": 4365, "status": "ok", "timestamp": 1568892774792, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="k_H2PM65zlBq" outputId="908d95b8-9d80-43a5-ea5b-a36bfdfec913"
# Scale pixel values from the [0, 255] uint8 range to [0, 1] float32.
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices (one-hot, num_classes wide)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# + [markdown] colab_type="text" id="DwqDPvyxzlBs"
# <h2>1.0 Number of Convolution Layers: 2</h2>
# + [markdown] colab_type="text" id="e__KqxLIzlBt"
# #### 1.1 Without Dropout or Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 776} colab_type="code" executionInfo={"elapsed": 84695, "status": "ok", "timestamp": 1568628303737, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="-x74v8na9XO-" outputId="39a054c3-c7f6-4357-acfe-962553c64483"
# Experiment 1.1 — baseline CNN: two conv layers, no dropout, no batch norm.
# NOTE(review): the test set is passed as validation_data, so the reported
# "Test" metrics are not from held-out data — confirm this is acceptable for
# the exercise.
model1 = Sequential()
#conv_layer one
model1.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape,padding ='same',kernel_initializer= he_normal()))
model1.add(MaxPooling2D(pool_size=(2, 2)))
#conv_layer two
# NOTE(review): no explicit kernel_initializer here, unlike layer one — confirm intentional.
model1.add(Conv2D(64, (3, 3), activation='relu',padding ='same'))
#top layer
model1.add(Flatten())
model1.add(Dense(128, activation='relu'))
model1.add(Dense(num_classes, activation='softmax'))
# Multi-class setup: one-hot targets -> categorical cross-entropy, Adam optimizer.
model1.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model1.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model1.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1232, "status": "ok", "timestamp": 1568615395661, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="3ki7r-Qk5h47" outputId="adb4c12b-7783-4a4b-cbef-a04488a65f0a"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="ED-DHa8azlBx"
# ### 1.2 With Dropout
# + colab={"base_uri": "https://localhost:8080/", "height": 565} colab_type="code" executionInfo={"elapsed": 1164539, "status": "ok", "timestamp": 1568629489125, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="cN2T7bdaNbRJ" outputId="e707f99c-3f27-4967-8a9c-5058f48d0d61"
# Experiment 1.2 — same architecture with dropout regularization:
# 25% after each conv stage, 50% before the output layer.
model2 = Sequential()
#conv_layer one
model2.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape,padding ='same'))
model2.add(MaxPooling2D(pool_size=(2, 2)))
model2.add(Dropout(0.25))
#conv_layer two
model2.add(Conv2D(64, (3, 3), activation='relu',padding ='same'))
model2.add(Dropout(0.25))
#top layer
model2.add(Flatten())
model2.add(Dense(128, activation='relu'))
model2.add(Dropout(0.5))
model2.add(Dense(num_classes, activation='softmax'))
model2.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model2.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model2.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1164358, "status": "ok", "timestamp": 1568629489130, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="qH1ZFGKbfyF9" outputId="3e8ce2d4-42ce-4f1a-c608-777d7777391a"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="2674HXy1zlB0"
# ### 1.3 With only Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 547} colab_type="code" executionInfo={"elapsed": 2602881, "status": "ok", "timestamp": 1568630928161, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="rA96DJquzlB0" outputId="61d0989b-dea4-42e7-9d3d-1763ee6f0a5b"
# Experiment 1.3 — batch normalization only (no dropout): BN after conv two,
# after flatten, and after the dense hidden layer.
model3 = Sequential()
#conv_layer one
model3.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape,padding ='same'))
model3.add(MaxPooling2D(pool_size=(2, 2)))
#conv_layer two
model3.add(Conv2D(64, (3, 3), activation='relu',padding ='same'))
model3.add(BatchNormalization())
#top layer
model3.add(Flatten())
model3.add(BatchNormalization())
model3.add(Dense(128, activation='relu'))
model3.add(BatchNormalization())
model3.add(Dense(num_classes, activation='softmax'))
model3.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model3.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model3.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1145, "status": "ok", "timestamp": 1568631242110, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="iCJ5SHtx6GAB" outputId="2d4b67fb-beab-4134-898f-4b397473d341"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="BxeJ15S_-VOd"
# ### 1.4 With Dropout and Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 1740531, "status": "ok", "timestamp": 1568647949991, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="xwqxt_3_-VOm" outputId="32d9ee19-1657-4aa2-f8d1-76fa1d67e8e5"
# Experiment 1.4 — dropout AND batch normalization combined, with He-normal
# weight initialization on the ReLU layers.
model4 = Sequential()
#conv_layer one
model4.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape,padding ='same',kernel_initializer= he_normal(seed=None)))
model4.add(MaxPooling2D(pool_size=(2, 2)))
model4.add(Dropout(0.25))
#conv_layer two
model4.add(Conv2D(64, (3, 3), activation='relu',padding ='same',kernel_initializer= he_normal(seed=None)))
model4.add(BatchNormalization())
model4.add(Dropout(0.25))
#top layer
model4.add(Flatten())
model4.add(BatchNormalization())
model4.add(Dropout(0.5))
model4.add(Dense(128, activation='relu',kernel_initializer= he_normal(seed=None)))
model4.add(BatchNormalization())
model4.add(Dropout(0.5))
model4.add(Dense(num_classes, activation='softmax'))
model4.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model4.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model4.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1734618, "status": "ok", "timestamp": 1568647949992, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="Ckhhw1gk-VO1" outputId="fc962ded-f09b-4865-cb62-61f492d84505"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="t22nSoyFzlB3"
# ### 1.5 With Dropout and Batch Normalization, and a different kernel size
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2529810, "status": "ok", "timestamp": 1568633799121, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="n-XgYDJRzlB3" outputId="fc805b13-751a-4cbf-c9ec-ec7903ea5e83"
# Experiment 1.5 — as 1.4 but with a larger 5x5 kernel in the second conv layer.
model5 = Sequential()
#conv_layer one
model5.add(Conv2D(32, kernel_size=(3, 3),activation='relu',input_shape=input_shape,padding ='same',kernel_initializer= he_normal(seed=None)))
model5.add(MaxPooling2D(pool_size=(2, 2)))
model5.add(Dropout(0.25))
#conv_layer two (5x5 kernel instead of 3x3)
model5.add(Conv2D(64, (5, 5), activation='relu',padding ='same'))
model5.add(BatchNormalization())
model5.add(Dropout(0.25))
#top layer
model5.add(Flatten())
model5.add(BatchNormalization())
model5.add(Dropout(0.5))
model5.add(Dense(128, activation='relu'))
model5.add(BatchNormalization())
model5.add(Dropout(0.5))
model5.add(Dense(num_classes, activation='softmax'))
model5.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model5.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model5.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 2529108, "status": "ok", "timestamp": 1568633800023, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="2cyyz6LD6Kgv" outputId="e049c64f-559a-4f89-9f49-645544d5889c"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="7J4XUCHn_OGi"
# ### 1.6 With Dropout and Batch Normalization, sigmoid activation, and explicit weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 1738265, "status": "ok", "timestamp": 1568637682668, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="cVlfh9dr_OGn" outputId="76bb705d-f674-4979-83f7-f6909b835c3d"
# Experiment 1.6 — sigmoid activations throughout (except softmax output),
# with Glorot/Xavier-normal initialization, plus dropout and batch norm.
model6 = Sequential()
#conv_layer one
model6.add(Conv2D(32, kernel_size=(3, 3),activation='sigmoid',input_shape=input_shape,padding ='same',kernel_initializer = glorot_normal()))
model6.add(MaxPooling2D(pool_size=(2, 2)))
model6.add(Dropout(0.25))
#conv_layer two
model6.add(Conv2D(64, (5, 5), activation='sigmoid',padding ='same',kernel_initializer = glorot_normal()))
model6.add(BatchNormalization())
model6.add(Dropout(0.25))
#top layer
model6.add(Flatten())
model6.add(BatchNormalization())
model6.add(Dropout(0.5))
model6.add(Dense(128, activation='sigmoid',kernel_initializer= glorot_normal()))
model6.add(BatchNormalization())
model6.add(Dropout(0.5))
model6.add(Dense(num_classes, activation='softmax'))
model6.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model6.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model6.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1585, "status": "ok", "timestamp": 1568638319826, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="7Yjoxd-y_OGx" outputId="8b8cdd46-a294-49c1-90b7-69b1ea3933c6"
# Plot training vs. validation loss per epoch for the run above.
fig,ax = plt.subplots(1,1)
ax.set_xlabel('epoch') ; ax.set_ylabel('Categorical Crossentropy Loss')
# list of epoch numbers
x = list(range(1,epochs+1))
# history.history maps each metric name to a list with one value per epoch:
# 'loss'/'acc' for training and 'val_loss'/'val_acc' for the data passed as
# validation_data to fit().
vy = history.history['val_loss']  # validation loss per epoch
ty = history.history['loss']      # training loss per epoch
plt_dynamic(x, vy, ty, ax)
# + [markdown] colab_type="text" id="U6DmRz2NAcH2"
# ### 1.7 With Dropout and Batch Normalization, tanh activation, and explicit weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2533419, "status": "ok", "timestamp": 1568640912051, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="4qt8WbMVAcH6" outputId="3115c275-2806-4e90-a28a-0d9099faffd8"
# Experiment 1.7 — tanh activations throughout (except softmax output),
# with Glorot/Xavier-normal initialization, plus dropout and batch norm.
model7 = Sequential()
#conv_layer one
model7.add(Conv2D(32, kernel_size=(3, 3),activation='tanh',input_shape=input_shape,padding ='same',kernel_initializer= glorot_normal()))
model7.add(MaxPooling2D(pool_size=(2, 2)))
model7.add(Dropout(0.25))
#conv_layer two
model7.add(Conv2D(64, (5, 5), activation='tanh',padding ='same',kernel_initializer= glorot_normal()))
model7.add(BatchNormalization())
model7.add(Dropout(0.25))
#top layer
model7.add(Flatten())
model7.add(BatchNormalization())
model7.add(Dropout(0.5))
model7.add(Dense(128, activation='tanh',kernel_initializer= glorot_normal()))
model7.add(BatchNormalization())
model7.add(Dropout(0.5))
model7.add(Dense(num_classes, activation='softmax'))
model7.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model7.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model7.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 2525728, "status": "ok", "timestamp": 1568640912064, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="8j051qHqAcIF" outputId="8a26b586-38df-43dd-df83-26e54605436f"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="wJuvUE7qBK-M"
# ### 1.8 With Dropout and Batch Normalization, using the SGD optimizer
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2499272, "status": "ok", "timestamp": 1568644069992, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="MIj51ZvXBK-R" outputId="6fb3ae2d-416c-4497-9c49-316b8d4eb74b"
# 1.8: two-conv-block CNN with ReLU activations, He-normal weight init,
# Dropout + BatchNormalization regularization, trained with SGD.
model8 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape,
           padding='same', kernel_initializer=he_normal(seed=None)),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(64, (5, 5), activation='relu', padding='same',
           kernel_initializer=he_normal(seed=None)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu', kernel_initializer=he_normal(seed=None)),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model8.compile(loss="categorical_crossentropy", optimizer="sgd",
               metrics=['accuracy'])
history = model8.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model8.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 2491572, "status": "ok", "timestamp": 1568644069996, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="t9yuyAutBK-f" outputId="abe84ec6-03a6-40bb-a1ef-fda6dec16245"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="g23SRE7NzlB6"
# <h2>2. Number of Convolution Layers: 3</h2>
# + [markdown] colab_type="text" id="mB9pqFB7zlB7"
# #### 2.1 Without Dropout or Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2166999, "status": "ok", "timestamp": 1568650502591, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="rolZZ99szlB8" outputId="00551ae5-6df2-4614-dade-a074e26be4bc"
# 2.1: plain three-conv-layer CNN — no Dropout, no BatchNormalization.
model1 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 2
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # classifier head
    Flatten(),
    # NOTE(review): 124 units here (other variants use 128) — possibly a
    # typo in the original; kept as-is to preserve behavior.
    Dense(124, activation='relu'),
    Dense(num_classes, activation='softmax'),
])
model1.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model1.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model1.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 2166493, "status": "ok", "timestamp": 1568650502594, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="ZIrFpEE06NKX" outputId="f0ae8965-37ab-408a-9fb6-a30855c18817"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="V7gLFAFfzlB-"
# ### 2.2 With Dropout
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 4391552, "status": "ok", "timestamp": 1568652731187, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="Dmu08PEfzlB_" outputId="0db1efb0-3821-458a-eb51-5ded8262f474"
# 2.2: three-conv-layer CNN regularized with Dropout only.
model2 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model2.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model2.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model2.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 4391271, "status": "ok", "timestamp": 1568652731191, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="3LZ_BtZg6PfS" outputId="b9c06dfc-994e-45fe-b51f-45cba8a4940c"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="bddlgsy4zlCB"
# ### 2.3 With only Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 537, "status": "ok", "timestamp": 1568657537802, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="JnjGoWY0zlCC" outputId="b3067ffe-20c0-493d-a5b8-aa9b75770d70"
# 2.3: three-conv-layer CNN regularized with BatchNormalization only
# (note: conv layer 2 has no pooling in this variant — as in original).
model3 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 2
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    BatchNormalization(),
    # conv layer 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dense(num_classes, activation='softmax'),
])
model3.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model3.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model3.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1634, "status": "ok", "timestamp": 1568660441249, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="k4_ftL556Rw3" outputId="138122b8-630b-4b7e-b300-c4d81be19bab"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="hr3b-VNWiVH9"
# ### 2.4 With Dropout and Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2887264, "status": "ok", "timestamp": 1568660424943, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="O5dyARnniVIE" outputId="81649aa2-457e-46a2-b04c-a402f0d84fd5"
# 2.4: three-conv-layer CNN with both Dropout and BatchNormalization.
model4 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2 (no pooling in this variant)
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    BatchNormalization(),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model4.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model4.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model4.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 61, "status": "ok", "timestamp": 1568660424951, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="OIFEqEYwiVIR" outputId="f294cca0-7dcc-429c-c227-5d350944b7c1"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="07a55_LdzlCE"
# ### 2.5 With Dropout and Batch Normalization, using different kernel sizes
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 1859535, "status": "ok", "timestamp": 1568662516222, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="QFVpYOqYzlCF" outputId="8b7753be-a6f2-4b65-e97b-f6f590317cb6"
# 2.5: three-conv-layer CNN with Dropout + BatchNormalization and mixed
# kernel sizes (3x3, 5x5, 2x2).
model5 = Sequential([
    # conv layer 1 — 3x3 kernel
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2 — 5x5 kernel
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv layer 3 — 2x2 kernel
    Conv2D(32, (2, 2), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model5.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model5.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model5.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1859190, "status": "ok", "timestamp": 1568662516224, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="0CCMzYC-6Ttv" outputId="d6b2733a-94cc-40f3-d370-df6fbda40149"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="WEcLi7CxjEjw"
# ### 2.6 With Dropout and Batch Normalization, sigmoid activation and weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 3765664, "status": "ok", "timestamp": 1568664423494, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="i1nizEfGjKLf" outputId="48f1eed7-7b62-487b-9ad2-c9abe1a14e59"
# 2.6: three-conv-layer CNN with sigmoid activations and Glorot-normal
# weight init, regularized by Dropout + BatchNormalization.
model6 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(3, 3), activation='sigmoid', input_shape=input_shape,
           padding='same', kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2
    Conv2D(64, (5, 5), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (2, 2), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='sigmoid', kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax', kernel_initializer='glorot_normal'),
])
model6.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model6.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model6.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 3765239, "status": "ok", "timestamp": 1568664423497, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="ubuLLuu6jKLt" outputId="5a5bc2e7-1f62-4e42-f0fa-4d291eb010f9"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="UKokewxhk2uz"
# ### 2.7 With Dropout and Batch Normalization, tanh activation and weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 2188288, "status": "ok", "timestamp": 1568895570698, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="0uPX_bB7kIC4" outputId="1dd49b78-d697-4fa3-f52c-de25a50847e0"
# 2.7: three-conv-layer CNN with tanh activations and Glorot-normal
# weight init, regularized by Dropout + BatchNormalization.
model7 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(3, 3), activation='tanh', input_shape=input_shape,
           padding='same', kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2 (no pooling in this variant)
    Conv2D(64, (5, 5), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (2, 2), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head — two identical 128-unit dense layers (as in original)
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='tanh', kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='tanh', kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model7.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model7.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model7.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 95, "status": "ok", "timestamp": 1568705306373, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="6fYl1nThkIDD" outputId="dc509db3-bbff-4041-c866-045d4328e44a"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="8ZIkarmEjEj5"
# ### 2.8 With Dropout and Batch Normalization, using the SGD optimizer
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 424, "status": "ok", "timestamp": 1568707688256, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="ss7MiU9nlFdW" outputId="08a6cc6b-9f05-40f2-9f56-1ab3d134dff1"
# 2.8: three-conv-layer CNN with ReLU activations and He-normal weight
# init, regularized by Dropout + BatchNormalization, trained with SGD.
model8 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape,
           padding='same', kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2
    Conv2D(64, (5, 5), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (2, 2), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu', kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax', kernel_initializer='he_normal'),
])
# BUGFIX: this section compares the SGD optimizer (see the 2.8 header and
# the parallel experiment 1.8), but the original cell compiled with "adam",
# so SGD was never actually exercised here.
model8.compile(loss="categorical_crossentropy", optimizer="sgd",
               metrics=['accuracy'])
history = model8.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model8.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 14, "status": "ok", "timestamp": 1568707688260, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="BrzzMuAwlGCb" outputId="826e9816-5927-4444-bf02-b9a6a2e76721"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="A424m2utzlCH"
# <h2>3. Number of Convolution Layers: 5</h2>
# + [markdown] colab_type="text" id="4CZgqeUezlCI"
# #### 3.1 Without Dropout or Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 6132, "status": "ok", "timestamp": 1568715058480, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="SAk-BbiszlCJ" outputId="2b3378e6-74a8-410d-a40f-87e3129220db"
# 3.1: plain five-conv-layer CNN — no Dropout, no BatchNormalization.
model1 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 2
    Conv2D(64, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 3
    Conv2D(32, (6, 6), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv layer 4 (no pooling)
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    # conv layer 5
    Conv2D(16, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # classifier head
    Flatten(),
    # NOTE(review): 124 units here (other variants use 128) — possibly a
    # typo in the original; kept as-is to preserve behavior.
    Dense(124, activation='relu'),
    Dense(num_classes, activation='softmax'),
])
model1.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model1.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model1.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 699, "status": "ok", "timestamp": 1568715058488, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="mE_Kv6Fm6VRy" outputId="f551c9ab-5c74-424e-96c2-8e20f89c2f35"
# Plot training vs. validation categorical-crossentropy loss per epoch
# for the run trained above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
epoch_axis = list(range(1, epochs + 1))
# history.history contains 'loss'/'val_loss' (one value per epoch)
# because validation_data was passed to fit().
val_loss_curve = history.history['val_loss']
train_loss_curve = history.history['loss']
plt_dynamic(epoch_axis, val_loss_curve, train_loss_curve, ax)
# + [markdown] colab_type="text" id="ovWQWr00zlCL"
# ### 3.2 With Dropout
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 683, "status": "ok", "timestamp": 1568715058489, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="oj__qvcWzlCM" outputId="3d81fc17-44a3-480d-935e-3bba977611e6"
# 3.2: five-conv-layer CNN regularized with Dropout only.
model2 = Sequential([
    # conv layer 1
    Conv2D(32, kernel_size=(5, 5), activation='relu', input_shape=input_shape,
           padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 2
    Conv2D(64, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv layer 4 (no pooling)
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    Dropout(0.25),
    # conv layer 5
    Conv2D(16, (8, 8), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model2.compile(loss="categorical_crossentropy", optimizer="adam",
               metrics=['accuracy'])
history = model2.fit(
    x_train,
    y_train,
    batch_size=batch_size,
    epochs=epochs,
    verbose=1,
    validation_data=(x_test, y_test),
)
score = model2.evaluate(x_test, y_test, verbose=0)  # [loss, accuracy]
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 350, "status": "ok", "timestamp": 1568715058490, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="WCrKzxxz6Xdo" outputId="1d36b118-4e1f-4e6a-d4f6-e475f2f1c834"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="DSfchi24zlCP"
# ### 3.3 With Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 728575, "status": "ok", "timestamp": 1568715786730, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="25Oayd2XzlCP" outputId="c350c9e2-f04b-4338-9582-9997e4e33bf8"
# Section 3.3: five-conv-layer CNN regularised with BatchNormalization only.
model3 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape, padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    # conv block 2
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    # conv block 3
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    # conv block 4
    Conv2D(40, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    # conv block 5
    Conv2D(8, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    # classifier head
    Flatten(),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dense(num_classes, activation='softmax'),
])
model3.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model3.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model3.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 46, "status": "ok", "timestamp": 1568715786736, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="zGzEdkIW6ZJH" outputId="aa704592-6db6-48ef-8824-1adcaa9ffd46"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="b5Br3ddhmlt2"
# ### 3.4 With Dropout and Batch Normalization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 3509164, "status": "ok", "timestamp": 1568901423827, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="mTGvKNFcmlt7" outputId="76519026-c01d-4595-d89c-34cb45be21ab"
# Section 3.4: five-conv-layer CNN with both Dropout and BatchNormalization.
model4 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape, padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(128, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 3
    Conv2D(32, (3, 3), activation='relu', padding='same'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 4
    Conv2D(40, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 5
    Conv2D(8, (3, 3), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model4.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model4.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model4.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 1253, "status": "ok", "timestamp": 1568721288953, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="rXOy_DCrmluI" outputId="14419699-cf52-45b4-b9d4-0c099992b8c9"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="w7wy90GpzlCS"
# ### 3.5 With Dropout and Batch Normalization, with different kernel sizes
# + colab={"base_uri": "https://localhost:8080/", "height": 899} colab_type="code" executionInfo={"elapsed": 4884439, "status": "ok", "timestamp": 1568745951103, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="OPtvGUFrzlCS" outputId="d227bd95-0647-4064-af05-57484655192c"
# Section 3.5: same Dropout+BN architecture as 3.4 but with varied kernel sizes.
model5 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape, padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(128, (2, 2), activation='relu', padding='same'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 3
    Conv2D(32, (5, 5), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 4
    Conv2D(40, (2, 2), activation='relu', padding='same'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 5
    Conv2D(8, (6, 6), activation='relu', padding='same'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model5.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model5.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model5.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 4884095, "status": "ok", "timestamp": 1568745951122, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="JsqW24F36ar8" outputId="50197643-fd4c-424a-86be-6897ae5a087a"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="DEqR3fVDn1Qx"
# ### 3.6 With Dropout and Batch Normalization, sigmoid activation and explicit weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 899} colab_type="code" executionInfo={"elapsed": 5264733, "status": "ok", "timestamp": 1568793474494, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="7U7HlEjxoBfZ" outputId="73c973cc-976f-4d2d-ad64-a3fd4aa00fe2"
# Section 3.6: architecture of 3.5 with sigmoid activations and Glorot-normal
# kernel initialization on the conv layers.
model6 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='sigmoid',
           input_shape=input_shape, padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(128, (2, 2), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 3
    Conv2D(32, (5, 5), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 4
    Conv2D(40, (2, 2), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 5
    Conv2D(8, (6, 6), activation='sigmoid', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='sigmoid'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='sigmoid'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model6.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model6.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model6.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 5264312, "status": "ok", "timestamp": 1568793474506, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="aGQawCcAoDfd" outputId="6b71ba46-983b-41f2-f3b1-5e6133059476"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="GFcxXcCWn1Q9"
# ### 3.7 With Dropout and Batch Normalization, tanh activation and explicit weight initialization
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 5291647, "status": "ok", "timestamp": 1568798879249, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="4yoWb2tN7FlS" outputId="589ad8cb-287d-4fb0-b775-4e738b0361a3"
# Section 3.7: architecture of 3.5 with tanh activations and Glorot-normal
# kernel initialization on the conv layers.
model7 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='tanh',
           input_shape=input_shape, padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(128, (2, 2), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 3
    Conv2D(32, (5, 5), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 4
    Conv2D(40, (2, 2), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 5
    Conv2D(8, (6, 6), activation='tanh', padding='same',
           kernel_initializer='glorot_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='tanh'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='tanh'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model7.compile(loss="categorical_crossentropy",
               optimizer="adam",
               metrics=['accuracy'])
history = model7.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model7.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 5285242, "status": "ok", "timestamp": 1568798879253, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="rnfywgcE7Fli" outputId="26b42700-f6ac-4f0c-a28b-0b4659fdf9bc"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="a_i-hWKAn1RA"
# ### 3.8 With Dropout and Batch Normalization, using the SGD optimizer
# + colab={"base_uri": "https://localhost:8080/", "height": 512} colab_type="code" executionInfo={"elapsed": 5478764, "status": "ok", "timestamp": 1568808578710, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="DoHlbcXB762K" outputId="c21c46d9-1f84-47b3-88e5-c3b1e1aeac14"
# Section 3.8: architecture of 3.5 with He-normal initialization and SGD optimizer.
model8 = Sequential([
    # conv block 1
    Conv2D(32, kernel_size=(3, 3), activation='relu',
           input_shape=input_shape, padding='same',
           kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    Dropout(0.25),
    # conv block 2
    Conv2D(128, (2, 2), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 3
    Conv2D(32, (5, 5), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 4
    Conv2D(40, (2, 2), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    BatchNormalization(),
    Dropout(0.25),
    # conv block 5
    Conv2D(8, (6, 6), activation='relu', padding='same',
           kernel_initializer='he_normal'),
    MaxPooling2D(pool_size=(2, 2)),
    BatchNormalization(),
    Dropout(0.25),
    # classifier head
    Flatten(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(128, activation='relu'),
    BatchNormalization(),
    Dropout(0.5),
    Dense(num_classes, activation='softmax'),
])
model8.compile(loss="categorical_crossentropy",
               optimizer="sgd",
               metrics=['accuracy'])
history = model8.fit(x_train, y_train,
                     batch_size=batch_size,
                     epochs=epochs,
                     verbose=1,
                     validation_data=(x_test, y_test))
score = model8.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# + colab={"base_uri": "https://localhost:8080/", "height": 283} colab_type="code" executionInfo={"elapsed": 5478357, "status": "ok", "timestamp": 1568808578726, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="rggW0GgH762V" outputId="a5f9ee74-321d-40ba-dcaf-19da41b97c09"
# Training vs. validation loss curve for the model fitted above.
fig, ax = plt.subplots(1, 1)
ax.set_xlabel('epoch')
ax.set_ylabel('Categorical Crossentropy Loss')
# Epoch numbers for the x-axis (1-based).
epoch_axis = list(range(1, epochs + 1))
# history.history holds one value per epoch for each metric; 'val_loss' and
# 'loss' are present because validation_data was passed to fit().
val_losses = history.history['val_loss']
train_losses = history.history['loss']
plt_dynamic(epoch_axis, val_losses, train_losses, ax)
# + [markdown] colab_type="text" id="ITfM-szSNlUv"
# <h2>Results</h2>
# + colab={} colab_type="code" id="GItAksOoNlUw"
#for ploting
import matplotlib.pyplot as plt
def plot_gr(X_axis, LP_loss, LP_acc, layers):
    """Plot test loss (left) and test accuracy (right) per model variant.

    Parameters
    ----------
    X_axis : list of str
        Model-variant labels used as x-axis categories.
    LP_loss : list of float
        Test loss per variant, same order as ``X_axis``.
    LP_acc : list of float
        Test accuracy per variant, same order as ``X_axis``.
    layers : int
        Number of conv layers; used only in the subplot titles.
    """
    fig = plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)
    plt.plot(X_axis, LP_loss)
    # BUG FIX: plt.legend("loss") treats the string as a *sequence* of labels,
    # producing one legend entry per character; pass a one-element list instead.
    plt.legend(["loss"])
    plt.xlabel("Model")
    plt.ylabel("loss")
    plt.title(str(layers) + "layer CNN")
    plt.subplot(1, 2, 2)
    plt.plot(X_axis, LP_acc)
    plt.xlabel("Model")
    plt.ylabel("Accuracy")
    # Same fix as above for the accuracy legend.
    plt.legend(["Acc"])
    plt.title(str(layers) + "layer CNN")
    plt.tight_layout(pad=0.4, w_pad=0.5, h_pad=1.0)
# + colab={} colab_type="code" id="nrGF7B7INlUx"
#BN refers to a Batch Normalization layer
#DO refers to a Dropout layer
#Batchsize = 128
#Activation = Relu,sigmoid,tanh
#Optimizer = Adam,sgd
# + [markdown] colab_type="text" id="clnZzBoHNlUy"
# <h2> 2 Layer CNN </h2>
# + colab={"base_uri": "https://localhost:8080/", "height": 228} colab_type="code" executionInfo={"elapsed": 1308, "status": "ok", "timestamp": 1568914009427, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="btI3R3M-NlU0" outputId="435abc71-ad12-42e5-8f69-ed05e1587be3"
# Summary of the 2-layer CNN experiments (values transcribed from the runs above).
from prettytable import PrettyTable

two_layer_rows = [
    ["without BN & DO", "32 (3X3)", "64 (3X3)", 0.0546, 0.9876],
    ["Only Do", "32 (3X3)", "64 (3X3)", 0.0259, 0.9926],
    ["Only BN ", "32 (3X3)", "64 (3X3)", 0.0577, 0.9836],
    ["With BN & DO", "32 (3X3)", "64 (5X5)", 0.0229, 0.9915],
    ["Diff kernel size", "32 (3X3)", "64 (5X5)", 0.0195, 0.993],
    ["Activation Sigmoid", "32 (3X3)", "64 (5X5)", 0.0572, 0.9807],
    ["Activation Tanh", "32 (3X3)", "64 (5X5)", 0.0546, 0.982],
    ["Optimizer SGD", "32 (3X3)", "64 (5X5)", 0.0538, 0.9815],
]
x = PrettyTable()
x.field_names = ["Model Description", "1St layer Filter", "2nd layer Filter", "Test Loss", "Test Acc"]
for row in two_layer_rows:
    x.add_row(row)
print(x)
# + colab={} colab_type="code" id="Igacr1Z2NlU1"
# Per-variant test loss/accuracy for the 2-layer CNN, in the same order as
# the summary table above.
X_axis = ["simple","DO","BN","BN & DO","diff size","sigmoid","tanh","SGD"]
two_L_loss = [0.0546 ,0.0259 ,0.0577 ,0.0229 ,0.0195 ,0.0572 ,0.0546 ,0.0538]
two_L_acc = [0.9876,0.9926,0.9836,0.9915,0.993,0.9807,0.982,0.9815 ]
# + colab={"base_uri": "https://localhost:8080/", "height": 383} colab_type="code" executionInfo={"elapsed": 2383, "status": "ok", "timestamp": 1568914010914, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="wWGtRGv0NlU2" outputId="ed35988d-4df8-4a88-ead0-172740806ff9"
# Loss/accuracy charts for the 2-layer CNN variants.
plot_gr(X_axis,two_L_loss,two_L_acc,2)
# + [markdown] colab_type="text" id="0OWMZNrmNlU4"
# ### Observation:
# We can see that the loss decreased when we use BN and DO with ReLU activation and the Adam optimizer. When the activation is sigmoid or tanh the loss increased, possibly because those activations encountered vanishing gradients.
# With SGD as the optimizer the loss is higher because it converges toward the minimum at a slower rate.
# + [markdown] colab_type="text" id="RoYTmT3sNlU4"
# <h2> 3 Layer CNN </h2>
# + colab={"base_uri": "https://localhost:8080/", "height": 228} colab_type="code" executionInfo={"elapsed": 1760, "status": "ok", "timestamp": 1568914010916, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="mL7asS2ONlU7" outputId="949c6343-8444-4c6c-bca0-10d76900a503"
# Summary of the 3-layer CNN experiments (values transcribed from the runs above).
three_layer_rows = [
    ["without BN & DO", "32 (5X5)", "64 (5X5)", "32 (5X5)", 0.0339, 0.9916],
    ["Only Do", "32 (5X5)", "64 (3X3)", "32 (5X5)", 0.0169, 0.9941],
    ["Only BN", "32 (5X5)", "64 (3X3)", "32 (5X5)", 0.0317, 0.9916],
    ["With BN & DO", "32 (5X5)", "64 (3X3)", "32 (5X5)", 0.0149, 0.9946],
    ["Diff kernel size", "32 (3X3)", "64 (5, 5)", "32 (2X2)", 0.022, 0.9926],
    ["Activation Sigmoid", "32 (3X3)", "64 (5, 5)", "32 (2X2)", 0.0492, 0.9836],
    ["Activation Tanh", "32 (3X3)", "64 (5, 5)", "32 (2X2)", 0.0623, 0.9812],
    ["Optimizer SGD", "32 (3X3)", "64 (5, 5)", "32 (2X2)", 0.0233, 0.9918],
]
x = PrettyTable()
x.field_names = ["Model Description", "1St layer Filter", "2nd layer Filter", "3rd layer Filter", "Test Loss", "Test Acc"]
for row in three_layer_rows:
    x.add_row(row)
print(x)
# + colab={} colab_type="code" id="9Gyrpt6TNlU9"
# Per-variant test loss/accuracy for the 3-layer CNN, in the same order as
# the summary table above.
X_axis = ["simple","DO","BN","BN & DO","diff size","sigmoid","tanh","SGD"]
three_L_loss = [0.0339,0.0169,0.0317,0.0149,0.022,0.0492,0.0623,0.0233 ]
three_L_acc = [0.9916,0.9941,0.9916,0.9946,0.9926,0.9836,0.9812,0.9918 ]
# + colab={"base_uri": "https://localhost:8080/", "height": 383} colab_type="code" executionInfo={"elapsed": 2390, "status": "ok", "timestamp": 1568914011884, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="1i3iSvZBNlU_" outputId="b3f624ad-4367-474c-f5c4-eb0bfc295fea"
# Loss/accuracy charts for the 3-layer CNN variants.
plot_gr(X_axis,three_L_loss,three_L_acc,3)
# + [markdown] colab_type="text" id="2Oklx87NNlVA"
# ### Observation:
# We see the same pattern as in the 2-layer CNN: the loss decreased when we use BN and DO with ReLU activation and the Adam optimizer. When the activation is sigmoid or tanh the loss increased, possibly because those activations encountered vanishing gradients.
# With SGD as the optimizer the loss is higher because it converges toward the minimum at a slower rate.
# + [markdown] colab_type="text" id="6T9Zm0M0NlVB"
# <h2> 5 Layer CNN </h2>
# + colab={"base_uri": "https://localhost:8080/", "height": 248} colab_type="code" executionInfo={"elapsed": 1625, "status": "ok", "timestamp": 1568914011891, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="k_nPMG91jdGa" outputId="7f113f69-d15b-4b10-e94c-ce54438d4301"
# Summary of the 5-layer CNN experiments (values transcribed from the runs above).
from prettytable import PrettyTable

five_layer_rows = [
    ["without BN & DO", "32 (5X5)", "64 (5X5)", "32 (6X6)", "128 (3X3)", "16 (5X5)", 0.0459, 0.9892],
    ["Only Do", "32 (5X5)", "64 (3X3)", "32 (5X5)", "128 (3X3)", "16 (8X8)", 0.0345, 0.9916],
    ["Only BN", "32 (3X3)", "128 (3X3)", "32 (3X3)", "40 (3X3)", "8 (3X3)", 0.0358, 0.9893],
    ["With BN & DO", "32 (3X3)", "128 (3X3)", "32 (3X3)", "40 (3X3)", "8 (3X3)", 0.0307, 0.9901],
    ["Diff kernel size", "32 (3X3)", "128 (2X2)", "32 (5X5)", "40 (2X2)", "8 (6X6)", 0.0234, 0.9923],
    ["Activation Sigmoid", "32 (3X3)", "128 (2X2)", "32 (5X5)", "40 (2X2)", "8 (6X6)", 0.0884, 0.9746],
    ["Activation Tanh", "32 (3X3)", "128 (2X2)", "32 (5X5)", "40 (2X2)", "8 (6X6)", 0.0977, 0.9732],
    ["Optimizer SGD", "32 (3X3)", "128 (2X2)", "32 (5X5)", "40 (2X2)", "8 (6X6)", 0.0541, 0.9827],
]
x = PrettyTable()
x.field_names = ["Model Description", "1St layer Filter", "2nd layer Filter", "3rd layer Filter", "4th layer Filter", "5th layer Filter", "Test Loss", "Test Acc"]
for row in five_layer_rows:
    x.add_row(row)
print(x)
# + colab={} colab_type="code" id="f68KFL7djdGr"
# Per-variant test loss/accuracy for the 5-layer CNN, in the same order as
# the summary table above.
X_axis = ["simple","DO","BN","BN & DO","diff size","sigmoid","tanh","SGD"]
five_L_loss = [0.0459,0.0345,0.0358,0.0307,0.0234,0.0884,0.0977,0.0541 ]
five_L_acc = [0.9892,0.9916,0.9893,0.9901,0.9923,0.9746,0.9732,0.9827 ]
# + colab={"base_uri": "https://localhost:8080/", "height": 383} colab_type="code" executionInfo={"elapsed": 2769, "status": "ok", "timestamp": 1568914013833, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="ORuxnkQDNlVF" outputId="cd8e1d14-35b4-42e8-bf85-e026e638b3d0"
# Loss/accuracy charts for the 5-layer CNN variants.
plot_gr(X_axis,five_L_loss,five_L_acc,5)
# + [markdown] colab_type="text" id="FkSGGZU2jRHJ"
# ### Observation:
# We can see that the loss decreased when we use BN and DO with ReLU activation and the Adam optimizer. When the activation is sigmoid or tanh the loss increased, possibly because those activations encountered vanishing gradients.
# With SGD as the optimizer the loss is higher because it converges toward the minimum at a slower rate.
# + colab={"base_uri": "https://localhost:8080/", "height": 387} colab_type="code" executionInfo={"elapsed": 2085, "status": "ok", "timestamp": 1568914014881, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/-wdXb0DL2Ohg/AAAAAAAAAAI/AAAAAAAAAtk/gU50EgBa5uo/s64/photo.jpg", "userId": "11395127353489346000"}, "user_tz": -330} id="mGGo5ZTGpE_z" outputId="30621e66-eb91-4b4e-d8e9-7cd41267d8d1"
import matplotlib.pyplot as plt

# Compare loss (left) and accuracy (right) across the 2-, 3- and 5-layer CNNs.
depth_curves = [(2, two_L_loss, two_L_acc),
                (3, three_L_loss, three_L_acc),
                (5, five_L_loss, five_L_acc)]
plt.figure(figsize=(15, 5))
plt.subplot(1, 2, 1)
for depth, losses, _ in depth_curves:
    plt.plot(X_axis, losses, label=depth)
plt.legend()
plt.xlabel("Models")
plt.ylabel("Loss")
plt.title("Model vs loss")
plt.subplot(1, 2, 2)
for depth, _, accs in depth_curves:
    plt.plot(X_axis, accs, label=depth)
plt.legend()
plt.xlabel("Models")
plt.ylabel("Accuracy")
plt.title("Model vs Accuracy")
# + [markdown] colab_type="text" id="P0hVUG-ENlVH"
# ### Observation:
# From the above graph we can see that the loss curve of the 5-layer CNN is always below that of the 3-layer CNN, and the 3-layer curve is below the 2-layer one — so increasing the number of layers results in lower loss. Likewise, the accuracy curve of the deeper CNN is always above that of the shallower ones.
|
Convolutional Neural Network.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (py39)
# language: python
# name: py39
# ---
# # Time series of spring phytoplankton bloom and model forcing at station S3 from Feb 15th - June 15th 2019
# +
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib as mpl
import netCDF4 as nc
import datetime as dt
from salishsea_tools import evaltools as et, places, viz_tools, visualisations, bloomdrivers
import xarray as xr
import pandas as pd
import pickle
import os
# %matplotlib inline
# -
# Analysis window: 15 Feb - 15 Jun 2019 (spring bloom period).
start=dt.datetime(2019,2,15)
end=dt.datetime(2019,6,15)
year=str(start.year)
# Model configuration/version tag used in output file names and result paths.
modver='201812'
# ### Location of station S3
# +
# Station identifier used to look up coordinates/grid indices in
# salishsea_tools.places.
loc='S3'
# Longitude and latitude of the station:
lon,lat=places.PLACES['S3']['lon lat']
# Station indices on the SalishSeaCast NEMO grid (j then i):
ij,ii=places.PLACES['S3']['NEMO grid ji']
# GEM2.5 grid ji is the atmospheric-forcing grid used by the ops files:
jw,iw=places.PLACES['S3']['GEM2.5 grid ji']
# Plot the station location over the model bathymetry coastline.
fig, ax = plt.subplots(1,1,figsize = (6,6))
with nc.Dataset('/data/vdo/MEOPAR/NEMO-forcing/grid/bathymetry_201702.nc') as grid:
    viz_tools.plot_coastline(ax, grid, coords ='map', isobath=.1)
ax.plot(lon, lat, '.', markersize=14, color='red')
ax.set_ylim(48,50)
ax.set_xlim(-125,-122)
ax.set_title('Location of Station S3')
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# ### load data
# Location of the cached (pickled) extraction results.
savedir='/ocean/aisabell/MEOPAR/extracted_files'
#savedir='/data/eolson/results/MEOPAR'
fname=f'springTimeSeries_{year}_{loc}_{modver}.pkl'
savepath=os.path.join(savedir,fname)
# Set recalc=True to force re-extraction even if the pickle already exists.
recalc=False
if recalc==True or not os.path.isfile(savepath):
basedir='/results/SalishSea/nowcast-green.201812/'
nam_fmt='nowcast'
flen=1 # files contain 1 day of data each
ftype= 'ptrc_T' # load bio files
tres=24 # 1: hourly resolution; 24: daily resolution
flist=et.index_model_files(start,end,basedir,nam_fmt,flen,ftype,tres)
# flist contains paths: file pathes; t_0 timestemp of start of each file; t_n: timestamp of start of next file
# a list of the files we want between start and end date
print(flist)
fliste3t = et.index_model_files(start,end,basedir,nam_fmt,flen,"carp_T",tres)
ik=0
with xr.open_mfdataset(flist['paths']) as bio:
bio_time=np.array(bio.time_centered[:])
sno3=np.array(bio.nitrate.isel(deptht=ik,y=ij,x=ii))
sdiat=np.array(bio.diatoms.isel(deptht=ik,y=ij,x=ii))
sflag=np.array(bio.flagellates.isel(deptht=ik,y=ij,x=ii))
scili=np.array(bio.ciliates.isel(deptht=ik,y=ij,x=ii))
no3_alld=np.array(bio.nitrate.isel(y=ij,x=ii))
diat_alld=np.array(bio.diatoms.isel(y=ij,x=ii))
flag_alld=np.array(bio.flagellates.isel(y=ij,x=ii))
cili_alld=np.array(bio.ciliates.isel(y=ij,x=ii))
with xr.open_mfdataset(fliste3t['paths']) as carp:
intdiat=np.array(np.sum(bio.diatoms.isel(y=ij,x=ii)*carp.e3t.isel(y=ij,x=ii),1)) # depth integrated diatom
intphyto=np.array(np.sum((bio.diatoms.isel(y=ij,x=ii)+bio.flagellates.isel(y=ij,x=ii)\
+bio.ciliates.isel(y=ij,x=ii))*carp.e3t.isel(y=ij,x=ii),1))
spar=np.array(carp.PAR.isel(deptht=ik,y=ij,x=ii))
fracdiat=intdiat/intphyto # depth integrated fraction of diatoms
sphyto=sdiat+sflag+scili
phyto_alld=diat_alld+flag_alld+cili_alld
percdiat=sdiat/sphyto # percent diatoms
opsdir='/results/forcing/atmospheric/GEM2.5/operational'
flist2=et.index_model_files(start,end,opsdir,nam_fmt='ops',flen=1,ftype='None',tres=24)
with xr.open_mfdataset(flist2['paths']) as winds:
u_wind=np.array(winds.u_wind.isel(y=jw,x=iw))
v_wind=np.array(winds.v_wind.isel(y=jw,x=iw))
twind=np.array(winds.time_counter)
solar=np.array(winds.solar.isel(y=jw,x=iw))
# wind speed:
wspeed=np.sqrt(u_wind**2 + v_wind**2)
# wind direction in degrees from east
d = np.arctan2(v_wind, u_wind)
winddirec=np.rad2deg(d + (d < 0)*2*np.pi)
# reading Fraser river flow files
dfFra=pd.read_csv('/ocean/eolson/MEOPAR/obs/ECRivers/Flow/FraserHopeDaily__Dec-2-2020_10_31_05PM.csv',
skiprows=1)
# the original file contains both flow and water level information in the same field (Value)
# keep only the flow data, where PARAM=1 (drop PARAM=2 values, water level data)
# flow units are m3/s
# DD is YD, year day (ie. 1 is jan 1)
dfFra.drop(dfFra.loc[dfFra.PARAM==2].index,inplace=True)
# rename 'Value' column to 'Flow' now that we have removed all the water level rows
dfFra.rename(columns={'Value':'Flow'}, inplace=True)
# inplace=True does this function on the orginal dataframe
# no time information so use dt.date
dfFra['Date']=[dt.date(iyr,1,1)+dt.timedelta(days=idd-1) for iyr, idd in zip(dfFra['YEAR'],dfFra['DD'])]
# taking the value from the yr column, jan1st date, and making jan1 column to be 1 not 0
dfFra.head(2)
# select portion of dataframe in desired date range
dfFra2=dfFra.loc[(dfFra.Date>=start.date())&(dfFra.Date<=end.date())]
riv_time=dfFra2['Date'].values
rivFlow=dfFra2['Flow'].values
# could also write dfFra['Date'], sometimes this is required
# newstart is a datetime object, so we convert it to just a date with .date
pickle.dump((bio_time,sno3,sdiat,sflag,scili,diat_alld,no3_alld,flag_alld,cili_alld,phyto_alld,intdiat,intphyto,spar,fracdiat,sphyto,percdiat,
u_wind,v_wind,twind,solar,wspeed,winddirec,riv_time,rivFlow),open(savepath,'wb'))
else:
bio_time,sno3,sdiat,sflag,scili,diat_alld,no3_alld,flag_alld,cili_alld,phyto_alld,intdiat,intphyto,spar,fracdiat,sphyto,percdiat,\
u_wind,v_wind,twind,solar,wspeed,winddirec,riv_time,rivFlow=pickle.load(open(savepath,'rb'))
# # Spring Bloom Timing Metrics (3 ways)
# ### Metric 1: <NAME> definition: “peak phytoplankton concentration (averaged from the surface to 3 m depth) within four days of the average 0-3 m nitrate concentration going below 0.5 uM (the half-saturation concentration) for two consecutive days”
# a) Average phytoplankton concentration over upper 3 m
#
# b) Average nitrate over upper 3 m
#
# c) Find first location where nitrate crosses below 0.5 micromolar and stays there for 2 days
#
# d) Find date with maximum phytoplankton concentration within four days (say 9 day window) of date in c).
#
# Metric 1: peak 0-3 m phytoplankton within a window of the date when
# 0-3 m nitrate first stays below 0.5 uM for two consecutive days.
bloomtime1=bloomdrivers.metric1_bloomtime(phyto_alld,no3_alld,bio_time)
print(f'The spring phytoplankton bloom according to metric 1 occurs at {bloomtime1}')
# ### Metric 2: the first peak in which chlorophyll concentrations are above 5 ug/L for more than two days (Olson et. al 2020)
# Take the first peak; check that two days around it have concentrations > 5; if not, move to the next peak.
#
bloomtime2=bloomdrivers.metric2_bloomtime(sphyto,sno3,bio_time)
print(f'The spring phytoplankton bloom according to metric 2 occurs at {bloomtime2}')
# ### Metric 3: For a given year, bloom initiation is determined to be the week that first reaches the threshold value (by looking at weekly averages) as long as one of the two following weeks was >70% of the threshold value. (Karyn’s method)
# The median + 5% of the annual Chl concentration is deemed “threshold value” for each year.
bloomtime3=bloomdrivers.metric3_bloomtime(sphyto,sno3,bio_time)
print(f'The spring phytoplankton bloom according to metric 3 occurs at {bloomtime3}')
# ### Total surface phytoplankton and nitrate:
# +
# Surface phytoplankton vs nitrate, with the three bloom-date estimates overlaid.
fig,ax=plt.subplots(1,1,figsize=(12,3))
p1=ax.plot(bio_time,sphyto,
           '-',color='forestgreen',label='Phytoplankton')
p2=ax.plot(bio_time,sno3,
           '-',color='orange',label='Nitrate')
ax.legend(handles=[p1[0],p2[0]],loc=1)
ax.set_ylabel('Concentration ($\mu$M N)')
ax.set_title('Surface Phytoplankton and Nitrate at Station S3')
# vertical lines mark the bloom date estimated by each metric
ax.axvline(x=bloomtime1, label='Metric 1 Bloom Date:{}'.format(bloomtime1), color='r')
ax.axvline(x=bloomtime2, label='Metric 2 Bloom Date:{}'.format(bloomtime2), color='k')
ax.axvline(x=bloomtime3, label='Metric 3 Bloom Date:{}'.format(bloomtime3), color='b')
ax.legend()
# -
# ### Fraction of surface phytoplankton that is diatoms
fig,ax=plt.subplots(1,1,figsize=(12,3))
ax.plot(bio_time,percdiat, '-',color='orchid')
ax.set_ylabel('Diatoms / Total Phytoplankton')
ax.set_title('Fraction of Diatoms in Total Surface Phytoplankton')
ax.set_ylim(0,1)
# ### Depth integrated phytoplankton:
# Depth-integrated phytoplankton and its diatom fraction over the window.
# %%time
fig,ax=plt.subplots(1,1,figsize=(12,3))
ax.plot(bio_time,intphyto,'-',color='forestgreen',label='Phytoplankton')
ax.legend(loc=2);
ax.set_ylabel('Concentration (mmol N/m2)')
ax.set_xlim(bio_time[0],bio_time[-1])
ax.set_title('Depth Integrated Phytoplankton Concentration')
# ### Fraction of depth integrated phytoplankton that is diatoms
# %%time
fig,ax=plt.subplots(1,1,figsize=(12,3))
ax.plot(bio_time,fracdiat,'-',color='orchid')
ax.set_ylabel('Diatoms / Total Phytoplankton')
ax.set_xlim(bio_time[0],bio_time[-1])
ax.set_title('Fraction of Diatoms in Total Depth Integrated Phytoplankton')
ax.set_ylim(0,1)
# ### Fraser River Flow
# +
# Fraser River flow at Hope vs surface phytoplankton/nitrate (twin y-axes).
# plot phytoplankton on top:
fig,ax1=plt.subplots(1,1,figsize=(12,3))
p1=ax1.plot(bio_time,sphyto,
            '-',color='forestgreen',label='Phytoplankton')
p2=ax1.plot(bio_time,sno3,
            '-',color='orange',label='Nitrate')
ax1.set_ylabel('Concentration ($\mu$M N)')
ax1.set_ylim(0,18)
# Now plot Fraser Flow
ax2=ax1.twinx()
p3=ax2.plot(riv_time,rivFlow,'c-', label='Fraser Flow')
ax2.set_ylabel('Flow (m$^3$s$^{-1}$)')
ax2.set_title('Fraser Flow at Hope and Surface Phytoplankton at Station S3')
ax1.legend(handles=[p1[0],p2[0],p3[0]],loc='upper center')
# -
# ### Forcing (ops): Wind Speed
# Hourly wind components (u cyan, v blue) from the GEM2.5 operational forcing.
fig,ax=plt.subplots(1,1,figsize=(18,2))
ax.plot(twind,u_wind,'c-')
ax.plot(twind,v_wind,'b-')
ax.set_xlim(start,end)
ax.set_title('Wind speed')
ax.set_ylabel('m/s')
# Quiver plot of wind vectors along the time axis.
fig,ax=plt.subplots(1,1,figsize=(20,6))
q=ax.quiver(twind, np.zeros(len(twind)), u_wind, v_wind,scale=200, width=0.001); # change the scale
ax.set_yticklabels([]);
fig.autofmt_xdate(bottom=0.3, rotation=30, ha='right')
yearsFmt = mdates.DateFormatter('%b %d')
ax.xaxis.set_major_formatter(yearsFmt)
ax.set_xlim(start,end)
ax.set_title('Wind Vectors in Geographic Coordinates')
# this can probably be done better?
# ### Daily average wind speed
# Daily-average wind speed: 24-hour block means, timestamped at each day's
# midday (12th hourly) sample.
ttday = twind[12::24]  # midday sample of each day serves as the day's timestamp
ndays = len(wspeed) // 24
# vectorized equivalent of averaging each consecutive 24-sample block
wsdaily = np.asarray(wspeed[:ndays * 24]).reshape(ndays, 24).mean(axis=1)
fig, ax = plt.subplots(1, 1, figsize=(18, 2))
ax.plot(ttday, wsdaily, 'b-')
ax.set_xlim(start, end)
ax.set_title('Daily average wind speed')
ax.set_ylabel('m/s')
# ### Daily average wind speed cubed
# +
# Wind-mixing proxy: daily-average wind speed cubed, shown against surface
# phytoplankton and nitrate on twin y-axes.
wscubed=wsdaily**3
# plot phytoplankton on top:
fig,ax1=plt.subplots(1,1,figsize=(12,3))
p1=ax1.plot(bio_time,sphyto,
            '-',color='forestgreen',label='Phytoplankton')
p2=ax1.plot(bio_time,sno3,
            '-',color='orange',label='Nitrate')
ax1.set_ylabel('Concentration ($\mu$M N)')
ax1.set_ylim(0,18)
ax2=ax1.twinx()  # second y-axis for the wind quantity
p3=ax2.plot(ttday,wscubed,'b-',label='Wind Speed Cubed')
ax2.set_xlim(start,end)
ax2.set_title('Daily Average Wind Speed cubed and Surface Phytoplankton at Station S3')
ax2.set_ylabel('$\mathregular{m^3}$/$\mathregular{s^3}$')
ax1.legend(handles=[p1[0],p2[0],p3[0]],loc='upper center')
# -
# ### Photosynthetically Available Radiation (PAR) at Surface
# +
# Modeled surface PAR vs surface phytoplankton/nitrate (twin y-axes).
# plot phytoplankton on top:
fig,ax1=plt.subplots(1,1,figsize=(12,3))
p1=ax1.plot(bio_time,sphyto,
            '-',color='forestgreen',label='Phytoplankton')
p2=ax1.plot(bio_time,sno3,
            '-',color='orange',label='Nitrate')
ax1.set_ylabel('Concentration ($\mu$M N)')
ax1.set_ylim(0,18)
ax2=ax1.twinx()
p3=ax2.plot(bio_time,spar,
            '-',color='red',label='Model PAR')
ax2.set_ylabel('PAR (W/$\mathregular{m^2}$)') # say its model PAR
ax2.set_title('Modeled PAR and Surface Phytoplankton at Station S3')
ax1.legend(handles=[p1[0],p2[0],p3[0]],loc='center left')
# -
# ### Forcing: Solar radiation
# Hourly incoming solar radiation from the atmospheric forcing.
fig,ax=plt.subplots(1,1,figsize=(18,2))
ax.plot(twind,solar,'r-')
ax.set_xlim(start,end)
ax.set_title('Solar radiation')
ax.set_ylabel('W/$\mathregular{m^2}$')
# Daily-average solar radiation, mirroring the wind-speed averaging earlier:
# 24-hour block means timestamped at each day's midday sample.
ttday = twind[12::24]  # midday sample of each day serves as the day's timestamp
nfull = len(solar) // 24
solardaily = np.asarray(solar[:nfull * 24]).reshape(nfull, 24).mean(axis=1)
# +
# Daily-average solar radiation vs surface phytoplankton/nitrate (twin y-axes).
# plot phytoplankton on top:
fig,ax1=plt.subplots(1,1,figsize=(12,3))
p1=ax1.plot(bio_time,sphyto,
            '-',color='forestgreen',label='Phytoplankton')
p2=ax1.plot(bio_time,sno3,
            '-',color='orange',label='Nitrate')
ax1.set_ylabel('Concentration ($\mu$M N)')
ax1.set_ylim(0,18)
ax2=ax1.twinx()
p3=ax2.plot(ttday,solardaily,'m-',label='Solar Radiation')
ax2.set_xlim(start,end)
ax2.set_title('Daily Average Solar Radiation and Surface Phytoplankton at Station S3')
ax2.set_ylabel('W/$\mathregular{m^2}$')
ax1.legend(handles=[p1[0],p2[0],p3[0]],loc='upper center')
# -
|
notebooks/Bloom_Timing/stationS3/2019_S3_PhytoBloomTiming.ipynb
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# # Bechmark: Vern9 vs IRKGL16 (Float128)
#
# <ul id="top">
# <li><a href="#Loading-packages">
# Loading Packages</a></li>
#
# <li><a href="#Initial-value-problem:-Burrau-problem">
# Initial value problem: Burrau problem</a></li>
#
# <li><a href="#Integration-with-Vern9">
# Integration with Vern9</a></li>
#
# <li><a href="#Integration-with-IRKGL16-(Adaptive-step)">
# Integration with IRKGL16 (Adaptive-step)</a></li>
#
# <li><a href="#Error-in-energy">
# Error in energy</a></li>
#
# </ul>
#
# 2020-04-18 <NAME>
#
# - Maixter=12
# - tol=1e-13
# ## Loading packages
# Package setup; BenchmarkTools is capped at 10 samples to keep timings short.
using Pkg
using IRKGaussLegendre,LinearAlgebra,Plots,Statistics
using OrdinaryDiffEq
using DiffEqDevTools,BenchmarkTools
using Dates
BenchmarkTools.DEFAULT_PARAMETERS.samples=10
# <a href="#top">Back to the top</a>
#
# ## Initial value problem: ThreeBody problem
# +
# Planar restricted three-body problem (Earth-Moon mass ratio μ) in the
# rotating frame, with state u = (y₁, y₂, y₁', y₂') and p = [μ].
const μ = parse(Float64,"0.012277471")
#const uμ = 1 - μ
f = (du,u,p,t) -> begin
    μ=p[1]
    uμ=1-μ
    @inbounds begin
        # 1 = y₁
        # 2 = y₂
        # 3 = y₁'
        # 4 = y₂'
        # D₁, D₂: cubed distances to the primaries at (-μ, 0) and (uμ, 0)
        D₁ = ((u[1]+μ)^2 + u[2]^2)^(3/2)
        D₂ = ((u[1]-uμ)^2 + u[2]^2)^(3/2)
        du[1] = u[3]+u[2]
        du[2] = u[4]-u[1]
        du[3] = u[4] - uμ*(u[1]+μ)/D₁ - μ*(u[1]-uμ)/D₂
        du[4] = -u[3] - uμ*u[2]/D₁ - μ*u[2]/D₂
    end
end
t₀ = 0.0;
# T: period of the periodic orbit; the integration below runs to 2T
# (the endpoint check sol1.t[end]-2*T128 later relies on this)
T = parse(Float64,"17.0652165601579625588917206249")
tspan = (t₀,2T)
# initial momentum-like coordinate, computed in BigFloat then rounded
py0=parse(BigFloat,"-2.00158510637908252240537862224")+0.994
u0=[0.994,0.0,0.0, Float64(py0)]
prob = ODEProblem(f,u0,tspan,[μ])
# -
# Rebuild the problem with 106-bit BigFloats (Float128-like precision,
# matching the benchmark title).
setprecision(BigFloat, 106)
u0128=BigFloat.([0.994,0.0,0.0, py0])
μ128 = parse(BigFloat,"0.012277471")
tspan128=(BigFloat(t₀),BigFloat(2T))
prob128=ODEProblem(f,u0128,tspan128,[μ128]);
# <a href="#top">Back to the top</a>
#
#
# ## Integration with Vern9
# Recommend methods: Non-Stiff Problems
#
# - For high accuracy non-stiff solving ( BigFloat and tolerances like <1e-12 ), JuliaDiffeq recommend
# the Vern9 method
#
# Reference integration with Vern9 at tight tolerance, saving every 0.5 time units.
sol0 = @time(solve(prob128,Vern9(),saveat=0.5, abstol=1e-13,reltol=1e-13));
sol0.destats
plot(sol0,vars=(1,2))
# ## Integration with IRKGL16 (Adaptive-step)
#
(sol1,iters1,steps1)=@time(solve(prob128,IRKGL16(),reltol=1e-13, abstol=1e-13,myoutputs=true));
# average fixed-point iterations per accepted step
sum(iters1)/sol1.destats.naccept
sol1.destats
T128 = parse(BigFloat,"17.0652165601579625588917206249")
# endpoint check: should be ~0 if integration stops exactly at two periods
sol1.t[end]-2*T128
# ## Integration with IRKGL162 (Adaptive-step)
(sol2,iters2,steps2)=@time(solve(prob128,IRKGL162(),reltol=1e-13, abstol=1e-13,myoutputs=true));
sum(iters2)/sol2.destats.naccept
sol2.destats
# ### Plots IRKGL16
plot(sol1.t[2:end],iters1[2:end], title="Iteration numbers", label="IRKGL16")
plot!(sol2.t[2:end],iters2[2:end],label="IRKGL162")
plot(sol1.t,steps1, title="step sizes in the integration", label="IRKGL16")
plot!(sol2.t,steps2,label="IRKGL162")
plot(sol1,vars=(1,2))
# <a href="#top">Back to the top</a>
# ## Error in energy
# Raise BigFloat precision for the reference energy computation below.
setprecision(BigFloat, 256)
# py0 is a BigFloat, so promotion makes u0128 a BigFloat vector (checked below)
u0128=[0.994,0.0,0.0, py0]
μ128 = parse(BigFloat,"0.012277471")
(typeof(u0128))
# +
"""
    NbodyEnergy(u, mu)

Conserved energy-like invariant of the planar restricted three-body problem
in the rotating frame, for state `u = (x, y, px, py)` and mass parameter `mu`.
"""
function NbodyEnergy(u, mu)
    @inbounds begin
        x, y, px, py = u[1], u[2], u[3], u[4]
        umu = 1 - mu
        # distances to the two primaries at (-mu, 0) and (umu, 0)
        r1 = ((x + mu)^2 + y^2)^(1/2)
        r2 = ((x - umu)^2 + y^2)^(1/2)
        kinetic = (px*px + py*py)/2
        coriolis = px*y - py*x
        potential = -umu/r1 - mu/r2 - mu*umu/2
        return kinetic + coriolis + potential
    end
end
# +
# Relative energy error (E/E0 - 1) along each solution; the invariant should
# be conserved, so this measures integrator quality.
E0=NbodyEnergy(u0128,μ128)
end1=length(steps1)
ulist = sol0.u
tlist = sol0.t
EnergyErrors=[NbodyEnergy(BigFloat.(sol0.u[j]),μ128)/E0-1 for j in 1:length(tlist)]
ulist1 = sol1.u[1:end]
tlist1 = sol1.t[1:end]
EnergyErrors1=[NbodyEnergy(BigFloat.(sol1.u[j]),μ128)/E0-1 for j in 1:length(tlist1)]
ulist2 = sol2.u[1:end]
tlist2 = sol2.t[1:end]
EnergyErrors2=[NbodyEnergy(BigFloat.(sol2.u[j]),μ128)/E0-1 for j in 1:length(tlist2)]
# side-by-side plots of log10 |relative error| for the three integrators
ylimit1=-18
ylimit2=-8
p1=plot(tlist[1:end],log10.(abs.(EnergyErrors)),
        ylims=(ylimit1,ylimit2),
        xlabel="t", title="Error in energy", label="Vern9")
p2=plot(tlist1[1:end],log10.(abs.(EnergyErrors1)),
        ylims=(ylimit1,ylimit2),
        label="IRKGL16")
p2=plot!(tlist2[1:end],log10.(abs.(EnergyErrors2)),
        ylims=(ylimit1,ylimit2),
        label="IRKGl162")
plot(p1,p2,layout=2)
# -
# worst-case |relative energy error| for (Vern9, IRKGL16, IRKGL162)
(Float32(maximum(abs.(EnergyErrors))),Float32(maximum(abs.(EnergyErrors1))),Float32(maximum(abs.(EnergyErrors2))))
# ## Global error
# Global error: compare each numerical endpoint against the initial state
# (the orbit is presumed periodic, so u(2T) should return to u0).
tspan128=[BigFloat(0.0),2*parse(BigFloat,"17.0652165601579625588917206249")]
test_sol = TestSolution(tspan128,[u0128]);
# Vern9
apr = appxtrue(sol0,test_sol)
#@show sol0[end]
#@show apr.u[end]
@show apr.errors;
# IRKGL16
apr = appxtrue(sol1,test_sol)
#@show sol1[end]
#@show apr.u[end]
@show apr.errors;
# IRKGL162
apr = appxtrue(sol2,test_sol)
#@show sol1[end]
#@show apr.u[end]
@show apr.errors
# Absolute endpoint error for each integrator (distance from u0 after 2T).
err1=Float32(norm(sol1.u[end]-u0128))
err2=Float32(norm(sol2.u[end]-u0128))
(err1,err2)
# Component-wise relative endpoint errors; 1e-16 guards a zero reference norm.
a=sol1.u[end]
b=u0128
err = [norm(a[i] - b[i]) / (norm(b[i])+1e-16) for i in 1:4]
a=sol2.u[end]
b=u0128
# fixed: epsilon belongs outside the norm — was norm(b[i]+1e-16),
# inconsistent with the IRKGL16 computation above
err = [norm(a[i] - b[i]) / (norm(b[i])+1e-16) for i in 1:4]
# ## Higher Order Algorithms
# +
#abstols = 1.0 ./ 10.0 .^ (8:16)
#reltols = 1.0 ./ 10.0 .^ (8:16);
# +
#setups = [
# #Dict(:alg=>DP8())
# Dict(:alg=>Vern9())
# Dict(:alg=>IRKGL16())
# Dict(:alg=>IRKGL162())];
#wp = WorkPrecisionSet(prob,abstols,reltols,setups;appxsol=test_sol,save_everystep=false,numruns=100)
#plot(wp)
# -
|
Tutorials/.ipynb_checkpoints/Three Body-Work-Precision-Float128Tole13-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="DkA0Fobtb9dM"
# ##### Copyright 2020 The Cirq Developers
# + cellView="form" id="tUshu7YfcAAW"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] id="igOQCrBOcF5d"
# # Circuits
# + [markdown] id="LHRAvc9TcHOH"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://quantumai.google/cirq/circuits"><img src="https://quantumai.google/site-assets/images/buttons/quantumai_logo_1x.png" />View on QuantumAI</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/quantumlib/Cirq/blob/master/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/colab_logo_1x.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/quantumlib/Cirq/blob/master/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/github_logo_1x.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/Cirq/docs/circuits.ipynb"><img src="https://quantumai.google/site-assets/images/buttons/download_icon_1x.png" />Download notebook</a>
# </td>
# </table>
# + id="bd9529db1c0b"
# Install cirq on the fly (e.g. when running in Colab) if it is not available.
try:
    import cirq
except ImportError:
    print("installing cirq...")
    # !pip install --quiet cirq
    print("installed cirq.")
# + [markdown] id="doUaJJGSAwCO"
# ## Conceptual overview
#
# The primary representation of quantum programs in Cirq is the `Circuit` class. A `Circuit` is a collection of `Moments`. A `Moment` is a collection of `Operations` that all act during the same abstract time slice. An `Operation` is some effect that operates on a specific subset of Qubits; the most common type of `Operation` is a `GateOperation`.
#
# + [markdown] id="77zB_UqkAwCR"
# 
# + [markdown] id="uB8cQJ0PAwCT"
# Let's unpack this.
#
# At the base of this construction is the notion of a qubit. In Cirq, qubits and other quantum objects are identified by instances of subclasses of the Qid base class. Different subclasses of Qid can be used for different purposes. For example, the qubits that Google’s Xmon devices use are often arranged on the vertices of a square grid. For this, the class GridQubit subclasses Qid. For example, we can create a 3 by 3 grid of qubits using
# + id="G30Zl1VwAwCU"
# A 3x3 grid of GridQubits, identified by (row, column) coordinates.
import cirq
qubits = [cirq.GridQubit(x, y) for x in range(3) for y in range(3)]
print(qubits[0])
# + [markdown] id="gpi9rwuiAwCZ"
# The next level up is the notion of a `Gate`. A `Gate` represents a physical process that occurs on a `Qubit`. The important property of a `Gate` is that it can be applied to one or more qubits. This can be done via the `Gate.on` method itself or via `()`, and doing this turns the `Gate` into a `GateOperation`.
# + id="-X_LL4_nAwCa"
# This is a Pauli X gate. It is an object instance.
x_gate = cirq.X
# Applying it to the qubit at location (0, 0) (defined above)
# turns it into an operation.
x_op = x_gate(qubits[0])
print(x_op)
# + [markdown] id="0N7X3nmFAwCd"
# A `Moment` is simply a collection of operations, each of which operates on a different set of qubits, and which conceptually represents these operations as occurring during this abstract time slice. The `Moment` structure itself is not required to be related to the actual scheduling of the operations on a quantum computer, or via a simulator, though it can be. For example, here is a `Moment` in which **Pauli** `X` and a `CZ` gate operate on three qubits:
# + id="naO0-nS0AwCe"
# A Moment holding a CZ on two qubits and an X on a third, disjoint qubit.
cz = cirq.CZ(qubits[0], qubits[1])
x = cirq.X(qubits[2])
moment = cirq.Moment([x, cz])
print(moment)
# + [markdown] id="8TxICnXCAwCh"
# The above is not the only way one can construct moments, nor even the typical method, but illustrates that a `Moment` is just a collection of operations on disjoint sets of qubits.
#
# Finally, at the top level a `Circuit` is an ordered series of `Moment` objects. The first `Moment` in this series contains the first `Operations` that will be applied. Here, for example, is a simple circuit made up of two moments:
# + id="W7ToOyp9AwCi"
# A two-Moment circuit: (CZ on q0,q1 together with X on q2), then CZ on q1,q2.
cz01 = cirq.CZ(qubits[0], qubits[1])
x2 = cirq.X(qubits[2])
cz12 = cirq.CZ(qubits[1], qubits[2])
moment0 = cirq.Moment([cz01, x2])
moment1 = cirq.Moment([cz12])
circuit = cirq.Circuit((moment0, moment1))
print(circuit)
# + [markdown] id="YMXONr0SAwCl"
# Note that the above is one of the many ways to construct a `Circuit`, which illustrates the concept that a `Circuit` is an iterable of `Moment` objects.
# + [markdown] id="93Wlr_HjAwCm"
# ## Constructing circuits
#
# Constructing Circuits as a series of `Moment` objects, with each `Moment` being hand-crafted, is tedious. Instead, we provide a variety of different ways to create a `Circuit`.
#
# One of the most useful ways to construct a `Circuit` is by appending onto the `Circuit` with the `Circuit.append` method.
#
# + id="7xtjmYzKAwCn"
# Build a circuit incrementally with Circuit.append.
from cirq.ops import CZ, H
q0, q1, q2 = [cirq.GridQubit(i, 0) for i in range(3)]
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1), H(q2)])
print(circuit)
# + [markdown] id="CKuRApDeAwCp"
# This appended a new moment to the qubit, which we can continue to do:
# + id="HO8WYyU9AwCq"
# Appending again places the new operations after the existing moments.
circuit.append([H(q0), CZ(q1, q2)])
print(circuit)
# + [markdown] id="8HNpFd0UAwCs"
# In these two examples, we appended full moments, what happens when we append all of these at once?
# + id="Q3qzMlQmAwCt"
# Appending all four operations at once still produces two moments,
# governed by the default InsertStrategy (NEW_THEN_INLINE).
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1), H(q2), H(q0), CZ(q1, q2)])
print(circuit)
# + [markdown] id="OQX8FTMyAwCw"
# We see that here we have again created two `Moment` objects. How did `Circuit` know how to do this? `Circuit`'s `Circuit.append` method (and its cousin, `Circuit.insert`) both take an argument called the `InsertStrategy`. By default, `InsertStrategy` is `InsertStrategy.NEW_THEN_INLINE`.
# + [markdown] id="2t7qgbPkAwCx"
# ### InsertStrategies
#
# `InsertStrategy` defines how `Operations` are placed in a `Circuit` when requested to be inserted at a given location. Here, a location is identified by the index of the `Moment` (in the `Circuit`) where the insertion is requested to be placed at (in the case of `Circuit.append`, this means inserting at the `Moment`, at an index one greater than the maximum moment index in the `Circuit`).
#
# There are four such strategies: `InsertStrategy.EARLIEST`, `InsertStrategy.NEW`, `InsertStrategy.INLINE` and `InsertStrategy.NEW_THEN_INLINE`.
#
# `InsertStrategy.EARLIEST` is defined as:
#
# *Scans backward from the insert location until a moment with operations touching qubits affected by the operation to insert is found. The operation is added to the moment just after that location.*
#
# For example, if we first create an `Operation` in a single moment, and then use `InsertStrategy.EARLIEST`, the `Operation` can slide back to this first `Moment` if there is space:
# + id="Hd5IGmQrAwCx"
# EARLIEST lets operations slide back into the earliest moment with room.
from cirq.circuits import InsertStrategy
circuit = cirq.Circuit()
circuit.append([CZ(q0, q1)])
circuit.append([H(q0), H(q2)], strategy=InsertStrategy.EARLIEST)
print(circuit)
# + [markdown] id="BGnlt-kPAwCz"
# After creating the first moment with a `CZ` gate, the second append uses the `InsertStrategy.EARLIEST` strategy. The `H` on `q0` cannot slide back, while the `H` on `q2` can and so ends up in the first `Moment`.
#
# Contrast this with the `InsertStrategy.NEW` `InsertStrategy`:
#
# *Every operation that is inserted is created in a new moment.*
# + id="Yupv8gQOAwC0"
circuit = cirq.Circuit()
circuit.append([H(q0), H(q1), H(q2)], strategy=InsertStrategy.NEW)
print(circuit)
# + [markdown] id="F7ziPs17AwC2"
# Here every operator processed by the append ends up in a new moment. `InsertStrategy.NEW` is most useful when you are inserting a single operation and do not want it to interfere with other `Moments`.
# + [markdown] id="Ceb0nBxeAwC3"
# Another strategy is `InsertStrategy.INLINE`:
# + [markdown] id="zsbTh2QhAwC4"
# *Attempts to add the operation to insert into the moment just before the desired insert location. But, if there’s already an existing operation affecting any of the qubits touched by the operation to insert, a new moment is created instead.*
# + id="J3LTjH9-AwC5"
# INLINE tries the moment just before the insertion point, creating a new
# moment only when a qubit is already occupied there.
circuit = cirq.Circuit()
circuit.append([CZ(q1, q2)])
circuit.append([CZ(q1, q2)])
circuit.append([H(q0), H(q1), H(q2)], strategy=InsertStrategy.INLINE)
print(circuit)
# + [markdown] id="7iDg6j4fAwC7"
# After two initial `CZ` between the second and third qubit, we try to insert three `H` `Operations`. We see that the `H` on the first qubit is inserted into the previous `Moment`, but the `H` on the second and third qubits cannot be inserted into the previous `Moment`, so a new `Moment` is created.
#
# Finally, we turn to the default strategy:
# + [markdown] id="O-LfqxSWAwC8"
# *Creates a new moment at the desired insert location for the first operation, but then switches to inserting operations according to `InsertStrategy.INLINE`.*
# + id="LmT3IKEEAwC9"
# NEW_THEN_INLINE: the first operation starts a new moment, the rest follow INLINE.
circuit = cirq.Circuit()
circuit.append([H(q0)])
circuit.append([CZ(q1,q2), H(q0)], strategy=InsertStrategy.NEW_THEN_INLINE)
print(circuit)
# + [markdown] id="m2SoYFsFAwC_"
# The first append creates a single moment with an `H` on the first qubit. Then, the append with the `InsertStrategy.NEW_THEN_INLINE` strategy begins by inserting the `CZ` in a new `Moment` (the `InsertStrategy.NEW` in `InsertStrategy.NEW_THEN_INLINE`). Subsequent appending is done `InsertStrategy.INLINE`, so the next `H` on the first qubit is appending in the just created `Moment`.
# + [markdown] id="Ij28qJdtAwC_"
# ### Patterns for arguments to append and insert
#
# In the above examples, we used a series of `Circuit.append `calls with a list of different `Operations` added to the circuit. However, the argument where we have supplied a list can also take more than just list values. For instance:
# + id="kmt8hAfZAwDA"
def my_layer():
    """Yield an OP_TREE: single operations plus (nested) lists of operations."""
    yield CZ(q0, q1)
    yield [H(q) for q in (q0, q1, q2)]
    yield [CZ(q1, q2)]
    yield [H(q0), [CZ(q1, q2)]]
circuit = cirq.Circuit()
circuit.append(my_layer())  # append flattens the generator's nested structure
for x in my_layer():
    print(x)
# + [markdown] id="DpGimiozAwDE"
# Recall that Python functions with a `yield` are generators. Generators are functions that act as iterators. In the above example, we see that we can iterate `over my_layer()`. In this case, each of the `yield` returns produces what was yielded, and here these are:
#
# * `Operations`,
# * lists of `Operations`,
# * or lists of `Operations` mixed with lists of `Operations`.
#
#
# When we pass an iterator to the `append` method, `Circuit` is able to flatten all of these and pass them as one giant list to `Circuit.append` (this also works for `Circuit.insert`).
#
# The above idea uses the concept of `OP_TREE`. An `OP_TREE` is not a class, but a *contract*. The basic idea is that, if the input can be iteratively flattened into a list of operations, then the input is an `OP_TREE`.
#
# A very nice pattern emerges from this structure: define generators for sub-circuits, which can vary by size or `Operation` parameters.
#
# Another useful method to construct a `Circuit` fully formed from an `OP_TREE` is to pass the `OP_TREE` into `Circuit` when initializing it:
# + id="dC6iBIqrAwDF"
# A Circuit can also be constructed directly from an OP_TREE.
circuit = cirq.Circuit(H(q0), H(q1))
print(circuit)
# + [markdown] id="dE0kxo6tAwDI"
# ### Slicing and iterating over circuits
#
# Circuits can be iterated over and sliced. When they are iterated, each item in the iteration is a moment:
#
# + id="z2csEbbyAwDI"
# Iterating a circuit yields its Moments in order.
circuit = cirq.Circuit(H(q0), CZ(q0, q1))
for moment in circuit:
    print(moment)
# + [markdown] id="8gT1t2drAwDL"
# Slicing a `Circuit`, on the other hand, produces a new `Circuit` with only the moments corresponding to the slice:
# + id="hxczWjkMAwDL"
# Slicing produces a new Circuit containing only the selected moments.
circuit = cirq.Circuit(H(q0), CZ(q0, q1), H(q1), CZ(q0, q1))
print(circuit[1:3])
# + [markdown] id="cDAVDT7bAwDO"
# Especially useful is dropping the last moment (which are often just measurements): `circuit[:-1]`, or reversing a circuit: `circuit[::-1]`.
#
# + [markdown] id="7af5e8ea5b45"
# ### Related
#
#
# - [Transform circuits](transform.ipynb) - features related to circuit optimization and compilation
# - [Devices](devices.ipynb) - validate circuits against device constraints
# - [Import/export circuits](interop.ipynb) - features to serialize/deserialize circuits into/from different formats
|
docs/circuits.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import os
import tqdm
import pandas as pd
# ## I. convert emails text (both training and testing) into appropriate jsonl file format
# ### 6088 entries in training set ( 2000+ machine generated, the rest are human-written)
# #### 4000+ are from the email corpus; 2000+ are GPT-2 generated or from the ENRON Email Dataset
# ###### kaggle datasets download -d nitishabharathi/email-spam-dataset
PATH = '/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/8_GPT-2_Generated_Text_for_Grover/'
folders = [f for f in os.listdir(PATH) if not f.startswith('.')]
# Read every machine-generated txt file in each folder and collect it as a
# machine-labelled training record for Grover.
lis = []
for folder in folders:
    for fname in os.listdir(f'{PATH}{folder}'):
        # context manager closes each file handle (the original leaked them)
        with open(f'{PATH}{folder}/{fname}', 'r') as fh:
            text = fh.read()
        lis.append({"article": text, "label": "machine", "split": "train"})
#read all human json in email corpus, label all content as human
# Read every extracted email JSON from the corpus and collect its body as a
# human-labelled training record, skipping emails with no/NA content.
path = '/Users/jessicademacbook/DSCI-550-Assignment-1/data/separated by email/'
for name in os.listdir(path):
    if not name.endswith('.json'):
        continue
    # context manager closes each file handle (the original leaked them)
    with open(f'{path}{name}', 'r') as fh:
        text = json.load(fh)
    try:
        content = text["X-TIKA:content"]
    except KeyError:
        # email has no extracted body; skip it
        continue
    if not pd.isna(content):
        lis.append({"article": content, "label": "human", "split": "train"})
# +
# Append spam bodies (Label == 1) from the fake-emails CSV as additional
# machine-labelled training records, skipping missing bodies.
with open('/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/fake_emails.csv', "r") as f:
    result = pd.read_csv(f)
spam = result['Label'] == 1
for body in result[spam]['Body']:
    if not pd.isna(body):
        lis.append({"article": body, "label": "machine", "split": "train"})
# -
# Serialize the combined human + machine records as one JSON object per line.
print('The training set has ', len(lis),'emails in total.')
#write to a jsonl file with all human and machine generated email content
with open('/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/input_emails.jsonl','w') as outfile:
    for entry in lis:
        json.dump(entry, outfile)
        outfile.write('\n')
#check the written jsonl file has correct labels
# Re-read the file and collect every non-NA article body.
with open('/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/input_emails.jsonl', "r") as f:
    test=[]
    for l in f:
        item = json.loads(l)
        if pd.isna(item['article']):
            pass
        else:
            test.append(item['article'])
# NOTE(review): all(test) checks that every collected article is truthy
# (non-empty) — NA articles were already skipped above, so this is a weaker
# check than the message suggests; confirm the intent.
print('Are all content are NA-free?', all(test))
# ### Collect 800 email text, labeled as test, write to jsonl file for discrimination
# +
#get generated text for grover test
new_path = '/Users/jessicademacbook/DSCI-550-Assignment-2/data/additional-features-v2/new/4_GPT-2_Generated_Text/'
folders = [f for f in os.listdir(new_path) if not f.startswith('.')]
test_lis = []
for folder in folders:
    for i in os.listdir(f'{new_path}{folder}'):
        # Close each file promptly (the original left every handle open).
        with open(f'{new_path}{folder}/{i}', 'r') as f:
            text = f.read()
        # All test articles are GPT-2 output, so label them "machine".
        test_lis.append({"article": text, "split": "test", "label": "machine"})
print('The file for discrimination has', len(test_lis), 'emails in it.')
#write to jsonl file
with open('/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/test_input.jsonl', 'w') as f:
    for entry in test_lis:
        json.dump(entry, f)
        f.write('\n')
# -
# ## II. Grover Training-this part is done in Google Colab, and the corresponding notebook is called Grover_training in the same folder as this one
# see Grover_training.ipynb
# ## III. Interpreting Grover training result
import numpy as np
# #### The grover model returns a list of data pair showing the probability of the label being corrected. I labeled all the test input as machine, and the accuracy turns out to be 1, meaning that all 800 emails are identified as machine generated.
# + jupyter={"outputs_hidden": true} tags=[]
path = '/Users/jessicademacbook/DSCI-550-Assignment-2/data/Grover_input_output/final_outputs_test-probs.npy'
data_array = np.load(path)
print('The first 20 pairs look like', data_array[0:20])
# Count predictions whose first ("machine") probability clears 0.95.
a = sum(1 for pair in data_array if pair[0] > 0.95)
print(a, "of 800 emails have probability of being machine generated higher than 0.95.")
print("All emails are identified as machine generated.")
# -
# +
import pandas as pd
df = pd.read_csv('../../data/additional-features-v2/new/assignment2.tsv', sep='\t', index_col=0)
# -
# Record the Grover verdict ("Machine") for each of the 800 rows, then save.
df['grover results'] = pd.Series(['Machine' for _ in range(800)])
df.to_csv('../../data/additional-features-v2/new/assignment2.tsv', sep='\t')
|
notebooks/8/8.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from compas.datastructures import Mesh
from compas.datastructures import subdivision as sd
from compas_plotters import MeshPlotter
# Start from a regular octahedron (the 8-faced platonic polyhedron).
mesh = Mesh.from_polyhedron(8)
# +
mesh.summary()
# 2D plot of the base mesh: edges, faces and small vertex markers.
plotter = MeshPlotter(mesh)
plotter.draw_edges()
plotter.draw_faces()
plotter.draw_vertices(radius=0.01)
plotter.show()
# -
# Three successive subdivision passes: tri-split, then Loop, then Catmull-Clark.
mesh2 = sd.mesh_subdivide_tri(mesh)
mesh3 = sd.trimesh_subdivide_loop(mesh2)
mesh4 = sd.mesh_subdivide_catmullclark(mesh3)
plotter = MeshPlotter(mesh3)
plotter.draw_edges()
plotter.draw_faces()
plotter.draw_vertices(radius=0.01)
plotter.show()
mesh4.summary()
# # 3d viewer
import ipyvolume as ipv
# ipyvolume's plot_trisurf expects flat x/y/z lists plus a face-index list.
vertices, faces = mesh3.to_vertices_and_faces()
vertices
faces
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
z = [v[2] for v in vertices]
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, faces, color='white')
ipv.show()
vertices, faces = mesh4.to_vertices_and_faces()
x = [v[0] for v in vertices]
y = [v[1] for v in vertices]
z = [v[2] for v in vertices]
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, faces, color='white')
ipv.show()
faces
# plot_trisurf needs triangles; fan-triangulate every n-gon around its
# first vertex so quads (and larger faces) render correctly.
triangles_only = []
for f in faces:
    if len(f) == 3:
        triangles_only.append(f)
    else:
        for i in range(len(f) - 2):
            triangles_only.append([f[0], f[i+1], f[i+2]])
triangles_only
ipv.figure(width=800, height=450)
viewermesh = ipv.plot_trisurf(x, y, z, triangles_only, color='white')
ipv.style.use('minimal')
ipv.show()
def old_draw_compas_mesh(mesh, color='white'):
    """
    Renders a compas mesh on a 3D canvas with ipyvolume.
    Parameters
    ----------
    mesh : :class: compas.datastructures.Mesh
        the mesh to be shown in 3D
    color : str, optional
        surface color passed through to ipyvolume (default 'white')
    Returns
    -------
    an instance of ipyvolume.widgets.Mesh
    """
    # extract lists of vertices and faces
    vertices, faces = mesh.to_vertices_and_faces()
    # extract x, y and z values into separate lists
    x = [v[0] for v in vertices]
    y = [v[1] for v in vertices]
    z = [v[2] for v in vertices]
    # triangulate n-gons (fan triangulation around each face's first vertex)
    triangles_only = []
    for f in faces:
        if len(f) == 3:
            triangles_only.append(f)
        else:
            for i in range(len(f) - 2):
                triangles_only.append([f[0], f[i+1], f[i+2]])
    # create the ipyvolume plot; honour the caller's color argument
    # (the original hard-coded 'white', silently ignoring the parameter)
    ipv.figure(width=800, height=450)
    viewermesh = ipv.plot_trisurf(x, y, z, triangles_only, color=color)
    ipv.style.use('minimal')
    ipv.show()
    return viewermesh
# Import the helper before its first use: the original called
# draw_compas_mesh one line before importing it, which raises NameError
# on a fresh kernel run.
from utilities import draw_compas_mesh
mesh5 = sd.mesh_subdivide_doosabin(mesh3)
draw_compas_mesh(mesh5)
ipvmesh = draw_compas_mesh(mesh5, color='cyan')
# Inspect the widget's attributes.
ipvmesh.__dict__
draw_compas_mesh(mesh5)
|
T1/09_mesh_subdivision/191007_3d_mesh.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
# Number of training passes over the federated dataset (papermill parameter).
epochs = 50
# -
# # Part 7 - Make we do Federated Learning with FederatedDataset
#
#
# We go introduce new tool if you wan dey use federated datasets. We don create `FederatedDataset` class wey we use as we dey use PyTorch Dataset class, we go give federated dataloader `FederatedDataLoader` wey go come repeat am in a federated fashion.
#
#
# Person wey write am:
# - <NAME> - Twitter: [@iamtrask](https://twitter.com/iamtrask)
# - <NAME> - GitHub: [@LaRiffle](https://github.com/LaRiffle)
#
# Person wey translate am:
# - <NAME> - Twitter: [@techie991](https://twitter.com/techie991)
#
# We go use sandbox wey we discover last lesson
import torch as th
import syft as sy
# Sandbox injects demo workers (alice, bob, ...) and a `grid` into globals().
sy.create_sandbox(globals(), verbose=False)
# Oya find dataset
boston_data = grid.search("#boston", "#data")
boston_target = grid.search("#boston", "#target")
# We go load a model and an optimizer
# +
# Feature count read from alice's shard; single regression target.
n_features = boston_data['alice'][0].shape[1]
n_targets = 1
model = th.nn.Linear(n_features, n_targets)
# -
# We go cast the date wey we get for `FederatedDataset`. See the workers wey hold part of the data.
# +
# Cast the result in BaseDatasets
datasets = []
for worker in boston_data.keys():
    dataset = sy.BaseDataset(boston_data[worker][0], boston_target[worker][0])
    datasets.append(dataset)
# Make we build FederatedDataset object
dataset = sy.FederatedDataset(datasets)
print(dataset.workers)
# One optimizer per worker, all sharing the same model parameters.
optimizers = {}
for worker in dataset.workers:
    optimizers[worker] = th.optim.Adam(params=model.parameters(),lr=1e-2)
# -
# We go put am for `FederatedDataLoader` and options go dey specify
train_loader = sy.FederatedDataLoader(dataset, batch_size=32, shuffle=False, drop_last=False)
# Last last we go iterate over epochs. You see sey he dey similar to how we dey train pure and local PyTorch training!
for epoch in range(1, epochs + 1):
    loss_accum = 0
    for batch_idx, (data, target) in enumerate(train_loader):
        # Send the model to whichever worker holds this batch, train there,
        # then pull the updated model (and the loss value) back.
        model.send(data.location)
        optimizer = optimizers[data.location.id]
        optimizer.zero_grad()
        pred = model(data)
        loss = ((pred.view(-1) - target)**2).mean()
        loss.backward()
        optimizer.step()
        model.get()
        loss = loss.get()
        loss_accum += float(loss)
        if batch_idx % 8 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tBatch loss: {:.6f}'.format(
                epoch, batch_idx, len(train_loader),
                100. * batch_idx / len(train_loader), loss.item()))
    print('Total loss', loss_accum)
# # Congratulations!!! - Oya Join the Community!
#
# Clap for una sef as you don finish this notebook tutorial! If you enjoy am and you wan join the movement towards privacy preserving, decentralized ownership of AI and the AI supply chain (data), follow the steps wey dey below.
#
# ### Star PySyft on GitHub
#
# The easiest way to helep our community na to star the GitHub repos! This go helep raise awareness of the tools we dey build.
#
# - [Star PySyft](https://github.com/OpenMined/PySyft)
#
# ### Join our Slack!
#
# To follow up bumper to bumper on how latest advancements, join our community! You can do so by filling out the form at [http://slack.openmined.org](http://slack.openmined.org)
#
# ### Join a Code Project!
#
# The best way to contribute to our community na to become code contributor! You fit go to PySyft GitHub Issues page and filter for "Projects". E go show you all the top level Tickets giving an overview of what projects you fit join! If you no wan join any project, but you wan code small, you fit look for more "one off" mini-projects by searching for GitHub issues marked "good first issue"
#
# - [PySyft Projects](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3AProject)
# - [Good First Issue Tickets](https://github.com/OpenMined/PySyft/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22)
#
# ### Donate
#
# If you no get time to contribute to our codebase, but still like to lend support, you fit be a Backer on our Open Collective. All donations wey we get na for our web hosting and other community expenses such as hackathons and meetups!
#
# [OpenMined's Open Collective Page](https://opencollective.com/openmined)
|
examples/tutorials/translations/Pidgin/Part 07 - Make we do Federated Learning with FederatedDataset-Pidgin.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [](http://colab.research.google.com/github/mpariente/asteroid/blob/master/notebooks/03_PITLossWrapper.ipynb)
# ## Permutation invariant training
# Permutation invariant training (PIT) was successfully introduced to train DNN-based speaker-independent speech separation systems [1, 2, 3, 4]. Since then, it has been applied to environmental source separation [5] and classification [6], and end-to-end diarization [7, 8].
# There has also been recent work to extend or improve on PIT-based training [9, 10, 11].
#
# Asteroid provides `PITLossWrapper`, a flexible class which enables seamless transformation of simple loss functions into permutation invariant losses, for any loss function and any number of sources!
#
# It supports three types of loss functions :
# - 1) The loss function compute the average loss for a given permutation (over all source-estimates). `PITLossWrapper` loops over the permutations and returns the minimum loss, the one to be backproped. (`pit_from = perm_avg`)
# - 2) Second way, compute pair wise losses and take the mean over each permutation.
# - a) The pairwise losses can be computed using one function, which returns a pairwise matrix. In this case `PITLossWrapper` finds the best permutation and returns the minimum loss. (`pit_from = pw_mtx`)
# - b) The provided function computes the loss for one given target-estimate pair (a point in the pairwise matrix). `PITLossWrapper` computes the pairwise loss matrix by calling this function on each pair. It then finds the best permutation and returns the minimum loss as done in 2b. (`pit_from = pw_pt`)
#
# In addition, we provide common loss functions in these three forms.
# Let's try to understand these three ways of computing PIT losses.
# First install asteroid and depencies
# !pip install git+https://github.com/mpariente/asteroid.git@master
# ### After installing requirements, you need to Restart Runtime (Ctrl + M).
#
# Else it will fail to import asteroid
import torch
from itertools import permutations
import numpy as np
import matplotlib.pyplot as plt
from asteroid.losses import PITLossWrapper
from asteroid.losses import pairwise_mse, singlesrc_mse, multisrc_mse
# To be able to visualize some results, we will take a batch size of 1.
batch_size, n_sources, feat_dim = 1, 4, 50
# First, take random sources
sources = torch.randn(batch_size, n_sources, feat_dim)
# Generate estimates : Randomly permute the sources and add some noise.
random_permutation = torch.randperm(n_sources)
estimate_sources = sources[:, random_permutation] + torch.randn(batch_size, n_sources, feat_dim)
# ### 1. The Naive Way.
# The naive way consists in looping over all permutations on the source axis to find the best one.
# It corresponds to the mode `pit_from='perm_avg'`, meaning `permutation average` because the loss function computes the
# average loss for a set of sources and their estimates (a given permutation).
# +
# The naive way. Find the best loss by looping over the permutations.
perms = list(permutations(range(n_sources)))
# One loss value per permutation p: MSE between estimates and sources[:, p].
all_losses = torch.stack([multisrc_mse(estimate_sources, sources[:, p]) for p in perms])
best_loss_idx = torch.argmin(all_losses)
# We will backprop all_losses[best_loss_idx]
print("Best permutation : {}. 0riginal permutation : {}".format(perms[best_loss_idx], random_permutation))
# This is equivalent to :
loss_func = PITLossWrapper(multisrc_mse, pit_from='perm_avg')
best_loss = loss_func(estimate_sources, sources)
# -
# ### 2. More efficient way
# The first thing to notice is that the loss on one permutation is the sum of between-source losses. We can speed up the naive approach by computing individual pairwise losses and averaging them to compute the loss for each permutation. We can compute the pairwise losses in two ways :
# - The given function compute the loss function for a given pair (a point in the pairwise matrix) and we can loop over the pairs. (`pit_from='pw_pt'` for pairwise point.)
# - The given function computes the pairwise matrix directly. (`pit_from='pw_mtx'` for pairwise matrix)
# +
# Without source axis, let's compute the loss for each source-estimate pair.
def mse(est_target, target):
    """Batch MSE between a source and its estimate (mean over last axis)."""
    squared_error = (target - est_target) ** 2
    return squared_error.mean(dim=-1)
# Compute pairwise losses
# pairwise_losses[:, i, j] is the MSE between estimate i and source j.
pairwise_losses = torch.zeros(batch_size, n_sources, n_sources)
for i in range(n_sources):
    for j in range(n_sources):
        pairwise_losses[:, i, j] = mse(estimate_sources[:, i], sources[:, j])
# Plot the pairwise losses
ax = plt.imshow(pairwise_losses[0].data.numpy())
# We can also compute the pairwise losses directly using dimension broadcasting
def pairwise_mse(est_targets, targets):
    """Batch pairwise MSE: out[b, i, j] is the MSE between estimate i and target j."""
    # Insert singleton axes so broadcasting builds the full (est, target) grid:
    # (batch, n_est, 1, ...) - (batch, 1, n_targets, ...) -> (batch, n_est, n_targets, ...)
    diff = est_targets.unsqueeze(2) - targets.unsqueeze(1)
    pw_loss = diff ** 2
    # Average over every axis after the pairwise grid (feature axes).
    return pw_loss.mean(dim=list(range(3, pw_loss.ndim)))
# Compute pairwise losses using broadcasting (+ unit test equality)
direct_pairwise_losses = pairwise_mse(estimate_sources, sources)
# Sanity check: broadcasting result matches the explicit double loop.
torch.testing.assert_allclose(pairwise_losses, direct_pairwise_losses)
# Plot the pairwise losses
ax = plt.imshow(direct_pairwise_losses[0].data.numpy())
# Now that we have the loss values for each source-estimate pair, we can compute the average over this matrix for each permutation.
# Below are plotted the one-hot permutation matrices, which will be individually multiplied with the `pairwise_losses`.
# +
# Let's plot all the permutation matrices
eye = torch.eye(n_sources)
# One n x n one-hot matrix per permutation of the source axis.
perms_one_hot = torch.stack([eye[:, perm] for perm in perms], dim=0)
fig, axs = plt.subplots(2, len(perms)//2, figsize=(20, 3))
fig.suptitle('One-hot permutation matrices', fontsize=16)
for i in range(len(perms)):
    col, line = divmod(i, 2)
    axs[line, col].imshow((perms_one_hot[i]).data.numpy())
    axs[line, col].set_axis_off()
fig, axs = plt.subplots(2, len(perms)//2, figsize=(20, 3))
fig.suptitle('Pairwise loss matrix multiplied by one-hot permutation matrices', fontsize=16)
for i in range(len(perms)):
    col, line = divmod(i, 2)
    axs[line, col].imshow((perms_one_hot[i] * pairwise_losses[0]).data.numpy())
    axs[line, col].set_axis_off()
# -
# -
# The mean of each matrix above is a potential loss, the minimum of which will be backproped.
# ### Timing the three approaches, for MSE and SI-SDR
# Benchmark each PIT computation mode. The printed labels now follow the
# (a)/(b) taxonomy from section 2 above: pw_pt is variant (a), pw_mtx is
# variant (b) — the original printed "(b)" for both.
print("For MSE")
print("1. Naive approach ")
loss_func = PITLossWrapper(multisrc_mse, pit_from='perm_avg')
# %timeit best_loss = loss_func(sources, estimate_sources)
print("2. More efficient approaches (a)")
loss_func = PITLossWrapper(singlesrc_mse, pit_from='pw_pt')
# %timeit best_loss = loss_func(sources, estimate_sources)
print("2. More efficient approaches (b)")
loss_func = PITLossWrapper(pairwise_mse, pit_from='pw_mtx')
# %timeit best_loss = loss_func(sources, estimate_sources)
from asteroid.losses import pairwise_neg_sisdr, singlesrc_neg_sisdr, multisrc_neg_sisdr
print("For SI-SDR")
print("1. Naive approach ")
loss_func = PITLossWrapper(multisrc_neg_sisdr, pit_from='perm_avg')
# %timeit best_loss = loss_func(sources, estimate_sources)
print("2. More efficient approaches (a)")
loss_func = PITLossWrapper(singlesrc_neg_sisdr, pit_from='pw_pt')
# %timeit best_loss = loss_func(sources, estimate_sources)
print("2. More efficient approaches (b)")
loss_func = PITLossWrapper(pairwise_neg_sisdr, pit_from='pw_mtx')
# %timeit best_loss = loss_func(sources, estimate_sources)
# ### Citations:
# Classic PIT-based speech separation :
# - [1] <NAME> et al. “Permutation Invariant Training of Deep Models for Speaker-Independent Multi-Talker Speech Separation.” 2017 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP).
# - [2] <NAME> et al. "Multi-talker Speech Separation with Utterance-level Permutation Invariant Training of Deep Recurrent Neural Networks." 2017
# - [3] <NAME>, and <NAME>. “Conv-TasNet: Surpassing Ideal Time–Frequency Magnitude Masking for Speech Separation.” IEEE/ACM Transactions on Audio, Speech, and Language Processing 27.8 (2019)
# - [4] Takahashi, Naoya et al. “Recursive Speech Separation for Unknown Number of Speakers.” Interspeech 2019.
#
# PIT-based environmental sound separation :
# - [5] Kavalerov, Ilya et al. “Universal Sound Separation.” 2019 IEEE Workshop on Applications of Signal Processing to Audio and Acoustics (WASPAA)
# - [6] <NAME> et al. "Improving Universal Sound Separation Using Sound Classification", 2019.
#
# PIT-based end-to-end diariazation
# - [7] Fujita, Yusuke et al. "End-to-End Neural Speaker Diarization with Permutation-Free Objectives", Interspeech 2019
# - [8] Fujita, Yusuke et al. "End-to-End Neural Speaker Diarization with Self-attention", arXiv 2019
#
# Papers on PIT alternatives :
# - [9] <NAME> et al. "Utterance-level Permutation Invariant Training with Discriminative Learning for Single Channel Speech Separation," 2018 11th International Symposium on Chinese Spoken Language Processing (ISCSLP).
# - [10] Yang, Gene-Ping et al. "Interrupted and cascaded permutation invariant training for speech separation" 2019.
# - [11] Yousefi, Midia et al. “Probabilistic Permutation Invariant Training for Speech Separation.” Interspeech 2019
|
notebooks/03_PITLossWrapper.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# #!/usr/bin/python
# -*- coding: utf-8 -*-
"""This notebook creates the to-do list for TAG YEAR in COUNTRY"""
import inspect, os, sys
try:
    import pywikibot as pb
except ImportError:
    # pywikibot is not installed system-wide: add this notebook's folder and
    # its parent (the repository checkout) to sys.path and retry the import.
    # The original used a bare `except:`, which would also swallow
    # KeyboardInterrupt and unrelated errors.
    current_folder = os.path.realpath(os.path.abspath(os.path.split(inspect.getfile(inspect.currentframe()))[0]))
    folder_parts = current_folder.split(os.sep)
    pywikibot_folder = os.sep.join(folder_parts[:-1])
    if current_folder not in sys.path:
        sys.path.insert(0, current_folder)
    if pywikibot_folder not in sys.path:
        sys.path.insert(0, pywikibot_folder)
    import pywikibot as pb
# -
import pandas as pd
from io import StringIO
from mako.template import Template
from datetime import datetime
# +
# Contest configuration: Wiki Loves Earth (WLE) 2017 in Spain. The page
# names below are built on Wikimedia Commons under "Commons:Wiki Loves ...".
YEAR = 2017
TAG = 'WLE'
TAG_EXT = 'Wiki Loves Earth'
COUNTRY = "Spain"
BASE_NAME = "Commons:Wiki Loves in {2}/{1}/{0}".format(YEAR, TAG_EXT, COUNTRY)
LOG_PAGE = BASE_NAME + '/Log'
BASE_SITE_DB_NAME = "Commons:Wiki Loves in {1}/{0}".format(TAG_EXT, COUNTRY)
SITE_DB_PAGE = BASE_SITE_DB_NAME + "/Sites DB"
TODO_WLE_PAGE = BASE_NAME + '/To-do'
commons_site = pb.Site('commons', 'commons')
# -
# Per-region lookup keyed by ISO 3166-2 community code:
# [0] Spanish Wikipedia annex article listing the region's SCI sites,
# [1] English name of the autonomous community.
annexes = {
    'ES-AN': [u'Anexo:Lugares de importancia comunitaria de Andalucía', 'Andalusia'],
    'ES-AR': [u'Anexo:Lugares de importancia comunitaria de Aragón', 'Aragon'],
    'ES-AS': [u'Anexo:Lugares de importancia comunitaria de Asturias', 'Asturias'],
    'ES-CB': [u'Anexo:Lugares de importancia comunitaria de Cantabria', 'Cantabria'],
    'ES-CM': [u'Anexo:Lugares de importancia comunitaria de Castilla-La Mancha', 'Castile-La Mancha'],
    'ES-CL': [u'Anexo:Lugares de importancia comunitaria de Castilla y León', u'Castile and León'],
    'ES-CT': [u'Anexo:Lugares de importancia comunitaria de Cataluña', 'Catalonia'],
    'ES-MD': [u'Anexo:Lugares de importancia comunitaria de la Comunidad de Madrid', 'Community of Madrid'],
    'ES-VC': [u'Anexo:Lugares de importancia comunitaria de la Comunidad Valenciana', 'Valencian Community'],
    'ES-EX': [u'Anexo:Lugares de importancia comunitaria de Extremadura', 'Extremadura'],
    'ES-IB': [u'Anexo:Lugares de importancia comunitaria de las Islas Baleares', 'Balearic Islands'],
    'ES-CN': [u'Anexo:Lugares de importancia comunitaria de las Islas Canarias', 'Canary Islands'],
    'ES-GA': [u'Anexo:Lugares de importancia comunitaria de Galicia', 'Galicia'],
    'ES-RI': [u'Anexo:Lugares de importancia comunitaria de La Rioja', 'La Rioja'],
    'ES-NC': [u'Anexo:Lugares de importancia comunitaria de Navarra', 'Navarre'],
    'ES-MC': [u'Anexo:Lugares de importancia comunitaria de la Región de Murcia', 'Region of Murcia'],
    'ES-PV': [u'Anexo:Lugares de importancia comunitaria del País Vasco', 'Basque Country'],
    'ES-CE': [u'Anexo:Lugares de importancia comunitaria de Ceuta y Melilla', 'Ceuta'],
    'ES-ML': [u'Anexo:Lugares de importancia comunitaria de Ceuta y Melilla', 'Melilla'],
    'ES-MAGRAMA': [u'Anexo:Lugares de importancia comunitaria del MAGRAMA', 'MAGRAMA']
}
# +
# Folder management (templates, images...)
cwd = os.getcwd()
templates_directory = os.path.join(cwd, 'templates')
# +
# retrieval of the WLE SCI (site of community importance) log
pb.output('Retrieving --> WLE site of community importance list')
site_list_page = pb.Page(commons_site, SITE_DB_PAGE)
# Strip the first and last wiki-markup lines of the page, keeping only the
# semicolon-separated CSV payload in between.
site_list_text = StringIO(site_list_page.text[site_list_page.text.find('\n') +
                                              1:site_list_page.text.rfind('\n')])
site_df = pd.read_csv(site_list_text, sep=";",
                      index_col=False,
                      names=["name", "code", "magrama_url", "community",
                             "bio_region", "continent", "min_altitude",
                             "max_altitude", "avg_altitude", "longitude",
                             "latitude", "area", "marine_percentage",
                             "marine_area", "image", "commons_cat", "wikidata_id"])
pb.output('Retrieved --> WLE site of community importance list')
# Map each ISO community code to the region's English name via `annexes`.
site_df["aut_com"] = site_df["community"].apply(lambda x: annexes[x][1])
# -
len(site_df[~site_df['commons_cat'].isnull()])
len(site_df[~site_df['image'].isnull()])
# Sites still missing a Commons category or an image feed the to-do list.
filtered_site_df = site_df[(site_df['commons_cat'].isnull() | (site_df['image'].isnull()))]
len(filtered_site_df)
# +
# Retrieval of images
pb.output('Retrieving --> {1} {0} in {2} images list from cache'.format(YEAR, TAG, COUNTRY))
list_page = pb.Page(commons_site, LOG_PAGE)
# Same first/last-line trimming as for the sites DB page.
list_page_text = StringIO(list_page.text[list_page.text.find('\n') + 1:list_page.text.rfind('\n')])
images_df = pd.read_csv(list_page_text,
                        sep=";",
                        index_col=False,
                        names=['image_title', 'code',
                               'uploader', 'uploader_registration',
                               'timestamp', 'date', 'size',
                               'height', 'width', 'qi',
                               'finalist']
                        ).fillna('')
pb.output('Retrieved --> {1} {0} in {2} images list from cache'.format(YEAR, TAG, COUNTRY))
total_images_length = len(images_df)
total_images_length
# -
# Contest images whose site code belongs to one of the incomplete sites.
filtered_images_df = images_df[(~images_df['code'].isnull()) & (images_df['code'].isin(filtered_site_df['code'].values))]
filtered_images_df['code'].unique()
missing_sites_df = filtered_site_df[filtered_site_df['code'].isin(filtered_images_df['code'].unique())].fillna('')
missing_sites_df['annex'] = missing_sites_df["community"].apply(lambda x: annexes[x][0])
# Collect red-linked categories: visible (non-hidden) categories used by any
# contest image that do not exist yet on Commons.
lost_cats = []
for image_counter, row in images_df.iterrows():
    #print(row["image_title"])
    page = pb.FilePage(commons_site, row["image_title"])
    text = page.text
    # Progress indicator every 50 image pages.
    if (image_counter != 0) and (image_counter % 50 == 0) :
        pb.output ('Reviewing --> %d image pages downloaded' %(image_counter))
    cats = [cat for cat in page.categories()]
    lost_cats.extend([cat.title(withNamespace=False) for cat in cats if (not cat.isHiddenCategory() and not cat.exists())])
# Render the to-do page from the Mako template and publish it only when the
# content actually changed.
template_file = os.path.join(templates_directory, 'todo.wiki')
# Context manager replaces the open/read/close triple and guarantees the
# handle is closed even on error.
with open(template_file, 'r', encoding="utf-8") as fh:
    template = fh.read()
list(set(lost_cats))
# Template context. Renamed from `vars`, which shadowed the builtin.
template_vars = {
    "lost_categories": list(set(lost_cats)),
    "missing_df": missing_sites_df,
    "todo_page": TODO_WLE_PAGE,
    "tag": TAG,
    "full_tag": TAG_EXT,
    "year": YEAR,
    "country": COUNTRY,
    "date": datetime.now().strftime("%B %-d, %Y")
}
t = Template(template)
todo_text = t.render(**template_vars)
todo_page = pb.Page(commons_site, TODO_WLE_PAGE)
if todo_page.text.strip() != todo_text.strip():
    todo_page.text = todo_text
    pb.output('Publishing --> {1} {0} in {2} To-do List'.format(YEAR, TAG, COUNTRY))
    todo_page.save("{1} {0} in {2} to-do list".format(YEAR, TAG, COUNTRY))
|
WLE to-do list creator.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "703ab547-8625-4d36-b83a-dedbf9a0d44f", "showTitle": false, "title": ""}
# Azure Blob Storage access: the account key is read from the Databricks
# secret scope "Keys" rather than being hard-coded.
storage_account_name = "databricksdemostorage"
storage_account_key = dbutils.secrets.get("Keys", "Storage")
container = "data"
spark.conf.set(f"fs.azure.account.key.{storage_account_name}.blob.core.windows.net", storage_account_key)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "51a68fb9-1323-4b0e-a409-0d16f4543443", "showTitle": false, "title": ""}
from pyspark.sql.types import StructType, StructField, StringType, DoubleType
# Explicit schema for the "machine.data" CSV (all columns nullable).
schema = StructType([
    StructField("Vendor", StringType(), True),
    StructField("Model", StringType(), True),
    StructField("CycleTime", DoubleType(), True),
    StructField("MinMainMemory", DoubleType(), True),
    StructField("MaxMainMemory", DoubleType(), True),
    StructField("Cache", DoubleType(), True),
    StructField("MinChannels", DoubleType(), True),
    StructField("MaxChannels", DoubleType(), True),
    StructField("PublishedPerf", DoubleType(), True),
    StructField("RelativePerf", DoubleType(), True)
])
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "b20b2d95-a627-41fe-bbd8-775c3b4d9fc5", "showTitle": false, "title": ""}
# Load the CSV from blob storage using the explicit schema.
# FIX: the option key was misspelled "delimeter", so Spark silently ignored
# it and fell back to the default; "delimiter" is the correct key (the
# value "," matches the default, so results are unchanged).
data = spark.read.option("header", "true").option("delimiter", ",").schema(schema).csv(f"wasbs://{container}@{storage_account_name}.blob.core.windows.net/machine.data")
data.show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "92e5457c-dc33-403d-948f-d977349e606b", "showTitle": false, "title": ""}
# Random 80/20 train/test split.
(train_data, test_data) = data.randomSplit([0.8, 0.2])
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a7829add-e41a-4a17-a363-03802f568c88", "showTitle": false, "title": ""}
print(train_data.count())
print(test_data.count())
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "3ff1730c-a95c-4aed-ac6d-40bc0503609c", "showTitle": false, "title": ""}
from pyspark.ml.regression import LinearRegression
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.evaluation import RegressionEvaluator
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "a93fecb8-bfba-4ecd-82f2-0b6320f5b914", "showTitle": false, "title": ""}
# Assemble the six hardware columns into a single "features" vector column.
vectors = VectorAssembler(inputCols=['CycleTime', 'MinMainMemory', 'MaxMainMemory', 'Cache', 'MinChannels', 'MaxChannels'], outputCol="features")
vector_data = vectors.transform(train_data)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "8e9a9976-2d22-47cc-a2d1-1ed0b24c925f", "showTitle": false, "title": ""}
vector_data.show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "640369d8-5a76-4d5d-bb76-9d33f8a52221", "showTitle": false, "title": ""}
# Keep only the assembled features and the regression label.
features_data = vector_data.select(["features", "PublishedPerf"])
features_data.show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "608debb9-67b4-4157-91c9-d894f7749008", "showTitle": false, "title": ""}
lr = LinearRegression(labelCol="PublishedPerf", featuresCol="features")
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c3a6f50c-714c-4a26-8e58-146faecb0abf", "showTitle": false, "title": ""}
model = lr.fit(features_data)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "00ba082f-a1fe-4d0a-bf34-ddafdb9a10e4", "showTitle": false, "title": ""}
# Training-set fit quality.
summary = model.summary
print("R^2", summary.r2)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "c1ef0685-53d9-4a0b-9169-7a46a31b64d0", "showTitle": false, "title": ""}
evaluator = RegressionEvaluator(predictionCol="prediction", labelCol="PublishedPerf", metricName="r2")
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "60cc1870-2760-4b4a-b9f8-b443eb7f0e91", "showTitle": false, "title": ""}
# Apply the same feature pipeline to the held-out split, then evaluate.
vector_test = vectors.transform(test_data)
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "0a32a7ea-5254-42a4-8fae-46b36f8138e0", "showTitle": false, "title": ""}
features_test = vector_test.select(["features", "PublishedPerf"])
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "69813c32-f146-4c84-9d17-fd9911e5bde4", "showTitle": false, "title": ""}
test_transform = model.transform(features_test)
test_transform.show()
# + application/vnd.databricks.v1+cell={"inputWidgets": {}, "nuid": "09cf348d-5bcb-4475-b0b2-2b49b72913e2", "showTitle": false, "title": ""}
# Test-set R^2.
evaluator.evaluate(test_transform)
|
Notebooks/Linear Regression With MLLib.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# Pre-split data: both CSVs contain the "Survived" label column.
df_train = pd.read_csv('train.csv')
df_test = pd.read_csv('test.csv')
df_train.head()
print("Training Data Shape: ", df_train.shape)
print("Test Data Shape :", df_test.shape)
# Separate features from the label in each split.
train_x = df_train.drop(columns=['Survived'], axis=1)
train_y = df_train['Survived']
test_x = df_test.drop(columns=['Survived'], axis=1)
test_y = df_test['Survived']
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression()
lr.fit(train_x, train_y)
preds_y = lr.predict(test_x)
from sklearn.metrics import accuracy_score, confusion_matrix, roc_curve, roc_auc_score
accuracy_score(test_y, preds_y)
confusion_matrix(test_y, preds_y)
roc_curve(test_y, preds_y)
print('Coefficient of model :', lr.coef_)
print('Intercept of model', lr.intercept_)
print("Auc Score :", roc_auc_score(test_y, preds_y))
# ## Save Model
import pickle
filename = 'Logistic_model.sav'
# Use a context manager so the file is flushed and closed deterministically
# (the original passed an open() call straight to pickle.dump and never
# closed the handle).
with open(filename, 'wb') as model_file:
    pickle.dump(lr, model_file)
|
Basic Machine Learning Models/Logistic Regression/Logistic_Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Resampling-Methods" data-toc-modified-id="Resampling-Methods-5"><span class="toc-item-num">5 </span>Resampling Methods</a></span><ul class="toc-item"><li><span><a href="#Cross-validation" data-toc-modified-id="Cross-validation-5.1"><span class="toc-item-num">5.1 </span>Cross-validation</a></span><ul class="toc-item"><li><span><a href="#The-Validation-Set-Approach" data-toc-modified-id="The-Validation-Set-Approach-5.1.1"><span class="toc-item-num">5.1.1 </span>The Validation Set Approach</a></span></li><li><span><a href="#Leave-One-Out-Cross-Validation" data-toc-modified-id="Leave-One-Out-Cross-Validation-5.1.2"><span class="toc-item-num">5.1.2 </span>Leave-One-Out Cross Validation</a></span></li><li><span><a href="#$k$-fold-Cross-Validation" data-toc-modified-id="$k$-fold-Cross-Validation-5.1.3"><span class="toc-item-num">5.1.3 </span>$k$-fold Cross-Validation</a></span></li><li><span><a href="#Bias-Variance-Tradeoff-for-$k$-fold-Cross-Validation" data-toc-modified-id="Bias-Variance-Tradeoff-for-$k$-fold-Cross-Validation-5.1.4"><span class="toc-item-num">5.1.4 </span>Bias-Variance Tradeoff for $k$-fold Cross Validation</a></span></li><li><span><a href="#Cross-Validation-on-Classification-Problems" data-toc-modified-id="Cross-Validation-on-Classification-Problems-5.1.5"><span class="toc-item-num">5.1.5 </span>Cross-Validation on Classification Problems</a></span></li></ul></li><li><span><a href="#The-Bootstrap" data-toc-modified-id="The-Bootstrap-5.2"><span class="toc-item-num">5.2 </span>The Bootstrap</a></span></li><li><span><a href="#Footnotes" data-toc-modified-id="Footnotes-5.3"><span class="toc-item-num">5.3 </span>Footnotes</a></span></li></ul></li></ul></div>
# -
# ___
# # Resampling Methods
# ___
# - ***Resampling methods*** involve repeatedly drawing samples from a training set and refitting a model of interest on each sample in order to obtain additional information about the fitted model
#
# - Two of the most commonly used resampling methods are ***cross-validation*** and the bootstrap
#
# - Resampling methods can be useful in ***model assessment***, the process of evaluating a model's performance, or in ***model selection***, the process of selecting the proper level of flexibility.
# ## Cross-validation
# ### The Validation Set Approach
# - Randomly divide the data into a ***training set*** and ***validation set***. The model is fit on the training set and its prediction performance on the test set provides an estimate of overall performance.
#
# - In the case of a quantitative response, the prediction performance is measured by the mean-squared-error. The validation estimates the "true" $\text{MSE}$ with the mean-squared error $\text{MSE}_{validation}$ computed on the validation set.
# ##### Advantages
# - conceptual simplicity
# - ease of implementation
# - low computational resources
# ##### Disadvantages
# - the validation estimate is highly variable - it is highly dependent on the train/validation set split
# - since the model is trained on a subset of the dataset, it may tend to overestimate the test error rate if it was trained on the entire dataset
# ### Leave-One-Out Cross Validation
# Given paired observations $\mathcal{D} = \{(x_1, y_1), \dots, (x_n, y_n)\}$, for each $1 \leqslant i \leqslant n$:
# - Divide the data $\mathcal{D}$ into a training set $\mathcal{D}_{(i)} = \mathcal{D} \setminus \{(x_i, y_i)\}$ and a validation set $\{(x_i, y_i)\}$.
# - Train a model $\mathcal{M}_i$ on $\mathcal{D}_{(i)}$ and use it to predict $\hat{y}_i$.
# - The LOOCV estimate for $\text{MSE}_{test}$ is
#
# $$CV_{(n)} = \frac{1}{n}\sum_{i=1}^n \text{MSE}_i$$
#
# where $\text{MSE}_i = (y_i - \hat{y}_i)^2$<sup><a href='#foot31' id='ref31'>31</a></sup>
# ##### Advantages
# - approximately unbiased
# - deterministic - doesn't depend on a random train/test split.
# - computationally fast in least squares regression
# $$CV_{(n)} = \frac{1}{n}\sum_{i=1}^n \left(\frac{y_i - \hat{y}_i}{1 - h_i}\right)^2$$
# where $h_i$ is the [leverage](#High-Leverage-Points) of point i
# ##### Disadvantages
# - Computationally expensive<sup><a href='#foot32' id='ref32'>32</a></sup> in general
# ### $k$-fold Cross-Validation
# Given paired observations $\mathcal{D} = \{(x_1, y_1), \dots, (x_n, y_n)\}$, divide the data $\mathcal{D}$ into $K$ ***folds*** (sets) $\mathcal{D}_1, \dots, \mathcal{D}_K$ of roughly equal size.<sup><a href='#foot33' id='ref33'>33</a></sup> Then for each $1 \leqslant k \leqslant K$:
#
# - Train a model $\mathcal{M}_k$ on $\cup_{j\neq k} \mathcal{D}_{j}$ and validate on $\mathcal{D}_k$.
# - The $k$-fold CV estimate for $\text{MSE}_{test}$ is
#
# $$CV_{(K)} = \frac{1}{K}\sum_{k=1}^K \text{MSE}_k$$
#
# where $\text{MSE}_k$ is the mean-squared-error on the validation set $\mathcal{D}_k$
# ##### Advantages
# - computationally faster than LOOCV if $k < n$
# - less variance than validation set approach or LOOCV
# ##### Disadvantages
# - more biased than LOOCV if $k < n$.
# ### Bias-Variance Tradeoff for $k$-fold Cross Validation
# As $k \rightarrow n$, bias $\downarrow$ but variance $\uparrow$
# ### Cross-Validation on Classification Problems
# In the classification setting, we define the LOOCV estimate
#
# $$CV_{(n)} = \frac{1}{n}\sum_{i=1}^n \text{Err}_i$$
#
# where $\text{Err}_i = I(y_i \neq \hat{y}_i)$. The $k$-fold CV and validation error rates are defined analogously.
# ## The Bootstrap
# The bootstrap is a method for estimating the standard error of a statistic<sup><a href='#foot34' id='ref34'>34</a></sup> or statistical learning process. In the case of an estimator $\hat{S}$ for a statistic $S$, the procedure is as follows:
#
# Given a dataset $\mathcal{D}$ with $|\mathcal{D}| = n$, for $1 \leqslant i \leqslant B$:
# - Create a bootstrap dataset $\mathcal{D}^\ast_i$ by sampling uniformly $n$ times from $\mathcal{D}$
# - Calculate the statistic $S$ on $\mathcal{D}^\ast_i$ to get a bootstrap estimate $S^\ast_i$ of $S$
#
# Then the bootstrap estimate of $\mathbf{se}(S)$ is the sample standard deviation of the bootstrap estimates $S^\ast_1, \dots, S^\ast_B$:
#
# $$\hat{se}(\hat{S}) = \sqrt{\frac{1}{B-1} \sum_{i = 1}^ B \left(S^\ast_i - \overline{S^\ast}\right)^2}$$
# ___
# ## Footnotes
# <p>
# </p>
#
# <div id="foot31"> 31. $\text{MSE}_i$ is just the mean-squared error of the model $\mathcal{M}_i$ on the validation set $\{(x_i, y_i)\}$. It is an approximately unbiased estimator of $\text{MSE}_{test}$ but it has high variance. But as the average of the $\text{MSE}_i$, $CV_{(n)}$ has much lower variance.\\
#
# $CV_{(n)}$ is sometimes called the LOOCV error rate -- it can be seen as the average error rate over the singleton validation sets $\{(x_i, y_i)\}$
# <a href="#ref31">↩</a>
# </div>
#
# <p>
# </p>
#
# <div id="foot32"> 32. Specifically $O(n * \text{model fit time})$
# <a href="#ref32">↩</a>
# </div>
#
# <p>
# </p>
#
# <div id="foot33"> 33. LOOCV is then $k$-fold CV in the case $k=n$. Analogous, $CV_{k}$ is sometimes called the $k$-fold CV error rate, the average error over the folds.
# <a href="#ref33">↩</a>
# </div>
#
# <p>
# </p>
#
# <div id="foot34"> 34. Recall a statistic $S$ is just a function of a sample $S = S(X_1,\dots, X_n)$
# <a href="#ref34">↩</a>
# </div>
|
04_resampling/ch05_notes.ipynb
|
# -*- coding: utf-8 -*-
# # II Kodutöö
#
# # I osa - veebiloengud, videod, artiklid (4 punkti)
#
# 1. (**3 punkti**) Vaata Johns Hopkinsi andmeanalüüsi kursuse videosid:
#
# * [The ggplot2 Plotting System - Part 2 (1/3)](https://www.youtube.com/watch?v=zLwrYvH2Hg0&list=PLjTlxb-wKvXPhZ7tQwlROtFjorSj9tUyZ&index=13)
# * [The ggplot2 Plotting System - Part 2 (2/3)](https://www.youtube.com/watch?v=Z3WP_Up8b5I&list=PLjTlxb-wKvXPhZ7tQwlROtFjorSj9tUyZ&index=14)
# * [The ggplot2 Plotting System - Part 2 (3/3)](https://www.youtube.com/watch?v=gU8w0gjSx40&list=PLjTlxb-wKvXPhZ7tQwlROtFjorSj9tUyZ&index=15)
#
# Nimeta kolm kasulikku teadmist, mis videotest teada said. Juhul kui sa kõike juba teadsid, siis nimeta kolm sellist videos mainitud teadmist, mida sa kõige sagedamini kasutad.
# **<font color='red'>Vastus:</font>** Kirjuta vastus siia.
# 2. (**1 punkt**) Vaata https://www.youtube.com/watch?v=5Dnw46eC-0o
#
# Võta selle video idee ühe lausega kokku.
# **<font color='red'>Vastus:</font>** Kirjuta vastus siia.
#
# # II osa (10 punkti)
#
# Lahenda praktikumis alustatud [*Growth in a Time of Debt*](https://andmeteadus.github.io/2021/praktikum2_riigivolg/) ülesanded.
# **<font color='red'>Vastus:</font>** Kirjuta vastus siia.
# ## Sissejuhatus
#
# Kati "Makro" Ökonoomika on tavaline Eesti naine, kes saab kõigega hakkama: ta kasvatab üksi last, maksab koguperelaenu ning töötab Euroopa Komisjoni struktuuriüksuses ametnikuna. 2013. aasta alguses andis <NAME>, üks toonastest 28-st Euroopa Komisjoni volinikust, talle ülesandeks uurida võlakoorma mõju majanduskasvule.
#
# Kati teadis, et hea uurimus peab põhinema andmetel, mitte ekspertide kõhutundel. Peagi leidis ta artikli [*Growth in a Time of Debt*](http://www.nber.org/papers/w15639), mille põhitulemuseks oli, et kõrgem riigivõlg on seotud madalama majanduskasvuga ning väga probemaatiline on riigivõlg, mis on üle 90% SKP-st. Artikkel tundus usaldusväärne, sest artikli autoriteks on tunnustatud majandusteadlased Harvardist, artiklit oli tsiteeritud sel hetkel juba üle 500 korra ning see põhines 50-aastasel perioodil.
#
# Juba 9. aprillil 2013 kasutas <NAME> oma kõnes Rahvusvahelisele Tööorganisatsioonile Kati leitut:
#
# > public debt in Europe is expected to stabilise only by 2014 and to do so at above 90% of GDP. Serious empirical research has shown that at such high levels, public debt acts as a permanent drag on growth.
#
# Kati tundis, et ta oma tööd hästi teinud ja unistas aastalõpu boonusest. Tema sisemine rahulolu jäi aga üürikeseks. Majandusteadlased (Herndon, Ash, Pollin) ülikoolist UMass Amherst avaldasid teadusartikli, kus väitsid, et Reinhart-Rogoffi metoodikas on tõsiseid vigu sees, alustades Exceli arvutusveast, andmete väljajätmisest ja valest kaalumismeetodist. Majandusteemalised ajalehed olid täis kriitilisi kommentaare, sest Exceli viga on ju hoomatav kõigile.
#
# 
#
# Moodustati kriisikomisjon, mida pandi juhtima Kati. Vaja on teha suurele hulgale poliitikutele võimalikult lihtsasti selgeks, mille vastu eksisid Reinhart-Rogoff ja kui palju esialgsed tulemused muutusid.
#
# ## Tutvumine andmestikuga
#
# 20 arenenud riigi kohta on teada SKP kasvunumber ja võlakoorma-SKP suhe aastatel 1946-2009. Andmestikus on järgnevad tunnused:
#
# * *riik*: mis riigi kohta näitajad on toodud
# * *aasta*: mis aasta kohta käivad SKP kasv ja võlakoorma-SKP suhe
# * *vola_skp_suhe*: võlakoorma ja SKP suhe protsentides
# * *skp_kasv*: SKP kasvunumber
# * *exceli_viga*: binaarne tunnus, mis näitab, kas andmepunkt jäi RR analüüsist välja Exceli *arvutusvea* tõttu (kui on 1, siis jäi välja)
# * *valikuline*: binaarne tunnus, mis näitab, kas andmepunkt jäi RR analüüsist välja (HAP väitel selekteerimise tõttu, RR väitel, et neid andmeid analüüsi tegemise ajal polnud. Kui on 1, siis jäi RR analüüsist välja.)
#
# Laadi alla andmestik [skp_ja_volg.csv](https://raw.githubusercontent.com/andmeteadus/2020/master/data/skp_ja_volg.csv) ja loe töökeskkonda.
# ### Ülesanne 1.1 (2 punkti) - RR ja HAP tulemuste reprodutseerimine
#
# Kontrolli, kas suudad reprodutseerida RR ja HAP tulemused.
# Praktikumis tutvusime paketi *dplyr* [%>% operaatoriga](https://andmeteadus.github.io/2021/praktikum1_dplyr). Soovitame seda kasutada selle ülesande lahendamisel.
#
# Juhised:
#
# * Lisa uus tunnus, mis näitab *vola_skp_suhe* kategooriat (`< 30%`, `30-60%`, `60-90%`, `> 90%`). Näpunäide: kasuks tuleb käsk `cut`.
# * kaalumisviisid
# HAP kaalub igas kategoorias andmepunkte võrdselt (ehk võtab tavalise aritmeetilise keskmise), RR arvutab igas grupis riikide keskmise ja võtab neist aritmeetilise keskmise.
#
# 
#
# * RR-i tulemuste reprodutseerimiseks jäta arvutustest välja andmepunktid, mis jäid välja Exceli vea tõttu (vt tunnus *exceli_viga*) ja andmete puudumise tõttu (vt tunnus *valikuline*).
#
# - RR tegid ka kopeerimisvea: kopeerides riikide keskmisi ühest Exceli tabelist teise, muutus Uus-Meremaa keskmine SKP tõus grupis *"> 90%"* väärtuselt -7.6 väärtuseks -7.9. (Näpunäide: kasuks tuleb käsk `ifelse`.)
# Ülesanne 1.1
# sinu kood
# Peaksid saama sellise tulemuse
# Reference means/medians per debt group that the student's solution
# to exercise 1.1 should reproduce.
library(knitr)
suppressMessages(library(ggplot2))
# medians from table 1: http://www.peri.umass.edu/fileadmin/pdf/working_papers/working_papers_301-350/PERI_TechnicalAppendix_April2013.pdf
# means from figure 1: http://www.peri.umass.edu/fileadmin/pdf/working_papers/working_papers_301-350/WP322.pdf
df = data.frame(group=c("<30%", "30-60%", "60-90%", ">90%"),
                RR_mean=c(4.1, 2.9, 3.4, -0.1),
                RR_median=c(4.2, 3.0, 3.1, 1.6),
                HAP_mean=c(4.2, 3.1, 3.2, 2.2),
                HAP_median=c(4.1, 3.1, 2.9, 2.3))
# Render the reference table as markdown.
kable(df)
# ### Ülesanne 1.2 (1 punkt) - visualiseeri võlakoormuse muutumist ajas
#
# Visualiseeri, kuidas võlakoorem on aastate jooksul muutunud riikide lõikes
# Ülesanne 1.2
# sinu kood
# ## Kuidas efektiivselt visualiseerida ...
#
# Järgnevalt püüame leida parima viisi, kuidas efektiivselt visualiseerida
#
# 1. millised vaatlused jäid RR analüüsist välja
# 2. kuivõrd erinesid RR ja HAP analüüside tulemused
# 3. kas võlakoormus suurem kui 90% on maagilise tähtsusega (st kas piir on just täpselt 90%)
# 4. milline on seos SKP ja võlakoormuse vahel
#
# ### Ülesanne 1.3 (1 punkt) - millised vaatlused jäid RR analüüsist välja
# Ülesanne 1.3
# sinu kood
# ### Boonusülesanne B1.1 (2 punkti) - kuidas erinesid RR ja HAP analüüside tulemused
#
# HAPi raportis kasutati vasakpoolset joonist, et visualiseerida RR ja HAP tulemuste erinevusi. NY times pani samale joonisele aga mediaani ja keskmise (parempoolne joonis)! Paku välja parem visualiseerimise idee, kuidas muuta arusaadavaks tulemuste erinevus.
#
# 
#
# > Praktikumis pakuti välja sarnane joonis NY Times joonisega, aga joondiagrammi asemel kasutame tulpdiagrammi ning mediaanide ja aritmeetiliste keskmiste kohta teeme eraldi joonised. Kuna see meetod nõuab oskusi, mida me ei käsitlenud tunni raames, jääb see ülesanne boonusülesandeks.
#
# Vihje:
# +
# Toy example of a dodged bar chart: one bar per (grupp, analyys) pair,
# coloured by analysis (RR vs HAP).
df = data.frame(analyys=c("RR", "RR", "HAP", "HAP"),
                mediaan = c(1, 2, 3, 4),
                grupp=c("30", "60", "30", "60"))
# stat="identity" plots the y values as given; position="dodge" places the
# RR and HAP bars side by side within each group.
ggplot(df, aes(x=grupp, y=mediaan, fill=analyys)) +
    geom_bar(position="dodge", stat="identity")
# -
# Ülesanne B1.1
# sinu kood
# ### Ülesanne 1.4 (2 punkti) - kas võlakoormus suurem kui 90% on maagilise tähtsusega
#
# > Our main finding is that across both advanced countries and emerging markets, high debt/GDP levels (90 percent and above) are associated with notably lower growth outcomes.
#
# Selgitage välja, kas täpselt 90% on just see piir, millest suurem võlakoormus on seotud madalama SKP kasvuga, või on see suhteliselt suvaliselt valitud arv?
#
# Üks võimalik lahendusviis: Tekitage uus kategooria, kus võlg jaotatakse 5 gruppi: `< 30%`, `30-60%`, `60-90%`, `90-120%`, `> 120%`). Arvutage iga grupi kohta mediaanid ja keskmised kasutades RR kaalumisviisi. **NB!** Jätta seekord sisse kõik RR vigadest tingitud vaatlused (Exceli viga) ja kaasake subjektiivsest valikust välja jäetud riigid.
# Ülesanne 1.4
# sinu kood
# ### Ülesanne 1.5 (4 punkti) - Kuidas visuaalselt uurida, milline on seos SKP ja võlakoormuse vahel?
#
# Kõigepealt, tehke joonis, kus oleks näha seos SKP ja võlakoormuse vahel. Seose iseloomustamiseks võite kasutada `stat_smooth()` abil leitavat joont.
#
# Näinud seost andmestikus, tekib küsimus, ega see seos ei ole lihtsalt juhuslik. Ehk kas vaadeldud seos erineb oluliselt seostest sellistes andmestikes, kus tegelikult SKP ja võlakoormuse vahel mingisugust seost ei eksisteeri.
#
# Selle visuaalseks kontrollimiseks võime kasutada järgmist skeemi. See põhineb permutatsioonitestil, mille kohta vaadake esmalt [kodutöö osa I](https://andmeteadus.github.io/2021/praktikum2_kodutoo/) videot https://www.youtube.com/watch?v=5Dnw46eC-0o
#
# Skeem:
#
# * Nullhüpotees on, et SKP ja riigivõla vahel seos puudub.
# * Genereerime meie andmetest permuteerimise teel sellise andmestiku, mis vastab nullhüpoteesile. Näiteks võib fikseerida SKP väärtused ning neile vastavusse seatavad riigivõla väärtused permuteerida. (Näpunäide: permuteerimisel on kasuks funktsioon `sample`.) Järgneval joonisel on näidatud tegelik andmestik ning permuteeritud andmestik (permuteeritud andmestik on saadud, kui on fikseeritud x-tunnus, aga y-tunnused on segamini aetud.)
#
# 
#
# * Leidke eelmises punktis genereeritud andmestikul `stat_smooth` hinnang.
# * Korrake eelnevat näiteks 100 korral ning kandke leitud 100 joont joonisele. Võrdluseks lisage esialgsetel andmetel leitud joon teise värviga. Lõpptulemus võiks tulla sarnane järgmise joonisega:
#
# 
# Ülesanne 1.5
# sinu kood
#
# # III osa (11 punkti)
#
# Lahenda [pettuse tuvastamise ülesanded](https://andmeteadus.github.io/2021/praktikum2_pettus/).
#
# ## Sissejuhatus
#
# Markus "Märul" Veekahuri isa on politseinik, ema on politseinik, mõlemad vanaemad on politseinikud ja õde on politseinik.
# Algul vaadati viltuselt Markuse soovile ülikooli statistikat õppima minna, kuid pärast kahte kuud vaikimist vahetati telefoni teel esimene "tere" ning lepiti uuesti ära.
#
# Kuid nagu elus ikka, ei kuku käbi kännust kaugele. Markus läks tööle Politsei- ja Piirivalveametisse ning tema igapäevatööks sai pettuste tuvastamine, kasutades statistilisi meetodeid.
# Tema ametirelvaks on Benfordi seadus.
#
# Benfordi seadus (sageli nimetatud kui esimese numbri seadus) kirjeldab arvu esimese numbri sagedust. Paljudes reaalsetes andmetes esineb number 1 esinumbrina umbes 30% juhtudest ning iga järgneva numbri sagedus kahaneb monotoonselt.
#
# 
#
# Empiiriliselt on näidatud, et Benfordi seadus kehtib näiteks aktsiahindade, jõgede pikkuse, riikide rahvaarvu andmetel. Järgneval joonisel on toodud kaks näidet Benfordi seaduse *kehtimisest*.
# +
# Two empirical illustrations of Benford's law: first-digit frequencies of
# Twitter follower counts and of countries' GDP, plotted side by side.
suppressWarnings(suppressMessages(library(gridExtra)))
suppressWarnings(suppressMessages(library(dplyr)))
df1 = data.frame(x=factor(c(1:9)),
                 y=c(32.62, 16.66, 11.8, 9.26, 7.63, 6.55, 5.76, 5.14, 4.56))
p1 = ggplot(df1, aes(x=x, weight=y)) + geom_bar() + ggtitle("Twitteri kasutajaid jälgijate arvu lõikes \n") +
    scale_x_discrete("Esimene number") + scale_y_continuous("") + theme(text = element_text(size=11))
# NOTE(review): df1 is reused (overwritten) for the second panel.
df1 = data.frame(x=factor(c(1:9)),
                 y=c(31.57, 18.12, 11.88, 9.35, 7.84, 6.09, 5.78, 4.83, 4.53))
p2 = ggplot(df1, aes(x=x, weight=y)) + geom_bar() + ggtitle("Riikide SKP suurus \n") +
    scale_x_discrete("Esimene number") + scale_y_continuous("") + theme(text = element_text(size=11))
grid.arrange(p1, p2, nrow=1)
# -
# Markuse ametivennad on järeldanud Benfordi seadusest kõrvalekaldumisest, et Kreeka on võltsinud makromajanduslikke näitajaid või et Iraani valimised olid ebaausad. Benfordi seadusest saad täpsemalt lugeda [Vikipeediast.](http://en.wikipedia.org/wiki/Benford%27s_law)
#
# Selles kodutöös on sinu ülesandeks uurida:
#
# * kas Benfordi seaduse põhjal võib väita, et FIE-d on võltsinud maksunäitajaid,
# * kas Benfordi seaduse põhjal võib väita, et 2017. aasta kohaliku omavalitsuse volikogu valimistel toimus pettus.
#
# Kuna ülesannetes on vaja teha *ggplot2* abil jooniseid, soovitame esmalt vaadata ära osa III videod *ggplot2* kohta.
#
# ## <NAME>ik
#
# Loe sisse Maksu- ja Tolliameti 2014. aasta [maksude andmestik](https://raw.githubusercontent.com/andmeteadus/2021/master/data/maksude_andmestik.csv) ja tutvu andmetega. Andmestikus on järgnevad tunnused:
#
# * *registrikood*: juriidilise isiku registrikood
# * *nimi*: juriidilise isiku nimi
# * *liik*: kas tegemist on äriühingu, MTÜ, FIE vms
# * *kaibemaksukohuslane*: kas juriidiline isik on käibemaksukohuslane
# * *maakond*: millises maakonnas on juriidiline isik registreeritud
# * *riiklikud_maksud*: käibemaks, tollimaks jne
# * *toojoumaksud_ja_maksed*: sotsiaalmaks, töötuskindlustusmakse jne
# ### Ülesanne 2.1 (2 punkti)
#
# Tee 3 joonist, mis iseloomustavad hästi seda andmestikku. Iga joonise juurde kirjuta üks lause, mida see joonis sinu arvates näitab.
# Ülesanne 2.1
# sinu kood
# ### Ülesanne 2.2 (2 punkti)
#
# Kontrolli visuaalselt Benfordi seaduse kehtimist tunnustel *riiklikud_maksud* ja *toojoumaksud_ja_maksed*. Selleks tekita esinumbrite histogramm. Nulliga võrduvad väärtused jäta kõrvale. Tee vastav joonis ka FIE-de, äriühingute jne lõikes (vt tunnus *liik*).
#
# Kommenteeri tulemusi.
#
# Kas sellest võib järeldada, et FIE-d jahmerdavad maksudega?
#
# **Näpunäide:** esimest numbrit aitab eraldada näiteks funktsioon `substr`.
# Ülesanne 2.2
# sinu kood
# ## 2017 KOV valimiste hääletustulemuste andmestik
#
# Loe sisse [andmestik](https://raw.githubusercontent.com/andmeteadus/2021/master/data/KOV_valimised_2017.csv) ja tutvu andmetega. Andmestikus on järgnevad tunnused:
#
# * *nimi* - kandidaadi nimi
# * *nr* - kandidaadi number
# * *Maakond*
# * *Omavalitsus*
# * *Kood* - omavalitsuse kood
# * *nimekiri* - partei või valimisliit, kuhu kandidaat kuulub
# * *paberhaali* - kandidaadi poolt saadud paberhäälte arv
# * *ehaali* - kandidaadi poolt saadud e-häälte arv
# * *valitud* - kas kandidaat osutus valituks (*true*) või mitte (*false*)
# ### Ülesanne 2.3 (2 punkti)
#
# * Esmalt tee juurde tunnus, mis näitab kandidaadile antud koguhäälte arvu (paberhäälte ja e-häälte summa).
# * Seejärel tekita tunnus, mille väärtusteks on *Eesti Keskerakond*, *Eesti Reformierakond*, *Sotsiaaldemokraatlik Erakond*, *Erakond Isamaa ja Res Publica Liit*, *Eesti Konservatiivne Rahvaerakond* ja *Muu*, st väiksemad erakonnad ja valimisliidud on ühte gruppi kokku võetud.
# * Tee 3 joonist, mis iseloomustavad hästi seda andmestikku. Iga joonise juurde kirjuta üks lause, mida see joonis sinu arvates näitab.
# Ülesanne 2.3
# sinu kood
# ### Ülesanne 2.4 (2 punkti)
#
# Kontrolli visuaalselt Benfordi seaduse kehtimist:
#
# * e-häälte arvul,
# * paberhäälte arvul,
# * koguhäälte arvul.
#
# Seejärel tee eelnevad joonised ka erakondade kaupa. Kommenteeri tulemusi.
# Ülesanne 2.4
# sinu kood
# ### Ülesanne 2.5 (3 punkti)
#
# Tee järgnevale joonisele võimalikult sarnane.
#
# 
# Ülesanne 2.5
# Sinu kood
# Näpunäited:
#
# * Log-skaala kasutamiseks uuri [järgmisi ggplot2 näiteid.](https://ggplot2.tidyverse.org/reference/scale_continuous.html)
# * Legendi peitmiseks uuri [järgmisi ggplot2 näiteid.](https://ggplot2.tidyverse.org/reference/theme.html) (märksõnaks on `legend.position`)
# * Käsuga `facet_wrap` saab joonise jaotada tükkideks (Mõned [näited](https://ggplot2.tidyverse.org/reference/facet_wrap.html).)
# * Et muuta värvid vastavaks erakonna sümboolikaga, kasuta värve "#00983A", "#FFDE00","#E30613", "#009FE3","#8B4513", "#82368C" (vastavalt KESK, REF, SDE, IRL, EKRE, Muu).
# * Värvide muutmiseks uuri [järgmisi ggplot2 näiteid.](https://ggplot2.tidyverse.org/reference/scale_manual.html)
# * Pööra tähelepanu n-ö akende pealkirjadele ja järjekorrale. Abiks võib olla funktsioon `factor`.
# ### Boonusülesanne B2.1 (2 punkti)
#
# Lisa hallid mummud taustale. Tulemus peaks olema selline:
#
# 
# Ülesanne B2.1
# Sinu kood
# ## <font color='red'>See oli viimane ülesanne! Palun jooksuta kogu Jupyter Notebook uuesti, et veenduda oma koodi töötamises! (`Kernel -> Restart and Run All`)</font>
#
#
# ## Lahendamise aeg
#
# Lisaks võiksid mainida, kui palju aega kulus osa I, II ja III peale. Mida arvad ülesannetest ja videotest (kasulikkus, huvitavus)?
# **<font color='red'>Kodutöö 1 lahendamiseks kulus:</font>** X h
#
# Kõige raskem oli ülesanne ...
# **Tänan vastamast!**
|
kodutood/kodutoo2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Freeze the pygame game script into a standalone executable with cx_Freeze.
import cx_Freeze

executables = [cx_Freeze.Executable("pygameVideo15.py")]

cx_Freeze.setup(
    name="A bit Racey",
    options={"build_exe": {
        # BUGFIX: the build_exe option is "packages" (plural). The original
        # misspelled key "package" was not a recognised option, so pygame was
        # never explicitly forced into the frozen bundle.
        "packages": ["pygame"],
        # Ship the sprite asset next to the executable.
        "include_files": ["racecar.png"],
    }},
    executables=executables,
)
# -
|
setup.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Question-1-1" data-toc-modified-id="Question-1-1-1">Question 1-1</a></span></li><li><span><a href="#Question-1-2" data-toc-modified-id="Question-1-2-2">Question 1-2</a></span></li><li><span><a href="#Question-1-3" data-toc-modified-id="Question-1-3-3">Question 1-3</a></span></li><li><span><a href="#Question1-4" data-toc-modified-id="Question1-4-4">Question1-4</a></span></li><li><span><a href="#Question2-1" data-toc-modified-id="Question2-1-5">Question2-1</a></span></li><li><span><a href="#Question-2-2" data-toc-modified-id="Question-2-2-6">Question 2-2</a></span></li><li><span><a href="#Question-2-3" data-toc-modified-id="Question-2-3-7">Question 2-3</a></span></li></ul></div>
# -
% matplotlib inline
import pandas as pd
import matplotlib.pyplot as plt
import datetime as dt
def printOutput(output, question):
    """Pretty-print the answer banner for one homework question.

    The layout (100-character asterisk banners, 10-wide right-aligned field
    names, two-decimal rounding) is fixed by the assignment.
    """
    width = 100
    if question == '1-2':
        stat, jan, june = output
        pad = '*' * ((width - len(stat) - 2) // 2)
        print(f"{pad} {stat} {pad}")
        print(f"{'January':>10} : {jan:.2f}")
        print(f"{'June':>10} : {june:.2f}")
        print('*' * width)
    elif question == '1-4':
        name, lower, upper, ratio = output
        print('*' * width)
        print(f"{ratio:.2f}% of instances in {name} are noisy! "
              f"(normal range is between {lower:.2f} and {upper:.2f})")
        print('*' * width)
    elif question == '2-1':
        print('*' * width)
        print(f"% of weekend trips={output:.2f}")
        print('*' * width)
print ('%s' % ('*'*asterisk))
# +
def FindDuration(rawData, startCol, endCol):
    """Return trip durations in minutes as a float Series.

    Both timestamp columns of *rawData* are parsed with the fixed format
    'dd/mm/yy HH:MM'; the result is (end - start) expressed in minutes.
    """
    fmt = '%d/%m/%y %H:%M'
    begin = pd.to_datetime(rawData[startCol], format=fmt)
    finish = pd.to_datetime(rawData[endCol], format=fmt)
    elapsed = finish - begin
    return elapsed.dt.total_seconds() / 60
# -
# ### Question 1-1
# +
# Reading the data into a DataFrame, finding the duration of each trip and creating a new DataFrame
# January
rawJan = pd.read_csv('raw-january.csv',encoding = 'ISO-8859-1')
rawJan['Duration'] = FindDuration(rawJan,'pickup_datetime','dropoff_datetime')
# Keep only the four analysis columns; .copy() detaches from the raw frame.
dataJan = rawJan[['Duration', 'trip_distance', 'passenger_count', 'payment_amount']].copy()
# June
rawJune = pd.read_csv('raw-june.csv'
,encoding = 'ISO-8859-1')
rawJune['Duration'] = FindDuration(rawJune,'pickup_datetime','dropoff_datetime')
dataJune = rawJune[['Duration', 'trip_distance', 'passenger_count', 'payment_amount']].copy()
# -
# ### Question 1-2
# Per-feature January/June means, printed via the shared formatter.
feaureSet = ['trip_distance', 'passenger_count', 'Duration']
for feaure in feaureSet:
    meanJan = dataJan[feaure].mean()
    meanJune = dataJune[feaure].mean()
    printOutput([feaure,meanJan, meanJune], question = '1-2')
# ### Question 1-3
# +
# Optional: before plotting the boxplots, you may remove the outliers
# Side-by-side Jan/June boxplots for each feature of interest.
feaureSet = ['trip_distance', 'payment_amount', 'Duration']
for feaure in feaureSet:
    plt.clf()
    # Align the two monthly Series as columns 'Jan' and 'June'.
    concatInfo = pd.concat([dataJan[feaure], dataJune[feaure]], axis = 1, keys = ['Jan', 'June'])
    concatInfo.boxplot()
    plt.title(feaure)
    plt.show()
# -
#
# pd.concat() concatenates Dataframe objects and can be replaced by the following lines:
#
# concatInfo = pd.DataFrame()
#
# concatInfo['Jan'] = dataJan[c]
#
# concatInfo['June'] = dataJune[c]
#
# ### Question1-4
# +
# The upperbound of the ranges are selected as q3+3*IQR (except for the number of passengers)
# The lower bound of the ranges are selected intuitively
# part 1 of the question
# Hand-picked "normal" [min, max] range per feature.
normRange = {'Duration':[1,50], 'trip_distance':[0.1, 10.0], 'payment_amount':[2, 44.0],'passenger_count':[1,5] }
feaureSet = ['Duration','trip_distance','payment_amount','passenger_count']
records = len(dataJan)
for feature in feaureSet:
    minR = normRange[feature][0]
    maxR = normRange[feature][1]
    # Mean is computed BEFORE outliers are replaced, so it still includes them.
    fMean = dataJan[feature].mean()
    # part 2 of the question
    lower = dataJan[dataJan[feature]<minR]
    higher = dataJan[dataJan[feature]>maxR]
    # Share of rows outside the normal range, in percent.
    ratio = 100*(len(lower)+len(higher))/records
    printOutput([feature, minR, maxR, ratio], question='1-4')
    # part 3 of the question
    # Replace out-of-range values with the (pre-replacement) feature mean.
    dataJan.loc[lower.index, feature] = fMean
    dataJan.loc[higher.index, feature] = fMean
# -
# ### Question2-1
# +
# Load the cleaned January data and parse pickup timestamps.
cleanJan = pd.read_csv('clean-january.csv',encoding = 'ISO-8859-1')
tStart = pd.to_datetime(cleanJan['pickup_datetime'], format = '%d/%m/%y %H:%M')
#Alternatively, if using DatetimeIndex
#fmt='%d/%m/%y %H:%M'
#cleanJan['hour']=pd.DatetimeIndex(pd.to_datetime(cleanJan['pickup_datetime'],format=fmt)).hour
#...
cleanJan['weekday'] = tStart.dt.dayofweek # dt.weekday would return the same value
# The day indices start from 0: Monday=0 ...Sunday=6
# Flag Saturday (5) and Sunday (6) pickups as weekend trips.
cleanJan['isweekend'] = [1 if (x>4) else 0 for x in cleanJan['weekday']]
total = cleanJan.shape[0]
# groupby(...).size() counts rows per flag value; index 1 = weekend trips.
weekends=cleanJan.groupby('isweekend').size()[1]
printOutput((weekends/total)*100, question = '2-1')
# -
# ### Question 2-2
# +
def PlotHist(data, title):
    """Plot a taxi-demand histogram by time-of-day bucket for the given subset."""
    # Bucket labels for the x axis.
    names = ['[0-6)','[6,9)','[9,12)','[12,16)','[16,20)','[20,24)']
    # Bin edges just below each boundary hour so e.g. hour 6 falls in [6,9).
    picture = data['hour'].hist(bins=[0,5.9,8.9,11.9,15.9,19.9,24])
    picture.set_title("Volume of taxis during " + title)
    picture.set_xlabel("Time")
    picture.set_ylabel("Taxi Demand (number of trips)")
    # Tick positions roughly centred in each bin.
    picture.set_xticks([3,7.5,10.5,14,18,22])
    picture.set_xticklabels(names,rotation=45)
    # Shared y limit so the weekend/weekday panels are directly comparable.
    picture.set_ylim([0,15000])
    plt.show()

# Hour of day of each pickup (tStart was computed in the previous cell).
cleanJan ['hour'] = tStart.dt.hour
Weekend = cleanJan.loc[cleanJan['weekday']>4]
Weekday = cleanJan.loc[cleanJan['weekday']<5]
PlotHist(Weekend, 'Weekends')
PlotHist(Weekday, 'Weekdays')
# -
# ### Question 2-3
# +
# Trip duration in minutes, then revenue per minute ("income efficiency").
cleanJan['Duration'] = FindDuration(cleanJan,'pickup_datetime','dropoff_datetime')
cleanJan['income_efficiency']= cleanJan['payment_amount']/cleanJan['Duration']
#income plot by hour (x axis) and day (y axis)
# Mean income efficiency per pickup hour, shown as a bar chart.
bygroup = cleanJan.groupby('hour')
picture=bygroup['income_efficiency'].mean().plot(kind='bar')
picture.set_ylabel("Income efficiency")
picture.set_xlabel("Hour of the Day")
plt.show()
# -
|
pahse1/Sample 17/Assignment-answers-sample(1).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="LnnFAp8Ypj1J"
import numpy as np
import pandas as pd
import random
from sklearn.preprocessing import PolynomialFeatures
from sklearn import linear_model
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score,mean_squared_error, mean_absolute_error
# + id="URJxF8JwvERw"
# Load the feature matrix (5 headerless columns named x1..x5) and the target
# column Y from CSVs on Google Drive (Colab mount paths).
x_dataset = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/ML/43_x.csv',names=['x1','x2','x3','x4','x5'], header=None)
y_dataset = pd.read_csv('/content/drive/MyDrive/Colab Notebooks/ML/43_y.csv', names = ['Y'], header=None)
# + id="qYb4wVPBv3iL"
# Keep the first feature on its own for the single-feature regression below.
tempx1 = x_dataset.loc[:,'x1']
# + colab={"base_uri": "https://localhost:8080/", "height": 424} id="U_liM0im6sNd" outputId="27b62bea-2a66-49f4-f65b-fd42b388106a"
y_dataset
# + colab={"base_uri": "https://localhost:8080/"} id="uJFPoC40w4Cu" outputId="eaf2d732-cafc-4542-de55-6d5d7e487b80"
#Details of the data
print("First dataset: ", x_dataset.shape,"\nSecond dataset:", y_dataset.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 319} id="ecrT0C8qxbCG" outputId="b0b53caa-5413-4880-9569-e843aa7e7682"
#Stat for each dataset
print("First dataset")
x_dataset.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 319} id="7e6ds7qJyEgx" outputId="3cec221a-92d0-41ae-ecd2-9adf0eed7010"
print("Second dataset: ")
y_dataset.describe()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="jw4k5AfeyM1o" outputId="99909129-02e6-4159-844a-94fc43a7ae96"
#Correlation between the five features
x_dataset.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 81} id="VEb_9er4yQtA" outputId="1417544b-f350-46a3-9973-11504f00c8d4"
#Correlation (trivially a 1x1 matrix for the single target column)
y_dataset.corr()
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="TC8ew5fL0zri" outputId="1fe8788d-d132-47d6-a19e-d062819951ae"
x_dataset.head()
# + colab={"base_uri": "https://localhost:8080/"} id="wuSK7UsN2IZl" outputId="c1806810-1cda-4bae-da04-741bf62bcead"
reg = LinearRegression() # create the model (single feature x1 only)
reg.fit(pd.DataFrame(tempx1), y_dataset) # fit
# + colab={"base_uri": "https://localhost:8080/"} id="Z5Zny6Jl2RLT" outputId="419ea2cd-5dee-4972-c5cb-74dbca64bc0e"
# NOTE(review): this rebinds `reg` to a model trained on all five features,
# discarding the single-feature model fitted in the previous cell.
reg = linear_model.LinearRegression() # create the model
reg.fit(x_dataset, y_dataset) # fit
# + colab={"base_uri": "https://localhost:8080/"} id="oZqM4X6n2UDA" outputId="ffad7691-70fa-409b-92d9-79b2a0b9f913"
print('Коэффициенты: ', reg.coef_)
print('Свободный член: ', reg.intercept_)
# + colab={"base_uri": "https://localhost:8080/"} id="cU9YE6pO2oCx" outputId="8597e258-ebe9-4949-e399-f6512314fa63"
# NOTE(review): if the cells run top-to-bottom, `reg` is now the 5-feature
# model, so predicting on a single-column frame here raises a feature-count
# mismatch; the recorded output suggests the cells were executed out of order.
print('Предсказали: \n', reg.predict(pd.DataFrame(tempx1).iloc[0:5]),"\n")
print('Реально: \n', y_dataset.iloc[0:5])
# + colab={"base_uri": "https://localhost:8080/"} id="VhP3ilxA2eY-" outputId="a525ddfc-601b-4046-aae5-0f1d5ea81c33"
print('Предсказали: \n', reg.predict(x_dataset.iloc[0:5]))
print('Реально: \n', y_dataset.iloc[0:5])
# + colab={"base_uri": "https://localhost:8080/"} id="9v-pvGBT72ok" outputId="d1e96130-d1e1-40cf-fe80-4f9600f6640e"
# Manual prediction with hard-coded coefficients (copied from an earlier
# single-feature fit) — illustration only; goes stale if the data changes.
Ymod =-0.65338473 + 30.55841059 * tempx1
Ymod
# + id="yXWwxoSAzrfu"
# Divide the data: 80% train / 20% test with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x_dataset, y_dataset, test_size=0.2, random_state=123)
# + id="EzYCNbWC1Qcj"
reg2 = LinearRegression().fit(x_train, y_train) # create and fit on the training split only
# + colab={"base_uri": "https://localhost:8080/"} id="R7DJNtDz2ZuC" outputId="d83ccca4-3ede-4ccf-ffe1-febd7f7265a5"
print('Коэффициенты: ', reg2.coef_)
print('Свободный член: ', reg2.intercept_)
# + colab={"base_uri": "https://localhost:8080/"} id="Zadn4Qln1fxN" outputId="d8554de2-150c-4566-e88f-ed636bac75b3"
# evaluation on the held-out 20%
print('Модель, которая обучалась на 80% выборки: ', reg2.score(x_test, y_test))
# second score for comparison
# NOTE(review): `reg` was fitted on the *whole* dataset, so its test rows were
# seen during training — this comparison score is optimistically biased.
print('Модель, которая обучалась на всей выборке: ', reg.score(x_test, y_test))
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="LEhwQu6M4NLu" outputId="f35c9119-fab2-4465-99fc-ec151d0f0547"
# Predictions (green) vs. actual target values (blue), indexed by row number.
plt.figure()
plt.scatter(np.arange(0, len(x_dataset), 1), reg2.predict(x_dataset), color='g') # predicted
plt.scatter(np.arange(0, len(x_dataset), 1), y_dataset, color='b') # actual data
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="Kq0zyvhH4hqA" outputId="5fd7ee9e-21e7-4d67-fa86-753dcc40ad54"
# Same comparison, with the actual data drawn as a line instead of points.
plt.figure()
plt.scatter(np.arange(0, len(x_dataset), 1), reg2.predict(x_dataset), color='g') # predicted
plt.plot(np.arange(0, len(x_dataset), 1), y_dataset, color='b') # actual data
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="cSktDLON2we2" outputId="2a5b44e9-a255-4e13-b6a7-42d8122cb0c9"
# a few concrete prediction examples — accuracy is visibly rather low
print('Реально: ', y_dataset.iloc[0:5])
print('Предсказано: ', reg2.predict(x_dataset.iloc[0:5]))
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="jn10STBe2_Jc" outputId="97d6f947-6d7d-40d6-a19e-ad8d9e1b620c"
regs_list = [] # all pairwise (single-feature) regressions are collected here (4 total)
# loop over each training column, fitting a pairwise regression against the target
for col in x_train.columns:
    #print('ПО СТОЛБЦУ НОМЕР: ', col + 1, 'из 4')
    new_reg = linear_model.LinearRegression().fit(x_train[col].values.reshape((-1, 1)), y_train) # create the model
    print('Score: ', new_reg.score(x_test[col].values.reshape((-1,1)), y_test)) # score on the held-out test split
    print('Коэффициент: ', new_reg.coef_)
    regs_list.append(new_reg) # keep the fitted model
    plt.figure() # plot the raw data against the fitted line
    plt.scatter(x_dataset[col], y_dataset, color='r') # actual data
    plt.plot(x_dataset[col], new_reg.predict(x_dataset[col].values.reshape((-1, 1))), color='g') # predictions
    plt.show()
    # a few concrete prediction examples for illustration
    print('Примеры предсказания:')
    empt_data = {'Реально':[],
                 'Предсказано':[]}
    some_exmpl = pd.DataFrame(empt_data, columns = ['Реально', 'Предсказано'])
    # NOTE(review): assigning the one-column frame y_dataset[0:5] into a column
    # relies on pandas accepting a single-column DataFrame here — confirm on
    # the target pandas version.
    some_exmpl['Реально'] = y_dataset[0:5]
    some_exmpl['Предсказано'] = new_reg.predict(x_dataset[col].iloc[0:5].values.reshape((-1,1)))
    print(some_exmpl)
    print('\n\n\n')
# + [markdown] id="YfM0azzI5GAe"
# Мы видим, что ни одна из парных регрессий, представленных выше, не справляется со своей задачей на должном уровне. Об этом говорит низкий score(), большой разброс точек с реальными данными относительно прямой предсказания, большой промах по предсказанным значениям на конкретных примерах. Множественная регрессия (та, что выше) справляется намного лучше.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="rZH10wNr4_C0" outputId="3a6b66f3-d6d4-4fd1-f7d8-79a9d5288f39"
# For every non-target feature, fit polynomial regressions of degree 2, 3 and
# 10 on the training split, report the test-set score/coefficients, and plot
# the fitted curve (green) against the raw data (red). The three near-identical
# per-degree sections are folded into one inner loop; printed output and
# figures are identical to the original cell.
for col in x_train.columns:
    train_col = x_train[col].values.reshape((-1, 1))
    test_col = x_test[col].values.reshape((-1, 1))
    sorted_full = x_dataset[col].sort_values()
    sorted_col = sorted_full.values.reshape((-1, 1))
    for degree, label in ((2, 'ВТОРОГО ПОРЯДКА'), (3, 'ТРЕТЬЕГО ПОРЯДКА'), (10, 'ДЕСЯТОГО ПОРЯДКА')):
        poly = PolynomialFeatures(degree = degree)
        model = linear_model.LinearRegression().fit(poly.fit_transform(train_col), y_train)
        print(label + '\nscore()=', model.score(poly.fit_transform(test_col), y_test), '\nкоэффициенты=', model.coef_, '\nсвободный=', model.intercept_)
        plt.figure()
        plt.scatter(x_dataset[col], y_dataset, color='r')
        plt.plot(sorted_full, model.predict(poly.fit_transform(sorted_col)), color='g')
        plt.show()
    print('\n\n\n')
# + [markdown] id="uERk6ycN5mka"
# Какой можно сделать вывод про полиномиальные регрессии?
# Она, так же как и парная регрессия, просто неконкурентоспособна по сравнению со множественной регрессией. Score() довольно мал даже у регрессии 10-ого порядка. Из всей работы очень просто сделать вывод - множественная регрессия является лучшим решением, т.к. все нецелевые признаки в своей совокупности оказывают влияние на целевой признак, и это нельзя не учитывать.
# Однако данный вывод делается конкретно для этого случая, т.е. для данной выборки. Ведь в этом и есть наша работа - подобрать такую регрессию, которая сможет наиболее точно выполнять предиктивные функции.
|
ML/ML_Sem_6.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Face detection con OpenCV
#
# Una de las viejas formas (2001), Para hacer una deteccion de Rostros es usar un clasificador: Haar cascade; este tipo de clasificador es usado en OpenCV para encontrar features y en una imagen siguiendo un proceso de cascada para encontrar rostros
#
#
# [Aqui pueden encontar el articulo original](https://www.cs.cmu.edu/~efros/courses/LBMV07/Papers/viola-cvpr-01.pdf).
#
# Librerias
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import cv2
# +
# Load the color image (OpenCV reads it in BGR channel order).
image = cv2.imread('images/ironman_last_night.png')
#image = cv2.imread('images/avengers.jpg')
#image = cv2.imread('images/drake_meme.jpg')
#image = cv2.imread('images/people.jpg')
# convert to RGB so matplotlib displays the colors correctly
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
plt.figure(figsize=(20,10))
plt.imshow(image)
# -
# To implement a face detector, the first thing we do is convert the image to grayscale. This is entirely valid because identifying a face only requires the structure of the face, not its color.
#
# +
# Convert to grayscale
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt.figure(figsize=(20,10))
plt.imshow(gray, cmap='gray')
# -
# Ahora cargaremos un modelo prevamente construido que se encuentra en `files/ haarcascade_frontalface_default.xml` y usarlo para identificar los rostros
#
# **Descripcion de parametros**
#
# El numero de imagenes detectadas es definido por: `detectMultiScale` este se utiliza para detectar diferentes tamaños de imagenes. Los parametros para esta función son los siguientes: `(image, scaleFactor, minNeighbors)`; Por lo regular se detectaran mas imagenes con valores pequeños en el `scaleFactor` y valores menores para `minNeighbors`, Pero al incrementar estos valores se obtendra un mejor performance. Modificaremos estos valores para ajustarse a las imagenes de entrada.
# +
# Load the pre-trained Haar cascade frontal-face model from disk.
face_cascade = cv2.CascadeClassifier('files/haarcascade_frontalface_default.xml')
# Run the detector on the grayscale image (scaleFactor=4, minNeighbors=6).
faces = face_cascade.detectMultiScale(gray, 4, 6)
#faces = face_cascade.detectMultiScale(gray, 1.11, 2)
# -
# detectMultiScale returns an array of detections: (x, y, w, h) boxes around each face.
print(f'Se han encontrado {len(faces)} rostros en esta imagen')
print("Las coordenadas y tamaños son los siguientes")
print('=============================')
print(faces)
# ### Ahora procederemos a dibujar estos cuadros sobre la imagen original
# +
img_with_detections = np.copy(image) # work on a copy so the original image stays untouched
# Iterate over the detected faces
for (x,y,w,h) in faces:
    # Draw each bounding box onto the copy.
    # The fourth argument is the RGB color,
    # and the last argument is the line thickness.
    cv2.rectangle(img_with_detections,(x,y),(x+w,y+h),(255,0,0),10)
plt.figure(figsize=(20,10))
plt.imshow(img_with_detections)
# -
from IPython.display import Audio,Image, YouTubeVideo
YouTubeVideo(id='zokoTyPjzrI',width=350,height=300)
|
work/.ipynb_checkpoints/face_detection-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <a href="https://cognitiveclass.ai"><img src = "https://ibm.box.com/shared/static/9gegpsmnsoo25ikkbl4qzlvlyjbgxs5x.png" width = 400> </a>
#
# <h1 align=center><font size = 5>Introduction to Matplotlib and Line Plots</font></h1>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Introduction
#
# The aim of these labs is to introduce you to data visualization with Python as concrete and as consistent as possible.
# Speaking of consistency, because there is no *best* data visualization library available for Python - up to creating these labs - we have to introduce different libraries and show their benefits when we are discussing new visualization concepts. Doing so, we hope to make students well-rounded with visualization libraries and concepts so that they are able to judge and decide on the best visualization technique and tool for a given problem _and_ audience.
#
# Please make sure that you have completed the prerequisites for this course, namely [**Python for Data Science**](https://cognitiveclass.ai/courses/python-for-data-science/).
#
# **Note**: The majority of the plots and visualizations will be generated using data stored in *pandas* dataframes. Therefore, in this lab, we provide a brief crash course on *pandas*. However, if you are interested in learning more about the *pandas* library, detailed description and explanation of how to use it and how to clean, munge, and process data stored in a *pandas* dataframe are provided in our course [**Data Analysis with Python**](https://cognitiveclass.ai/courses/data-analysis-python/).
#
# ------------
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Table of Contents
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
#
# 1. [Exploring Datasets with *pandas*](#0)<br>
# 2. [The Dataset: Immigration to Canada from 1980 to 2013](#2)<br>
# 3. [*pandas* Basics](#4) <br>
# 4. [*pandas* Intermediate: Indexing and Selection](#6) <br>
# 5. [Visualizing Data using Matplotlib](#8) <br>
# 6. [Matplotlib: Standard Python Visualization Library](#10) <br>
# 7. [Line Plots](#12)
#
# </div>
# <hr>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# # Exploring Datasets with *pandas* <a id="0"></a>
#
# *pandas* is an essential data analysis toolkit for Python. From their [website](http://pandas.pydata.org/):
# >*pandas* is a Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive. It aims to be the fundamental high-level building block for doing practical, **real world** data analysis in Python.
#
# The course heavily relies on *pandas* for data wrangling, analysis, and visualization. We encourage you to spend some time and familiarize yourself with the *pandas* API Reference: http://pandas.pydata.org/pandas-docs/stable/api.html.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## The Dataset: Immigration to Canada from 1980 to 2013 <a id="2"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Dataset Source: [International migration flows to and from selected countries - The 2015 revision](http://www.un.org/en/development/desa/population/migration/data/empirical2/migrationflows.shtml).
#
# The dataset contains annual data on the flows of international immigrants as recorded by the countries of destination. The data presents both inflows and outflows according to the place of birth, citizenship or place of previous / next residence both for foreigners and nationals. The current version presents data pertaining to 45 countries.
#
# In this lab, we will focus on the Canadian immigration data.
#
# <img src = "https://ibm.box.com/shared/static/mb48k9fiylkd7z3a21cq38xxfy1wni2y.png" align="center" width=900>
#
# For sake of simplicity, Canada's immigration data has been extracted and uploaded to one of IBM servers. You can fetch the data from [here](https://ibm.box.com/shared/static/lw190pt9zpy5bd1ptyg2aw15awomz9pu.xlsx).
#
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## *pandas* Basics<a id="4"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# The first thing we'll do is import two key data analysis modules: *pandas* and **Numpy**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
import numpy as np # useful for many scientific computing in Python
import pandas as pd # primary data structure library
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's download and import our primary Canadian Immigration dataset using *pandas* `read_excel()` method. Normally, before we can do that, we would need to download a module which *pandas* requires to read in excel files. This module is **xlrd**. For your convenience, we have pre-installed this module, so you would not have to worry about that. Otherwise, you would need to run the following line of code to install the **xlrd** module:
# ```
# # # !conda install -c anaconda xlrd --yes
# ```
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Now we are ready to read in our data.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# Read the immigration workbook: skip the 20 header rows and the 2 footer rows
# that surround the actual data table in the 'Canada by Citizenship' sheet.
df_can = pd.read_excel('Canada.xlsx',
                       sheet_name='Canada by Citizenship',
                       skiprows=range(20),
                       skipfooter=2,)
print('Data read into a pandas dataframe!')
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's view the top 5 rows of the dataset using the `head()` function.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.head()
# tip: You can specify the number of rows you'd like to see as follows: df_can.head(10)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We can also view the bottom 5 rows of the dataset using the `tail()` function.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.tail()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# When analyzing a dataset, it's always a good idea to start by getting basic information about your dataframe. We can do this by using the `info()` method.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.info()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# To get the list of column headers we can call upon the dataframe's `.columns` parameter.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.columns.values # returns a numpy array
# -
df_can.columns # returns a pandas Index object
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Similarly, to get the list of indices we use the `.index` parameter.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.index.values # returns a numpy array
# -
df_can.index # returns a range object
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Note: The default type of index and columns is NOT list.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
print(type(df_can.columns))
print(type(df_can.index))
# -
print(type(df_can.columns.values))
print(type(df_can.index.values))
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# To get the index and columns as lists, we can use the `tolist()` method.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.columns.tolist()
df_can.index.tolist()
print (type(df_can.columns.tolist()))
print (type(df_can.index.tolist()))
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# To view the dimensions of the dataframe, we use the `.shape` parameter.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# size of dataframe (rows, columns)
df_can.shape
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Note: The main types stored in *pandas* objects are *float*, *int*, *bool*, *datetime64[ns]* and *datetime64[ns, tz] (in >= 0.17.0)*, *timedelta[ns]*, *category (in >= 0.15.0)*, and *object* (string). In addition these dtypes have item sizes, e.g. int64 and int32.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's clean the data set to remove a few unnecessary columns. We can use *pandas* `drop()` method as follows:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# in pandas axis=0 represents rows (default) and axis=1 represents columns.
# Drop metadata columns that are not needed for the analysis.
df_can.drop(['AREA','REG','DEV','Type','Coverage'], axis=1, inplace=True)
df_can.head(2)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's rename the columns so that they make sense. We can use `rename()` method by passing in a dictionary of old and new names as follows:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.rename(columns={'OdName':'Country', 'AreaName':'Continent', 'RegName':'Region'}, inplace=True)
df_can.columns
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We will also add a 'Total' column that sums up the total immigrants by country over the entire period 1980 - 2013, as follows:
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
# numeric_only=True restricts the row-wise sum to the numeric year columns.
# Without it, modern pandas (>= 2.0) raises a TypeError when the sum reaches
# the string columns ('Country', 'Continent', 'Region') that are still present
# at this point; older pandas silently dropped them, which is the intent here.
df_can['Total'] = df_can.sum(axis=1, numeric_only=True)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We can check to see how many null objects we have in the dataset as follows:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.isnull().sum()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Finally, let's view a quick summary of each column in our dataframe using the `describe()` method.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.describe()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ---
# ## *pandas* Intermediate: Indexing and Selection (slicing)<a id="6"></a>
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Select Column
# **There are two ways to filter on a column name:**
#
# Method 1: Quick and easy, but only works if the column name does NOT have spaces or special characters.
# ```python
# df.column_name
# (returns series)
# ```
#
# Method 2: More robust, and can filter on multiple columns.
#
# ```python
# df['column']
# (returns series)
# ```
#
# ```python
# df[['column 1', 'column 2']]
# (returns dataframe)
# ```
# ---
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Example: Let's try filtering on the list of countries ('Country').
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.Country # returns a series
# -
df_can['Country'] # equivalent bracket syntax — also returns a series
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's try filtering on the list of countries ('Country', originally 'OdName') and the data for years: 1980 - 1985.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can[['Country', 1980, 1981, 1982, 1983, 1984, 1985]] # returns a dataframe
# notice that 'Country' is string, and the years are integers.
# for the sake of consistency, we will convert all column names to string later on.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Select Row
#
# There are main 3 ways to select rows:
#
# ```python
# df.loc[label]
# #filters by the labels of the index/column
# df.iloc[index]
# #filters by the positions of the index/column
# ```
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Before we proceed, notice that the default index of the dataset is a numeric range from 0 to 194. This makes it very difficult to do a query by a specific country. For example to search for data on Japan, we need to know the corresponding index value.
#
# This can be fixed very easily by setting the 'Country' column as the index using `set_index()` method.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
df_can.set_index('Country', inplace=True)
# tip: The opposite of set is reset. So to reset the index, we can use df_can.reset_index()
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
df_can.head(3)
# -
df_can.index.name # check the name of the index
# + button=false deletable=true new_sheet=false run_control={"read_only": false} active=""
# # optional: to remove the name of the index
# df_can.index.name = None
# df_can.index.name
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Example: Let's view the number of immigrants from Japan (row 87) for the following scenarios:
# 1. The full row data (all columns)
# 2. For year 2013
# 3. For years 1980 to 1985
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# 1. the full row data (all columns)
print(df_can.loc['Japan'])
# -
# alternate method
print(df_can.iloc[87])
# alternate method
print(df_can[df_can.index == 'Japan'].T.squeeze())
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# 2. for year 2013
print(df_can.loc['Japan', 2013])
# -
# alternate method
print(df_can.iloc[87, 36]) # year 2013 is the last column, with a positional index of 36
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# 3. for years 1980 to 1985
print(df_can.loc['Japan', [1980, 1981, 1982, 1983, 1984, 1985]])
# -
# alternate method
print(df_can.iloc[87, [3, 4, 5, 6, 7, 8]])
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Column names that are integers (such as the years) might introduce some confusion. For example, when we are referencing the year 2013, one might confuse that when the 2013th positional index.
#
# To avoid this ambuigity, let's convert the column names into strings: '1980' to '2013'.
# + button=false deletable=true new_sheet=false run_control={"read_only": false}
df_can.columns = list(map(str, df_can.columns))
[print (type(x)) for x in df_can.columns.values] # list comprehension
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Since we converted the years to string, let's declare a variable that will allow us to easily call upon the full range of years:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# useful for plotting later on
years = list(map(str, range(1980, 2014)))
years
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Filtering based on a criteria
# To filter the dataframe based on a condition, we simply pass the condition as a boolean vector.
#
# For example, Let's filter the dataframe to show the data on Asian countries (AreaName = Asia).
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# 1. create the condition boolean series
condition = df_can['Continent'] == 'Asia'
print (condition)
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# 2. pass this condition into the dataFrame
df_can[condition]
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# we can pass multiple criteria in the same line.
# let's filter for Continent = Asia and Region = Southern Asia
# (these columns were renamed from AreaName / RegName earlier)
df_can[(df_can['Continent']=='Asia') & (df_can['Region']=='Southern Asia')]
# note: When using 'and' and 'or' operators, pandas requires we use '&' and '|' instead of 'and' and 'or'
# don't forget to enclose the two conditions in parentheses
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Before we proceed: let's review the changes we have made to our dataframe.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
print ('data dimensions:', df_can.shape)
print(df_can.columns)
df_can.head(2)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ---
# # Visualizing Data using Matplotlib<a id="8"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ## Matplotlib: Standard Python Visualization Library<a id="10"></a>
#
# The primary plotting library we will explore in the course is [Matplotlib](http://matplotlib.org/). As mentioned on their website:
# >Matplotlib is a Python 2D plotting library which produces publication quality figures in a variety of hardcopy formats and interactive environments across platforms. Matplotlib can be used in Python scripts, the Python and IPython shell, the jupyter notebook, web application servers, and four graphical user interface toolkits.
#
# If you are aspiring to create impactful visualization with python, Matplotlib is an essential tool to have at your disposal.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Matplotlib.Pyplot
#
# One of the core aspects of Matplotlib is `matplotlib.pyplot`. It is Matplotlib's scripting layer which we studied in details in the videos about Matplotlib. Recall that it is a collection of command style functions that make Matplotlib work like MATLAB. Each `pyplot` function makes some change to a figure: e.g., creates a figure, creates a plotting area in a figure, plots some lines in a plotting area, decorates the plot with labels, etc. In this lab, we will work with the scripting layer to learn how to generate line plots. In future labs, we will get to work with the Artist layer as well to experiment first hand how it differs from the scripting layer.
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Let's start by importing `Matplotlib` and `Matplotlib.pyplot` as follows:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# we are using the inline backend
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# *optional: check if Matplotlib is loaded.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# Sanity check: print the Matplotlib version to confirm the import worked.
print ('Matplotlib version: ', mpl.__version__)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# *optional: apply a style to Matplotlib.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# List the style sheets shipped with this Matplotlib installation.
print(plt.style.available)
# -
# Apply the ggplot style globally (affects every figure created below).
mpl.style.use(['ggplot']) # optional: for ggplot-like style
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Plotting in *pandas*
#
# Fortunately, pandas has a built-in implementation of Matplotlib that we can use. Plotting in *pandas* is as simple as appending a `.plot()` method to a series or dataframe.
#
# Documentation:
# - [Plotting with Series](http://pandas.pydata.org/pandas-docs/stable/api.html#plotting)<br>
# - [Plotting with Dataframes](http://pandas.pydata.org/pandas-docs/stable/api.html#api-dataframe-plotting)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# # Line Plots (Series/Dataframe) <a id="12"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# **What is a line plot and why use it?**
#
# A line chart or line plot is a type of plot which displays information as a series of data points called 'markers' connected by straight line segments. It is a basic type of chart common in many fields.
# Use line plot when you have a continuous data set. These are best suited for trend-based visualizations of data over a period of time.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# **Let's start with a case study:**
#
# In 2010, Haiti suffered a catastrophic magnitude 7.0 earthquake. The quake caused widespread devastation and loss of life and about three million people were affected by this natural disaster. As part of Canada's humanitarian effort, the Government of Canada stepped up its effort in accepting refugees from Haiti. We can quickly visualize this effort using a `Line` plot:
#
# **Question:** Plot a line graph of immigration from Haiti using `df.plot()`.
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# First, we will extract the data series for Haiti.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# `df_can` (the immigration dataframe) and `years` (the list of year column
# labels) are defined in earlier cells of this notebook — not shown here.
haiti = df_can.loc['Haiti', years] # passing in years 1980 - 2013 to exclude the 'total' column
haiti.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Next, we will plot a line plot by appending `.plot()` to the `haiti` dataframe.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# A Series plots with its index (the years) on the x-axis by default.
haiti.plot()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# *pandas* automatically populated the x-axis with the index values (years), and the y-axis with the column values (population). However, notice how the years were not displayed because they are of type *string*. Therefore, let's change the type of the index values to *integer* for plotting.
#
# Also, let's label the x and y axis using `plt.title()`, `plt.ylabel()`, and `plt.xlabel()` as follows:
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# Re-plot with a numeric x-axis and proper title/axis labels.
haiti.index = haiti.index.map(int) # let's change the index values of Haiti to type integer for plotting
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
plt.show() # need this line to show the updates made to the figure
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We can clearly notice how number of immigrants from Haiti spiked up from 2010 as Canada stepped up its efforts to accept refugees from Haiti. Let's annotate this spike in the plot by using the `plt.text()` method.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# Same plot as above, now with a text annotation placed at (x=2000, y=6000).
haiti.plot(kind='line')
plt.title('Immigration from Haiti')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
# annotate the 2010 Earthquake.
# syntax: plt.text(x, y, label)
plt.text(2000, 6000, '2010 Earthquake') # see note below
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# With just a few lines of code, you were able to quickly identify and visualize the spike in immigration!
#
# Quick note on x and y values in `plt.text(x, y, label)`:
#
# Since the x-axis (years) is type 'integer', we specified x as a year. The y axis (number of immigrants) is type 'integer', so we can just specify the value y = 6000.
#
# ```python
# plt.text(2000, 6000, '2010 Earthquake') # years stored as type int
# ```
# If the years were stored as type 'string', we would need to specify x as the index position of the year. Eg 20th index is year 2000 since it is the 20th year with a base year of 1980.
# ```python
# plt.text(20, 6000, '2010 Earthquake') # years stored as type str
# ```
# We will cover advanced annotation methods in later modules.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# We can easily add more countries to the line plot to make meaningful comparisons of immigration from different countries.
#
# **Question:** Let's compare the number of immigrants from India and China from 1980 to 2013.
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Step 1: Get the data set for China and India, and display dataframe.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
### type your answer here
# Select the rows for China and India across all year columns.
df_CI = df_can.loc[['China', 'India'], years]
df_CI.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# df_CI = df_can.loc[['India', 'China'], years]
# df_CI.head()
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Step 2: Plot graph. We will explicitly specify line plot by passing in `kind` parameter to `plot()`.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
### type your answer here
# NOTE: here the countries are rows, so pandas draws one line per year —
# the transpose a few cells below fixes this.
df_CI.plot(kind = 'line')
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# df_CI.plot(kind='line')
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# That doesn't look right...
#
# Recall that *pandas* plots the indices on the x-axis and the columns as individual lines on the y-axis. Since `df_CI` is a dataframe with the `country` as the index and `years` as the columns, we must first transpose the dataframe using `transpose()` method to swap the row and columns.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
# Swap rows and columns so years become the index (x-axis) and each
# country becomes a column (one line per country).
df_CI = df_CI.transpose()
df_CI.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# *pandas* will automatically graph the two countries on the same graph. Go ahead and plot the new transposed dataframe. Make sure to add a title to the plot and label the axes.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
### type your answer here
# NOTE(review): unlike the official solution below, the index is not
# converted to int here, so the x-axis labels remain strings.
df_CI.plot(kind = 'line')
plt.title('Immigrants from China and India')
plt.ylabel('Number of immigrants')
plt.xlabel('Years')
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# df_CI.index = df_CI.index.map(int) # let's change the index values of df_CI to type integer for plotting
# df_CI.plot(kind='line')
# -->
#
# <!--
# plt.title('Immigrants from China and India')
# plt.ylabel('Number of Immigrants')
# plt.xlabel('Years')
# -->
#
# <!--
# plt.show()
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# From the above plot, we can observe that China and India have very similar immigration trends through the years.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# *Note*: How come we didn't need to transpose Haiti's dataframe before plotting (like we did for df_CI)?
#
# That's because `haiti` is a series as opposed to a dataframe, and has the years as its indices as shown below.
# ```python
# print(type(haiti))
# print(haiti.head(5))
# ```
# >class 'pandas.core.series.Series' <br>
# >1980 1666 <br>
# >1981 3692 <br>
# >1982 3498 <br>
# >1983 2860 <br>
# >1984 1418 <br>
# >Name: Haiti, dtype: int64 <br>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# A line plot is a handy tool to display several dependent variables against one independent variable. However, it is recommended to draw no more than 5-10 lines on a single graph; any more than that and it becomes difficult to interpret.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# **Question:** Compare the trend of top 5 countries that contributed the most to immigration to Canada.
# + button=false deletable=true jupyter={"outputs_hidden": false} new_sheet=false run_control={"read_only": false}
### type your answer here
df_can.sort_values(['Total'], ascending = False, axis = 0, inplace = True)
df_top5 =df_can.head(5)
# -
df_top5 = df_top5[years].transpose()
df_top5
df_top5.index = df_top5.index.map(int)
df_top5.plot(kind='line', figsize=(14, 8))
plt.title('Immigration Trend of Top 5 Countries')
plt.ylabel('Number of Immigrants')
plt.xlabel('Years')
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# \\ # Step 1: Get the dataset. Recall that we created a Total column that calculates the cumulative immigration by country. \\ We will sort on this column to get our top 5 countries using pandas sort_values() method.
# \\ inplace = True paramemter saves the changes to the original df_can dataframe
# df_can.sort_values(by='Total', ascending=False, axis=0, inplace=True)
# -->
#
# <!--
# # get the top 5 entries
# df_top5 = df_can.head(5)
# -->
#
# <!--
# # transpose the dataframe
# df_top5 = df_top5[years].transpose()
# -->
#
# <!--
# print(df_top5)
# -->
#
# <!--
# \\ # Step 2: Plot the dataframe. To make the plot more readeable, we will change the size using the `figsize` parameter.
# df_top5.index = df_top5.index.map(int) # let's change the index values of df_top5 to type integer for plotting
# df_top5.plot(kind='line', figsize=(14, 8)) # pass a tuple (x, y) size
# -->
#
# <!--
# plt.title('Immigration Trend of Top 5 Countries')
# plt.ylabel('Number of Immigrants')
# plt.xlabel('Years')
# -->
#
# <!--
# plt.show()
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Other Plots
#
# Congratulations! you have learned how to wrangle data with python and create a line plot with Matplotlib. There are many other plotting styles available other than the default Line plot, all of which can be accessed by passing `kind` keyword to `plot()`. The full list of available plots are as follows:
#
# * `bar` for vertical bar plots
# * `barh` for horizontal bar plots
# * `hist` for histogram
# * `box` for boxplot
# * `kde` or `density` for density plots
# * `area` for area plots
# * `pie` for pie plots
# * `scatter` for scatter plots
# * `hexbin` for hexbin plot
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# ### Thank you for completing this lab!
#
# This notebook was originally created by [<NAME>](https://www.linkedin.com/in/jayrajasekharan) with contributions from [<NAME>](https://www.linkedin.com/in/ehsanmkermani), and [<NAME>](https://www.linkedin.com/in/slobodan-markovic).
#
# This notebook was recently revised by [<NAME>](https://www.linkedin.com/in/aklson/). I hope you found this lab session interesting. Feel free to contact me if you have any questions!
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# This notebook is part of the free course on **Cognitive Class** called *Data Visualization with Python*. If you accessed this notebook outside the course, you can take this free self-paced course online by clicking [here](https://cocl.us/DV0101EN_Lab1).
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false}
# <hr>
# Copyright © 2018 [Cognitive Class](https://cognitiveclass.ai/?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
|
data_visualization_with_python/notebooks/1_matplotlib_intro_and_line_plots.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="r3-krnDxznLX"
# # Clusterização de dados de amaciamento - Processamento completo
#
# *Versão atual: 3.1*
#
# Mudanças em relação ao v3.0
#
# - Correção na indexação de ensaios
# - Aprimoramento na documentação de algumas funções
# - Adição de análise silhouette
#
# Mudanças em relação ao v2.0
#
# - Usando novos dados processados em Abril, que incluem o RMS e Curtose dos filtros lowpass, bandpass e highpass para os dados de Vibração Calota Inferior
#
# Mudanças em relação ao v1.0
#
# - Usados novos dados com outros tratamentos do thaler
# - Usadas colunas com tratamento de variância
# - Aumentado o tempo máximo usado no treinamento de 19 horas pra 21 horas e mínimo para 1 hora
# - Incluido as colunas CorrenteVariancia , VibracaoCalotaInferiorVariancia , VibracaoCalotaSuperiorVariancia, Vazao no treinamento
# - Aplicado StandardScaler (desvio padrão 1, média 0) no fim do tratamento
# - Exportado tabela para cada amostra com os resultados dos clusters para cada feature treinada
# + colab={"base_uri": "https://localhost:8080/"} id="P4tUuP2qJYYQ" outputId="1ca1851d-3042-4cd0-d023-7c63812b11c5"
# from google.colab import drive
# drive.mount('/content/drive')
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="Nzup51SiPDuL" outputId="06b3585d-468c-44b2-88f4-ff2edfb8754c"
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
import plotly.graph_objects as go
from sklearn.cluster import KMeans
import numpy as np
from sklearn.preprocessing import StandardScaler, PolynomialFeatures, MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.cluster import DBSCAN
from matplotlib import pyplot
import warnings #used below to silence warnings emitted while running a cell
# Default figure size used by several plots in this notebook.
figsize=(8, 6)
# Enable plotly offline mode (renders figures without a plotly server).
import plotly
import plotly.offline as py
from plotly.offline import plot, iplot
import plotly.graph_objs as go
plotly.offline.init_notebook_mode(connected=True)
# Enable the Google Colab renderer for plotly figures.
import plotly.io as pio
pio.renderers  # NOTE(review): bare expression — only useful as cell output
pio.renderers.default = "colab"
# + id="Ymh4RFcVMVz2"
sns.set(palette='bright',color_codes=True,style='whitegrid')
# + id="VOlv2Wi0wJXF"
# a3_1_antigo = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A3_N_2019_12_04tAm2.5.csv')
# a3_2_antigo = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A3_A_2019_12_09tAm11.8.csv')
# a3_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A3_N_2019_12_04tAm2.5.csv')
# a3_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A3_A_2019_12_09tAm11.8.csv')
# a3_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A3_A_2019_12_11tAm2.5.csv')
# a4_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A4_N_2019_12_16tAm2.1.csv')
# a4_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A4_A_2019_12_19tAm6.csv')
# a4_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A4_A_2020_01_06tAm2.5.csv')
# a4_4 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A4_A_2020_01_13tAm2.5.csv')
# a5_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A5_N_2020_01_22tAm2.5.csv')
# a5_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A5_A_2020_01_27tAm12.5.csv')
# a5_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021_04 - Dados processados/A5_A_2020_01_28tAm2.5.csv')
# b5_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B5_N_2020_10_16tAm0.csv')
# b5_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B5_A_2020_10_22tAm0.csv')
# b5_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B5_A_2020_10_27tAm0.csv')
# b10_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B10_N_2021_03_22tAm0.csv')
# b10_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B10_A_2021_03_25tAm0.csv')
# b10_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B10_A_2021_03_30tAm0.csv')
# b11_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B11_N_2021_04_05tAm0.csv')
# b11_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B11_A_2021_04_08tAm0.csv')
# b11_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-04 - Dados Processados_Am B/B11_A_2021_04_22tAm0.csv')
# a2 ok
# b7 ok
# b8 ok
# b12 ok
# b15 ok
#NOVO
# a2_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A2_N_2019_07_09tAm0.csv')
# a2_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A2_A_2019_08_08tAm0.csv')
# a2_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A2_A_2019_08_28tAm0.csv')
# a3_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A3_N_2019_12_04tAm0.csv')
# a3_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A3_A_2019_12_09tAm0.csv')
# a3_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A3_A_2019_12_11tAm0.csv')
# a4_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A4_N_2019_12_16tAm0.csv')
# a4_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A4_A_2019_12_19tAm0.csv')
# a4_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A4_A_2020_01_06tAm0.csv')
# a4_4 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-05 - Dados Processados Artigo C_Bias/A4_A_2020_01_13tAm0.csv')
# a5_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A5_N_2020_01_22tAm0.csv')
# a5_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A5_A_2020_01_27tAm0.csv')
# a5_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/A5_A_2020_01_28tAm0.csv')
# B5_N_2020_10_16tAm0
# b5_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B5_N_2020_10_16tAm0.csv')
# b5_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B5_A_2020_10_22tAm0.csv')
# b5_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B5_A_2020_10_27tAm0.csv')
# #NOVO
# b7_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B7_N_2021_02_05tAm0.csv')
# b7_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B7_A_2021_02_08tAm0.csv')
# b7_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B7_A_2021_02_15tAm0.csv')
# #NOVO
# b8_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B8_N_2021_02_18tAm0.csv')
# b8_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B8_A_2021_02_22tAm0.csv')
# b8_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B8_A_2021_02_26tAm0.csv')
# b10_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B10_N_2021_03_22tAm0.csv')
# b10_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B10_A_2021_03_25tAm0.csv')
# b10_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B10_A_2021_03_30tAm0.csv')
# b11_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B11_N_2021_04_05tAm0.csv')
# b11_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B11_A_2021_04_08tAm0.csv')
# b11_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B11_A_2021_04_22tAm0.csv')
# #NOVO
# b12_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B12_N_2021_04_27tAm0.csv')
# b12_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B12_A_2021_04_30tAm0.csv')
# b12_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B12_A_2021_05_04tAm0.csv')
# #NOVO
# b15_1 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B15_N_2021_05_31tAm0.csv')
# b15_2 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B15_A_2021_06_09tAm0.csv')
# b15_3 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-08 - Dados Processados Completo (Dissertação)/B15_A_2021_06_15tAm0.csv')
# Load the processed run-in ("amaciamento") test data from local disk.
# File naming: <sample>_<state>_<date>tAm<offset>.csv. Per the test labels
# used later ('_1_N', '_2_A', '_3_A'), state N presumably means a new
# (não amaciado) compressor and A a run-in (amaciado) one — TODO confirm.
# Variables are named <sample>_<test index>, e.g. a3_2 = sample A3, test 2.
a2_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A2_N_2019_07_09tAm0.csv')
a2_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A2_A_2019_08_08tAm0.csv')
a2_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A2_A_2019_08_28tAm0.csv')
a3_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A3_N_2019_12_04tAm0.csv')
a3_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A3_A_2019_12_09tAm0.csv')
a3_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A3_A_2019_12_11tAm0.csv')
a4_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A4_N_2019_12_16tAm0.csv')
a4_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A4_A_2019_12_19tAm0.csv')
a4_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A4_A_2020_01_06tAm0.csv')
# a4_4 = pd.read_csv('/content/drive/My Drive/Nicolas/2021-05 - Dados Processados Artigo C_Bias/A4_A_2020_01_13tAm0.csv')
a5_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A5_N_2020_01_22tAm0.csv')
a5_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A5_A_2020_01_27tAm0.csv')
a5_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/A5_A_2020_01_28tAm0.csv')
# B5_N_2020_10_16tAm0
# b5_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B5_N_2020_10_16tAm0.csv')
# b5_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B5_A_2020_10_22tAm0.csv')
# b5_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B5_A_2020_10_27tAm0.csv')
# #NOVO
# b7_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B7_N_2021_02_05tAm0.csv')
# b7_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B7_A_2021_02_08tAm0.csv')
# b7_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B7_A_2021_02_15tAm0.csv')
# #NOVO
# b8_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B8_N_2021_02_18tAm0.csv')
# b8_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B8_A_2021_02_22tAm0.csv')
# b8_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B8_A_2021_02_26tAm0.csv')
# b10_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B10_N_2021_03_22tAm0.csv')
# b10_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B10_A_2021_03_25tAm0.csv')
# b10_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B10_A_2021_03_30tAm0.csv')
# b11_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B11_N_2021_04_05tAm0.csv')
# b11_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B11_A_2021_04_08tAm0.csv')
# b11_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B11_A_2021_04_22tAm0.csv')
# #NOVO
# b12_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B12_N_2021_04_27tAm0.csv')
# b12_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B12_A_2021_04_30tAm0.csv')
# b12_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B12_A_2021_05_04tAm0.csv')
# #NOVO
# b15_1 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B15_N_2021_05_31tAm0.csv')
# b15_2 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B15_A_2021_06_09tAm0.csv')
# b15_3 = pd.read_csv('D:/Documentos/Amaciamento/Apresentações/00_Dissertacao/DadosNicolas/B15_A_2021_06_15tAm0.csv')
# + [markdown] id="4PBbVyeDJWHw"
# # Funções
# + [markdown] id="FRAmVrQ0KxYi"
# ## Tratamento
#
# + id="OPP8eicLKk1g"
def jan_mmovel(amostra, media, numerofeat):
    """Append lagged copies of column *media* to *amostra*, in place.

    Parameters
    ----------
    amostra : pandas.DataFrame
        Table to receive the new lag columns (mutated in place).
    media : str
        Name of the (moving-average) column to lag.
    numerofeat : int
        Lag-scheme selector: 1 adds nothing; 3 adds the lags
        (1, 2, 10, 20, 30, 60); 5 adds the lags
        (1, 2, 3, 4, 10, 20, 30, 40, 50, 60, 90, 120).
        Any other value adds nothing.

    Returns
    -------
    pandas.DataFrame
        The same DataFrame object that was passed in.
    """
    # Map each scheme to its tuple of lag offsets (in rows).
    lags_por_esquema = {
        1: (),
        3: (1, 2, 10, 20, 30, 60),
        5: (1, 2, 3, 4, 10, 20, 30, 40, 50, 60, 90, 120),
    }
    for lag in lags_por_esquema.get(numerofeat, ()):
        # New column '<media>(K-<lag>)' holds the value from <lag> rows earlier.
        amostra[f'{media}(K-{lag})'] = amostra[f'{media}'].shift(lag)
    return amostra
# + id="8rVnmKzafuzx"
def trata_amostras_varios(amostra, nome_ensaio, coluna, numerofeat, tempo):
    """Build the feature table for one sensor column of one test run.

    Keeps rows with 1 <= Tempo <= *tempo*, tags them with the run name,
    adds rolling means of *coluna* (windows 1, 5, 10, 30) and, via
    ``jan_mmovel``, lagged versions of each rolling mean according to
    *numerofeat*.

    Parameters
    ----------
    amostra : pandas.DataFrame
        Raw test-run table; must contain 'Tempo' and *coluna*.
    nome_ensaio : str
        Label stored in the 'Ensaio' column, e.g. 'a3_1_N'
        (sample, test index, run-in state).
    coluna : str
        Sensor column to process.
    numerofeat : int
        Lag scheme forwarded to ``jan_mmovel`` (1, 3 or 5).
    tempo : int or float
        Upper bound of the time window, in the units of 'Tempo'.

    Returns
    -------
    pandas.DataFrame
        New table with the original column, its moving averages,
        their lags, and bookkeeping columns 'Ensaio' / 'numfeatures'.
    """
    # Work on a private copy holding only the time axis and the target column.
    tabela = amostra[['Tempo', coluna]].copy()
    tabela['Ensaio'] = nome_ensaio
    # Restrict to the analysis window [1, tempo] and drop incomplete rows.
    tabela = tabela[(tabela.Tempo <= tempo) & (tabela.Tempo >= 1)]
    tabela = tabela.dropna(how='any')
    tabela.reset_index(drop=True, inplace=True)
    # Moving averages over increasing windows (min_periods=1 keeps early rows).
    for janela in (1, 5, 10, 30):
        tabela[f'{coluna}_MA_{janela}'] = tabela[coluna].rolling(janela, min_periods=1).mean()
    # Lagged copies of each moving average; scheme chosen by numerofeat.
    for janela in (1, 5, 10, 30):
        tabela = jan_mmovel(tabela, f'{coluna}_MA_{janela}', numerofeat).copy()
    # Record the scheme used, for later verification.
    tabela['numfeatures'] = numerofeat
    return tabela
# + id="r0h0VASxitly"
def aplica_tratamento(ensaio1, ensaio2, ensaio3, abrev, numerofeat, tempo):
    """Apply the per-column treatment to the three test runs of one sample.

    For every processed sensor column, the three runs (run 1 labelled 'N',
    runs 2 and 3 labelled 'A') are treated with ``trata_amostras_varios``,
    stacked row-wise, and the per-column results joined column-wise.
    Duplicate columns produced by the concatenation (Tempo, Ensaio, ...)
    are dropped, keeping the first occurrence.

    Parameters
    ----------
    ensaio1, ensaio2, ensaio3 : pandas.DataFrame
        Raw tables of the three test runs.
    abrev : str
        Sample abbreviation used to build the run labels (e.g. 'a3').
    numerofeat, tempo :
        Forwarded to ``trata_amostras_varios``.

    Returns
    -------
    pandas.DataFrame
        Combined feature table for the whole sample.
    """
    colunas_processadas = [
        'CorrenteRMS', 'CorrenteVariancia', 'CorrenteCurtose',
        'VibracaoCalotaInferiorRMS', 'VibracaoCalotaSuperiorRMS',
        'VibracaoCalotaInferiorCurtose', 'VibracaoCalotaSuperiorCurtose',
        'VibracaoCalotaInferiorVariancia', 'VibracaoCalotaSuperiorVariancia',
        'Vazao',
    ]
    dataset = pd.DataFrame()
    for col in colunas_processadas:
        # Treat each run, then stack the three runs on top of each other.
        tres_ensaios = pd.concat([
            trata_amostras_varios(ensaio1, f'{abrev}_1_N', col, numerofeat, tempo),
            trata_amostras_varios(ensaio2, f'{abrev}_2_A', col, numerofeat, tempo),
            trata_amostras_varios(ensaio3, f'{abrev}_3_A', col, numerofeat, tempo),
        ])
        dataset = pd.concat([dataset, tres_ensaios], axis=1)
    # Drop duplicated column names introduced by the repeated concatenation.
    return dataset.loc[:, ~dataset.columns.duplicated()].reset_index(drop=True)
# + id="8fr_DVoGUEo4"
def calcular_wcss(data):
    """Return the within-cluster sum of squares (WCSS) for k = 1..9.

    Bug fixed: the original wrote the cluster labels back into ``data``
    inside the loop, so from k = 2 onwards the 'Clusters' column produced
    by the previous iteration was fed to KMeans as an extra feature,
    skewing every subsequent inertia value. The input is now left
    untouched and every fit uses the same original feature columns.

    Parameters
    ----------
    data : DataFrame
        Feature table to cluster (a pre-existing 'Clusters' column, if
        any, is ignored).

    Returns
    -------
    list[float]
        KMeans inertia for each number of clusters from 1 to 9.
    """
    # Work on the features only; never mutate the caller's frame.
    features = data.drop(columns='Clusters', errors='ignore')
    wcss = []
    for k in range(1, 10):
        kmeans = KMeans(n_clusters=k)
        kmeans.fit(X=features)
        wcss.append(kmeans.inertia_)
    return wcss
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="nP84-nDqJMkK" outputId="c8f9865f-e399-4ffb-89ea-d17e765c158d"
# Current statistics of interest restricted to the 1 h - 20 h interval.
relat = a3_1[['Tempo', 'CorrenteRMS', 'CorrenteVariancia', 'CorrenteCurtose']]
relat = relat[relat.Tempo.between(1, 20)]
relat
# + colab={"base_uri": "https://localhost:8080/", "height": 439} id="zLp-J5wEIUbc" outputId="cca863ad-ce34-4ce9-a0b0-6d4edee8b0cc"
# Treated table of sample A3 (N = 1, 20 h) without the run-label column.
elb = aplica_tratamento(a3_1, a3_2, a3_3, "a3", 1, 20)
elb = elb.drop(columns="Ensaio")
# elbB = aplica_tratamento(b10_1,b10_2,b10_3, "b10",1,40).drop(columns="Ensaio")
# + colab={"base_uri": "https://localhost:8080/", "height": 364} id="9PsL-eePT3Hi" outputId="fcc6529a-0dbf-4cd9-c8cd-87deb9c47a91"
# Elbow plot for model A: WCSS vs. number of clusters.
warnings.simplefilter('ignore')
soma_quadrados = calcular_wcss(elb[['CorrenteRMS']])
plt.figure(figsize=(7, 5))
sns.set(palette='bright', color_codes=True, style='whitegrid', font_scale=1.3)
# Fix: pass x/y as keyword arguments — positional data arguments to
# seaborn plotting functions were deprecated in 0.12 and later removed,
# so the original call fails on current seaborn versions.
sns.lineplot(x=list(range(1, 10)), y=soma_quadrados)
# plt.xticks([1, 2,3,4,5,6,7,8,9,10])
plt.xlabel('Número de grupos')
plt.ylabel("Soma do quadrado das distâncias $[A^2]$")
plt.title("Modelo A")
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 705} id="9VMnVv11VQ_N" outputId="bb2c8f3a-9b2f-4f94-9dc7-6632c6e417e3"
# Elbow plots for models A and B side by side.
warnings.simplefilter('ignore')
# NOTE(review): this recomputes the WCSS from the same `elb` slice used
# for model A — presumably `elbB` (commented out above) was intended for
# model B; confirm before relying on the "Modelo B" figure.
soma_quadradosB = calcular_wcss(elb[['CorrenteRMS']])
plt.figure(figsize=(7, 5))
sns.set(palette='bright', color_codes=True, style='whitegrid', font_scale=1.2)
# Fix: keyword x/y — seaborn dropped support for positional data args.
sns.lineplot(x=list(range(1, 10)), y=soma_quadrados)
plt.xlabel('Número de grupos')
plt.ylabel("Soma do quadrado das distâncias $[A^2]$")
plt.title("Modelo A")
plt.figure(figsize=(7, 5))
sns.set(palette='bright', color_codes=True, style='whitegrid', font_scale=1.2)
sns.lineplot(x=list(range(1, 10)), y=soma_quadradosB)
plt.xlabel('Número de grupos')
plt.ylabel("Soma do quadrado das distâncias $[A^2]$")
plt.title("Modelo B")
plt.show()
# + [markdown] id="t1cnRvJ6K1iG"
# ## Gráficos
# + id="lyvKJbcb0TZ9"
def _pct_clusters_por_tempo(amostra, nomeamostra):
    """Return, per whole hour, the percentage of samples in each cluster.

    Filters ``amostra`` down to the run ``nomeamostra``, discretises
    'Tempo' to integer hours and computes, for every hour, the share
    (in %) of each cluster label among all samples of that hour.
    """
    recorte = amostra[amostra['Ensaio'] == nomeamostra].copy()
    # Cast to int so samples are grouped by whole hours.
    recorte['Tempo'] = recorte['Tempo'].astype(int)
    # Count occurrences of each cluster label per hour.
    recorte = recorte.groupby('Tempo')['Clusters'].value_counts()
    recorte = recorte.reset_index(name='Quantidade')
    # Total number of samples per hour.
    totais = recorte.groupby('Tempo')['Quantidade'].sum()
    recorte = recorte.merge(totais, on='Tempo')
    # Quantidade_x = count of one cluster; Quantidade_y = total of the hour.
    recorte['porcentagem'] = (recorte.Quantidade_x / recorte.Quantidade_y) * 100
    return recorte


def plot_pct_cluster(amostra1, amostra2, amostra3, amostra4, nomeamostra, titulo, pasta):
    """Plot the cluster share over time for four moving-average widths.

    The original repeated the same percentage computation four times (one
    per moving-average width); it now lives in _pct_clusters_por_tempo().

    Parameters
    ----------
    amostra1, amostra2, amostra3, amostra4 : DataFrame
        Clustered tables for moving-average widths M = 1, 5, 10 and 30.
    nomeamostra : str
        Run label to plot, e.g. 'a3_1_N'.
    titulo : str
        Figure title.
    pasta : str
        Destination folder of the saved figure (saving is currently
        disabled; kept for interface compatibility).
    """
    fig, ax = plt.subplots(1, 4, figsize=(20, 4))
    sns.set(palette='bright', color_codes=True, style='whitegrid')
    fig.suptitle(f'{titulo}')
    medias = (1, 5, 10, 30)
    for eixo, media, amostra in zip(ax, medias, (amostra1, amostra2, amostra3, amostra4)):
        pct = _pct_clusters_por_tempo(amostra, nomeamostra)
        # One bar group per hour, coloured by cluster label.
        sns.barplot(x=pct['Tempo'], y=pct['porcentagem'], hue=pct["Clusters"], ax=eixo)
        eixo.set_title(f'Proporção de clusters : M = {media}')
        eixo.set_xlabel('Tempo [h]')
        eixo.set_ylabel('Porcentagem [%]')
    # plt.savefig(f'/content/drive/My Drive/Nicolas/IMG-Clusters_X_Tempo/{pasta}/{titulo}.pdf')
    plt.show()
# -
# + [markdown] id="DhJIomdTMIk5"
# # Modelos
# + id="Z0Q87p7hCI3N"
def dif_medias(amostra: pd.DataFrame, grandeza: str, nome, numerofeat: int, janela: int, media: int, k_clusters: int = 3):
    """Build a k-means clustering model for treated test-run data.

    Parameters
    ----------
    amostra : DataFrame
        Table with run data formatted by aplica_tratamento().
    grandeza : str
        Name of the quantity of interest. Available names:
        'CorrenteRMS' 'CorrenteVariancia' 'CorrenteCurtose'
        'VibracaoCalotaInferiorRMS' 'VibracaoCalotaInferiorCurtose' 'VibracaoCalotaInferiorVariancia'
        'VibracaoCalotaSuperiorRMS' 'VibracaoCalotaSuperiorCurtose' 'VibracaoCalotaSuperiorVariancia'
        'Vazao'
    nome :
        Legacy argument kept only for interface compatibility (unused).
    numerofeat : int
        Value N, number of elements of the prediction (feature) vector.
    janela : int
        Value D, time spacing between features.
    media : int
        Value M, window width of the moving-average filter.
    k_clusters : int
        Number of clusters for k-means (default: 3).

    Returns
    -------
    dados_selecionados : DataFrame
        Table with the prediction vectors used and their cluster labels.
    modelo : KMeans
        Fitted clustering model.
    """
    # Columns follow the pattern '<grandeza>_MA_<M>' for the current value
    # and '<grandeza>_MA_<M>(K-<n*D>)' for the lagged features.
    features = [f'{grandeza}_MA_{media}'] + [
        f'{grandeza}_MA_{media}(K-{n * janela})' for n in range(1, numerofeat)
    ]
    # KMeans does not accept NaN; drop incomplete rows (lags before t = 0).
    dados_selecionados = amostra[features].dropna()
    modelo = KMeans(n_clusters=k_clusters, random_state=42)
    # fit_predict returns the label of every row directly (the original
    # discarded this return value and read modelo.labels_ instead).
    dados_selecionados['Clusters'] = modelo.fit_predict(dados_selecionados)
    return dados_selecionados, modelo
# + id="IMZ5TSfJTan9"
# Aplica o kmeans para diferentes features, plota os gráficos e gera as tabelas com os clusters para cada ponto no tempo
def dif_feat(nomeamostra, a_1, a_2, a_3, abrev, coluna, pasta, numerofeat, tempo):
    """Cluster one quantity for every (M, D) moving-average/window combination.

    The original implementation repeated the same twelve dif_medias()
    calls once per supported value of ``numerofeat`` (1, 3 or 5); the
    branches differed only in that number, so they were collapsed into a
    single parameterised path. This also generalises the function to any
    positive ``numerofeat`` (other values previously raised NameError).

    Parameters
    ----------
    nomeamostra : str
        Sample name (kept for interface compatibility; unused here).
    a_1, a_2, a_3 : DataFrame
        Raw data of the three runs of the sample.
    abrev : str
        Sample abbreviation, e.g. 'a3'.
    coluna : str
        Quantity to cluster (see dif_medias()).
    pasta : str
        Output folder for plots (the plot calls are currently disabled).
    numerofeat : int
        Value N, number of features of the prediction vector.
    tempo : int
        Time limit passed to aplica_tratamento().

    Returns
    -------
    DataFrame
        'Tempo' plus one cluster column per (M, D) combination, named
        '<coluna>M<M>_<D>' with D zero-padded to two digits.
    """
    amostra = aplica_tratamento(a_1, a_2, a_3, abrev, numerofeat, tempo)
    colunas = []
    # Same order as the original: window D = 1 (suffix '01'), then 10, then 30,
    # each with moving-average widths M = 1, 5, 10, 30.
    for janela, sufixo in ((1, '01'), (10, '10'), (30, '30')):
        for media in (1, 5, 10, 30):
            rotulado, _ = dif_medias(amostra, coluna, f'{abrev}_m{media}_{janela}',
                                     numerofeat, janela, media)
            nomecluster = f'{coluna}M{media}_{sufixo}'
            rotulado = rotulado.rename(columns={"Clusters": nomecluster})
            if not colunas:
                # The first table also carries the shared 'Tempo' column.
                colunas.append(rotulado[['Tempo', nomecluster]])
            else:
                colunas.append(rotulado[[nomecluster]])
    # Concatenate every cluster column side by side.
    return pd.concat(colunas, axis=1)
# + id="iWEaftwHaHF4"
# retorna uma tabela final com os clusters para o ensaio não amaciado e os dois ensaios amaciados
def cria_tabelas(nomeamostra, a_1, a_2, a_3, abrev, process_colunas, pasta, numerofeat, tempo):
    """Build the final cluster table covering all processed quantities.

    Runs dif_feat() for every quantity in ``process_colunas`` and joins
    the resulting tables side by side, dropping duplicated columns
    (e.g. the shared 'Tempo').
    """
    partes = [
        dif_feat(nomeamostra, a_1, a_2, a_3, abrev, grandeza, pasta, numerofeat, tempo)
        for grandeza in process_colunas
    ]
    tabelao = pd.concat([pd.DataFrame()] + partes, axis=1)
    return tabelao.loc[:, ~tabelao.columns.duplicated()].copy()
# + id="q27Q42xXHyX9"
# Quantities (columns) used in the analysis.
processadas = ['CorrenteRMS', 'CorrenteVariancia', 'CorrenteCurtose', 'VibracaoCalotaInferiorRMS', 'VibracaoCalotaSuperiorRMS',
               'VibracaoCalotaInferiorCurtose','VibracaoCalotaSuperiorCurtose',
               'VibracaoCalotaInferiorVariancia', 'VibracaoCalotaSuperiorVariancia', 'Vazao']
# + id="5X_tdxv9bnTq"
# # Amostra A3 , 1 feature
# t_a3_N1_s = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 1, 19)
# # Amostra A3, 3 features
# t_a3_N3_s = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 3, 19)
# # Amostra A3, 5 feature
# t_a3_N5_s = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 5, 19)
# # Amostra A4, 1 feature
# t_a4_N1_s = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 1, 19)
# # Amostra A4, 3 features
# t_a4_N3_s = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 3, 19)
# # Amostra A4, 5 features
# t_a4_N5_s = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 5, 19)
# # Amostra A5, 1 feature
# t_a5_N1_s = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 1, 19)
# # Amostra A5, 3 features
# t_a5_N3_s = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 3, 19)
# # Amostra A5, 5 features
# t_a5_N5_s = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 5, 19)
# + id="UqQIuIrz6HGs"
# # exportando as tabelas em csv para o drive
# pastadriveA = '/content/drive/My Drive/Nicolas/IMG-Clusters_X_Tempo/Artigo_kmeans/non_scaled/'
# t_a3_N1_s.to_csv(f'{pastadriveA}a3_N1_s.csv')
# t_a3_N3_s.to_csv(f'{pastadriveA}a3_N3_s.csv')
# t_a3_N5_s.to_csv(f'{pastadriveA}a3_N5_s.csv')
# t_a4_N1_s.to_csv(f'{pastadriveA}a4_N1_s.csv')
# t_a4_N3_s.to_csv(f'{pastadriveA}a4_N3_s.csv')
# t_a4_N5_s.to_csv(f'{pastadriveA}a4_N5_s.csv')
# t_a5_N1_s.to_csv(f'{pastadriveA}a5_N1_s.csv')
# t_a5_N3_s.to_csv(f'{pastadriveA}a5_N3_s.csv')
# t_a5_N5_s.to_csv(f'{pastadriveA}a5_N5_s.csv')
# -
# # Teste de silhouette
# +
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.cm as cm
# Clustering hyper-parameters: N features, lag D, moving-average width M.
N = 5
D = 10
M = 1
n_clusters = 3
grandeza = "CorrenteRMS"
# -
# ## Silhouette média das unidades do modelo A
# +
# For each of the four units of compressor model A: treat the data, run
# k-means and draw the silhouette diagnostic plots.
for j in range(4):
    # Select and treat the sample of the current unit.
    if j == 0:  # Unit A2
        name_unidade = "A2"
        dados_tratados = aplica_tratamento(a2_1, a2_2, a2_3, 'a', N, 20)
    elif j == 1:  # Unit A3
        name_unidade = "A3"
        dados_tratados = aplica_tratamento(a3_1, a3_2, a3_3, 'a', N, 20)
    elif j == 2:  # Unit A4
        name_unidade = "A4"
        dados_tratados = aplica_tratamento(a4_1, a4_2, a4_3, 'a', N, 20)
    elif j == 3:  # Unit A5
        name_unidade = "A5"
        dados_tratados = aplica_tratamento(a5_1, a5_2, a5_3, 'a', N, 20)
    # k-means on the treated data: returns the predictors plus cluster labels.
    dados_tratados, modelo = dif_medias(dados_tratados, grandeza, "a_m" + str(M) + "_" + str(D), N, D, M)
    # Keep only the predictor columns that were used in the clustering.
    # Fix: the first column previously hard-coded "CorrenteRMS" instead of
    # using `grandeza`, which broke this cell for any other quantity.
    dados_filtrados = pd.DataFrame({(grandeza + "_MA_" + str(M)): dados_tratados[grandeza + "_MA_" + str(M)]})
    for n in range(1, N):
        # Lagged columns follow the '<grandeza>_MA_<M>(K-<n*D>)' pattern.
        dados_filtrados[grandeza + "_MA_" + str(M) + "(K-" + str(n*D) + ")"] = dados_tratados[grandeza + "_MA_" + str(M) + "(K-" + str(n*D) + ")"]
    # Rows with NaN refer to instants before the start of the run and were
    # not used by k-means.
    dados_filtrados = dados_filtrados.dropna()
    cluster_labels = dados_tratados["Clusters"]
    silhouette_avg = silhouette_score(dados_filtrados, cluster_labels)
    print("A média de silhouette da unidade", name_unidade , "é :", silhouette_avg)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    fig.set_size_inches(18, 7)
    ax1.set_xlim([-0.3, 1])
    valores_silhouette = silhouette_samples(dados_filtrados, cluster_labels)
    y_lower = 10
    for i in range(n_clusters):
        # Aggregate and sort the silhouette values of cluster i.
        valor_silhouette_unico = valores_silhouette[cluster_labels == i]
        valor_silhouette_unico.sort()
        size_cluster_i = valor_silhouette_unico.shape[0]
        y_upper = y_lower + size_cluster_i
        color = cm.nipy_spectral(float(i) / n_clusters)
        ax1.fill_betweenx(
            np.arange(y_lower, y_upper),
            0,
            valor_silhouette_unico,
            facecolor=color,
            edgecolor=color,
            alpha=0.7,
        )
        # Label the silhouette plots with their cluster numbers at the middle.
        ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
        # Compute the new y_lower for the next plot (10 for the 0 samples).
        y_lower = y_upper + 10
    ax1.set_title("The silhouette plot for the various clusters.")
    ax1.set_xlabel("The silhouette coefficient values")
    ax1.set_ylabel("Cluster label")
    # Vertical line at the average silhouette score of all samples.
    ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
    ax1.set_yticks([])  # Clear the y-axis labels / ticks
    ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
    # Second plot: the actual clusters in the first two feature dimensions.
    colors = cm.nipy_spectral(cluster_labels.astype(float) / n_clusters)
    if n_clusters>1:
        ax2.scatter(
            dados_filtrados[f'{grandeza}_MA_{M}'], dados_filtrados[f'{grandeza}_MA_{M}(K-{D})'], marker=".", s=30, lw=0, alpha=0.7, c=colors, edgecolor="k"
        )
    else:
        ax2.scatter(
            dados_filtrados[f'{grandeza}_MA_{M}'], np.zeros(len(dados_filtrados)), marker=".", s=30, lw=0, alpha=0.7, c=colors, edgecolor="k"
        )
    # Label the clusters: white circles at the cluster centers.
    centers = modelo.cluster_centers_
    ax2.scatter(
        centers[:, 0],
        centers[:, 1],
        marker="o",
        c="white",
        alpha=1,
        s=200,
        edgecolor="k",
    )
    for i, c in enumerate(centers):
        ax2.scatter(c[0], c[1], marker="$%d$" % i, alpha=1, s=50, edgecolor="k")
    ax2.set_title("The visualization of the clustered data.")
    ax2.set_xlabel("Feature space for the 1st feature")
    ax2.set_ylabel("Feature space for the 2nd feature")
    plt.suptitle(
        "Silhouette da Unidade" + name_unidade,
        fontsize=14,
        fontweight="bold",
    )
    plt.show()
# -
# + colab={"base_uri": "https://localhost:8080/"} id="3BHS5CwhHPj0" outputId="bdc4eb4b-d79e-43f0-f197-866f327ef9f5"
# Build the cluster tables of unit A2 for N = 1, 3 and 5 features.
t_a2_N1_ns, t_a2_N3_ns, t_a2_N5_ns = (
    cria_tabelas('amostra2', a2_1, a2_2, a2_3, 'a2', processadas, 'Porcentagem_v3/Amostra2', n, 20)
    for n in (1, 3, 5)
)
# t_a3_N1_ns = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 1, 20)
# t_a3_N3_ns = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 3, 20)
# t_a3_N5_ns = cria_tabelas('amostra3', a3_1,a3_2,a3_3, 'a3', processadas,'Porcentagem_v3/Amostra3', 5, 20)
# t_a4_N1_ns = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 1, 20)
# t_a4_N3_ns = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 3, 20)
# t_a4_N5_ns = cria_tabelas('amostra4', a4_1,a4_2,a4_3, 'a4', processadas,'Porcentagem_v3/Amostra4', 5, 20)
# t_a5_N1_ns = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 1, 20)
# t_a5_N3_ns = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 3, 20)
# t_a5_N5_ns = cria_tabelas('amostra5', a5_1,a5_2,a5_3, 'a5', processadas,'Porcentagem_v3/Amostra5', 5, 20)
# -
#
# + id="SU6G7kx0HPj7"
# pastadriveA = '/content/drive/My Drive/Nicolas/2021-08 - Cluster dados/compressor A/non_scaled/'
pastadriveA = 'D:/Documentos/Amaciamento/Clusterizações/Codigo_Nicolas/'
# Export the A2 cluster tables as CSV files.
for sufixo, tabela_a2 in (('N1', t_a2_N1_ns), ('N3', t_a2_N3_ns), ('N5', t_a2_N5_ns)):
    tabela_a2.to_csv(f'{pastadriveA}a2_{sufixo}_ns.csv')
# t_a3_N1_ns.to_csv(f'{pastadriveA}a3_N1_ns.csv')
# t_a3_N3_ns.to_csv(f'{pastadriveA}a3_N3_ns.csv')
# t_a3_N5_ns.to_csv(f'{pastadriveA}a3_N5_ns.csv')
# t_a4_N1_ns.to_csv(f'{pastadriveA}a4_N1_ns.csv')
# t_a4_N3_ns.to_csv(f'{pastadriveA}a4_N3_ns.csv')
# t_a4_N5_ns.to_csv(f'{pastadriveA}a4_N5_ns.csv')
# t_a5_N1_ns.to_csv(f'{pastadriveA}a5_N1_ns.csv')
# t_a5_N3_ns.to_csv(f'{pastadriveA}a5_N3_ns.csv')
# t_a5_N5_ns.to_csv(f'{pastadriveA}a5_N5_ns.csv')
# + id="dWHsr_TUtMeV"
# # Amostra B5 , 1 feature
# t_b5_N1_s = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 1, 40)
# # Amostra B5 , 3 features
# t_b5_N3_s = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 3, 40)
# # Amostra B5 , 5 features
# t_b5_N5_s = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 5, 40)
# # Amostra B10 , 1 feature
# t_b10_N1_s = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 1, 40)
# # Amostra B10 , 3 features
# t_b10_N3_s = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 3, 40)
# # Amostra B10 , 5 features
# t_b10_N5_s = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 5, 40)
# # Amostra B11 , 1 feature
# t_b11_N1_s = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 1, 40)
# # Amostra B11 , 3 features
# t_b11_N3_s = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 3, 40)
# # Amostra B11 , 5 features
# t_b11_N5_s = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 5, 40)
# + id="5JM_HheWtXwS"
# # exportando as tabelas em csv para o drive
# pastadriveB = '/content/drive/My Drive/Nicolas/IMG-Clusters_X_Tempo/Artigo_kmeans/compressor B/scaled/'
# t_b5_N1_s.to_csv(f'{pastadriveB}b5_N1_s.csv')
# t_b5_N3_s.to_csv(f'{pastadriveB}b5_N3_s.csv')
# t_b5_N5_s.to_csv(f'{pastadriveB}b5_N5_s.csv')
# t_b10_N1_s.to_csv(f'{pastadriveB}b10_N1_s.csv')
# t_b10_N3_s.to_csv(f'{pastadriveB}b10_N3_s.csv')
# t_b10_N5_s.to_csv(f'{pastadriveB}b10_N5_s.csv')
# t_b11_N1_s.to_csv(f'{pastadriveB}b11_N1_s.csv')
# t_b11_N3_s.to_csv(f'{pastadriveB}b11_N3_s.csv')
# t_b11_N5_s.to_csv(f'{pastadriveB}b11_N5_s.csv')
# + id="fTfiCUfxVIjT"
# t_b5_N1_ns = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 1, 40)
# t_b5_N3_ns = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 3, 40)
# t_b5_N5_ns = cria_tabelas('amostraB5', b5_1,b5_2,b5_3, 'b5', processadas,'Porcentagem_v3/AmostraB5', 5, 40)
# t_b7_N1_ns = cria_tabelas('amostraB7', b7_1,b7_2,b7_3, 'b7', processadas,'Porcentagem_v3/AmostraB7', 1, 40)
# t_b7_N3_ns = cria_tabelas('amostraB7', b7_1,b7_2,b7_3, 'b7', processadas,'Porcentagem_v3/AmostraB7', 3, 40)
# t_b7_N5_ns = cria_tabelas('amostraB7', b7_1,b7_2,b7_3, 'b7', processadas,'Porcentagem_v3/AmostraB7', 5, 40)
# t_b8_N1_ns = cria_tabelas('amostraB8', b8_1,b8_2,b8_3, 'b8', processadas,'Porcentagem_v3/AmostraB8', 1, 40)
# t_b8_N3_ns = cria_tabelas('amostraB8', b8_1,b8_2,b8_3, 'b8', processadas,'Porcentagem_v3/AmostraB8', 3, 40)
# t_b8_N5_ns = cria_tabelas('amostraB8', b8_1,b8_2,b8_3, 'b8', processadas,'Porcentagem_v3/AmostraB8', 5, 40)
# t_b10_N1_ns = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 1, 40)
# t_b10_N3_ns = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 3, 40)
# t_b10_N5_ns = cria_tabelas('amostraB10', b10_1,b10_2,b10_3, 'b10', processadas,'Porcentagem_v3/AmostraB10', 5, 40)
# t_b11_N1_ns = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 1, 40)
# t_b11_N3_ns = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 3, 40)
# t_b11_N5_ns = cria_tabelas('amostraB11', b11_1,b11_2,b11_3, 'b11', processadas,'Porcentagem_v3/AmostraB11', 5, 40)
# t_b12_N1_ns = cria_tabelas('amostraB12', b12_1,b12_2,b12_3, 'b12', processadas,'Porcentagem_v3/AmostraB12', 1, 40)
# t_b12_N3_ns = cria_tabelas('amostraB12', b12_1,b12_2,b12_3, 'b12', processadas,'Porcentagem_v3/AmostraB12', 3, 40)
# t_b12_N5_ns = cria_tabelas('amostraB12', b12_1,b12_2,b12_3, 'b12', processadas,'Porcentagem_v3/AmostraB12', 5, 40)
# t_b15_N1_ns = cria_tabelas('amostraB15', b15_1,b15_2,b15_3, 'b15', processadas,'Porcentagem_v3/AmostraB15', 1, 40)
# t_b15_N3_ns = cria_tabelas('amostraB15', b15_1,b15_2,b15_3, 'b15', processadas,'Porcentagem_v3/AmostraB15', 3, 40)
# t_b15_N5_ns = cria_tabelas('amostraB15', b15_1,b15_2,b15_3, 'b15', processadas,'Porcentagem_v3/AmostraB15', 5, 40)
# + id="fWWblR2XV5gr"
# pastadriveB = '/content/drive/My Drive/Nicolas/2021-08 - Cluster dados/compressor B/non_scaled/'
# t_b5_N1_ns.to_csv(f'{pastadriveB}b5_N1_ns.csv')
# t_b5_N3_ns.to_csv(f'{pastadriveB}b5_N3_ns.csv')
# t_b5_N5_ns.to_csv(f'{pastadriveB}b5_N5_ns.csv')
# t_b7_N1_ns.to_csv(f'{pastadriveB}b7_N1_ns.csv')
# t_b7_N3_ns.to_csv(f'{pastadriveB}b7_N3_ns.csv')
# t_b7_N5_ns.to_csv(f'{pastadriveB}b7_N5_ns.csv')
# t_b8_N1_ns.to_csv(f'{pastadriveB}b8_N1_ns.csv')
# t_b8_N3_ns.to_csv(f'{pastadriveB}b8_N3_ns.csv')
# t_b8_N5_ns.to_csv(f'{pastadriveB}b8_N5_ns.csv')
# t_b10_N1_ns.to_csv(f'{pastadriveB}b10_N1_ns.csv')
# t_b10_N3_ns.to_csv(f'{pastadriveB}b10_N3_ns.csv')
# t_b10_N5_ns.to_csv(f'{pastadriveB}b10_N5_ns.csv')
# t_b11_N1_ns.to_csv(f'{pastadriveB}b11_N1_ns.csv')
# t_b11_N3_ns.to_csv(f'{pastadriveB}b11_N3_ns.csv')
# t_b11_N5_ns.to_csv(f'{pastadriveB}b11_N5_ns.csv')
# t_b12_N1_ns.to_csv(f'{pastadriveB}b12_N1_ns.csv')
# t_b12_N3_ns.to_csv(f'{pastadriveB}b12_N3_ns.csv')
# t_b12_N5_ns.to_csv(f'{pastadriveB}b12_N5_ns.csv')
# t_b15_N1_ns.to_csv(f'{pastadriveB}b15_N1_ns.csv')
# t_b15_N3_ns.to_csv(f'{pastadriveB}b15_N3_ns.csv')
# t_b15_N5_ns.to_csv(f'{pastadriveB}b15_N5_ns.csv')
# + colab={"base_uri": "https://localhost:8080/", "height": 456} id="pCRfDlvVQQKY" outputId="9cd35bd8-92b6-4a5f-9492-b0e378c07808"
# t_b11_N1_ns
# Load a previously exported cluster table for inspection.
caminho_tabela = '/content/drive/My Drive/Nicolas/2021-08 - Cluster dados/compressor A/non_scaled/a3_N5_ns.csv'
tabelab = pd.read_csv(caminho_tabela)
tabelab
# + id="zi-MFY8CmiC2"
import matplotlib as mpl
# + id="YPTuu8GnSVYK"
def pontos_cluster(amostra):
    """Scatter CorrenteRMS vs. time, one column of axes per cluster id (0-2).

    The original repeated the same scatter/label code three times (one
    copy per cluster id); the repetition was collapsed into a loop.

    Parameters
    ----------
    amostra : DataFrame
        Clustered table containing 'Ensaio', 'Tempo', 'CorrenteRMS' and
        'Clusters' columns. One row of subplots is drawn per run.
    """
    ensaios_amostra = list(amostra['Ensaio'].unique())
    fig, ax = plt.subplots(len(ensaios_amostra), 3,
                           figsize=(len(ensaios_amostra) * 7, len(ensaios_amostra) * 5))
    sns.set(palette='bright', color_codes=True, style='whitegrid')
    for k, ensaio in enumerate(ensaios_amostra):
        for cluster_id in range(3):
            # Points of this run that were assigned to cluster_id.
            dados = amostra[(amostra['Ensaio'] == ensaio) & (amostra['Clusters'] == cluster_id)]
            sns.scatterplot(x='Tempo', y='CorrenteRMS', hue='Clusters', data=dados, ax=ax[k, cluster_id])
            ax[k, cluster_id].set_title(ensaio)
            ax[k, cluster_id].set_ylabel('CorrenteRMS [A]')
            ax[k, cluster_id].set_xlabel('Tempo [h]')
        # plt.savefig(f'Gráfico de dispersão por cluster {ensaio}')
    fig.tight_layout()
    plt.show()
# + id="to3TTKLYlGIk"
def plot_amostra(amostra, coluna, colunalegenda):
    """Scatter ``coluna`` vs. time for the three runs, coloured by cluster.

    Removed the unused locals ``legenda`` and ``leg`` from the original;
    behaviour is otherwise unchanged.

    Parameters
    ----------
    amostra : DataFrame
        Clustered table containing 'Ensaio', 'Tempo', 'Clusters' and
        the column ``coluna``.
    coluna : str
        Column plotted on the y axis.
    colunalegenda : str
        y-axis label (with unit) shown on the first subplot only.
    """
    ensaios_amostra = list(amostra['Ensaio'].unique())
    # Human-readable subplot titles, in the order the runs appear.
    ensaiosl = ["Não amaciado", "Amaciado 1", "Amaciado 2"]
    fig, ax = plt.subplots(1, 3, figsize=(14, 5))
    sns.set(palette='bright', color_codes=True, style='whitegrid', font_scale=1.2)
    # Fixed colours and order so cluster ids keep the same colour everywhere.
    palettes = {0: "C0", 1: "C1", 2: "C2"}
    hue_order = [0, 1, 2]
    for k, ensaio in enumerate(ensaios_amostra):
        sns.scatterplot(x='Tempo', y=coluna, hue='Clusters', hue_order=hue_order,
                        data=amostra[amostra['Ensaio'] == ensaio], ax=ax[k], palette=palettes)
        ax[k].set_title(ensaiosl[k])
        ax[k].set_xlabel('Tempo [h]')
        if k == 0:
            # Only the leftmost subplot shows the y-axis label and tick labels.
            ax[k].set_ylabel(f'{colunalegenda}')
        else:
            ax[k].set_yticklabels([])
            ax[k].set_ylabel("")
    plt.tight_layout()
    plt.show()
# + id="yUgHRuAEuezt"
# rel = aplica_tratamento(a3_1,a3_2,a3_3,"a3", 5, 20)
# rot,_ = dif_medias(rel,"CorrenteRMS",'a3_m1_1',5,1,1)
# Treat sample A2 (N = 1) and cluster its CorrenteRMS with M = 1, D = 1.
rel = aplica_tratamento(a2_1, a2_2, a2_3, "a2", 1, 20)
rot, _ = dif_medias(rel, "CorrenteRMS", 'a2_m1_1', 1, 1, 1)
# Treat sample A4 (N = 5) and cluster its CorrenteRMS with M = 1, D = 1.
rel4 = aplica_tratamento(a4_1, a4_2, a4_3, "a4", 5, 20)
rot4, _ = dif_medias(rel4, "CorrenteRMS", 'a4_m1_1', 5, 1, 1)
rel
# + id="CN-5dVD-7D25"
def _pct_cluster_por_tempo(amostra, ensaio):
    """Return, for one ensaio, the percentage of samples per cluster per hour.

    The returned frame has columns 'Tempo', 'Clusters', 'Quantidade_x'
    (per-cluster count), 'Quantidade_y' (hourly total) and 'porcentagem'.
    """
    dados = amostra[amostra['Ensaio'] == ensaio].copy()
    # Discretize time to whole hours so counts are grouped per hour.
    dados['Tempo'] = dados['Tempo'].astype(int)
    # Per (hour, cluster) counts.
    contagem = dados.groupby('Tempo')['Clusters'].value_counts()
    contagem = contagem.reset_index(name='Quantidade')
    # Total samples per hour.
    total = contagem.groupby('Tempo')['Quantidade'].sum()
    # Merge suffixes the duplicate column: Quantidade_x = per-cluster
    # count, Quantidade_y = hourly total.
    contagem = contagem.merge(total, on='Tempo')
    contagem['porcentagem'] = (contagem.Quantidade_x / contagem.Quantidade_y) * 100
    return contagem


def rel_pct_cluster(amostra, ensaio1, ensaio2, ensaio3):
    """Bar-plot, for three ensaios, the hourly percentage of samples in each cluster.

    Parameters
    ----------
    amostra : pandas.DataFrame
        Must contain 'Ensaio', 'Tempo' and 'Clusters' columns.
    ensaio1, ensaio2, ensaio3 : str
        Ensaio identifiers plotted left-to-right as
        "Não amaciado", "Amaciado 1", "Amaciado 2".
    """
    # Figure/style setup (identical styling for the three panels).
    fig, ax = plt.subplots(1, 3, figsize=(14, 5))
    sns.set(palette='bright', color_codes=True, style='whitegrid', font_scale=1.2)
    palettes = {0: "C0", 1: "C1", 2: "C2"}
    hue_order = [0, 1, 2]
    plt.xticks(rotation=70)
    titulos = ('Não amaciado', 'Amaciado 1', 'Amaciado 2')
    # The per-ensaio computation was previously copy-pasted three times;
    # it now lives in _pct_cluster_por_tempo.
    for pos, ensaio in enumerate((ensaio1, ensaio2, ensaio3)):
        pct = _pct_cluster_por_tempo(amostra, ensaio)
        sns.barplot(x=pct['Tempo'], y=pct['porcentagem'], hue=pct["Clusters"],
                    hue_order=hue_order, ax=ax[pos], palette=palettes)
        ax[pos].set_title(titulos[pos])
        ax[pos].set_xlabel('Tempo [h]')
        if pos == 0:
            ax[pos].set_ylabel('Porcentagem [%]')
        else:
            # Only the leftmost panel keeps a y label and tick labels.
            ax[pos].set_ylabel('')
            ax[pos].set_yticklabels([])
        ax[pos].tick_params(axis='x', rotation=50)
    plt.tight_layout()
    plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 360} id="nKvHJyQz6aiE" outputId="d006881f-25ff-465f-9b2b-431c69ab6439"
plot_amostra(rot, "CorrenteRMS", "Valor eficaz da corrente [A]")
# + colab={"base_uri": "https://localhost:8080/", "height": 360} id="iJRe7fC6H02e" outputId="c7c23957-4da2-407b-a69d-060059cf17ae"
rel_pct_cluster(rot,"a3_1_N", "a3_2_A","a3_3_A")
# + colab={"base_uri": "https://localhost:8080/", "height": 360} id="bdSRSNojvAd3" outputId="99645fb4-cc5f-4b39-865b-c010561bea00"
plot_amostra(rot4, "CorrenteRMS", "Valor eficaz da corrente [A]")
# + colab={"base_uri": "https://localhost:8080/", "height": 360} id="N_0GMqNVIctb" outputId="2b21bae5-d1dd-4d96-8776-2a311e55fbc0"
rel_pct_cluster(rot4,"a4_1_N", "a4_2_A","a4_3_A")
# + id="TAtmpgIN39vZ"
# Colunas usadas para análise
processadas = ['CorrenteRMS', 'CorrenteVariancia', 'CorrenteCurtose', 'VibracaoCalotaInferiorRMS', 'VibracaoCalotaSuperiorRMS',
'VibracaoCalotaInferiorCurtose','VibracaoCalotaSuperiorCurtose',
'VibracaoCalotaInferiorVariancia', 'VibracaoCalotaSuperiorVariancia', 'Vazao']
# + [markdown] id="TFjhmpt32lXF"
# Comparativo entre grandezas
#
# + id="t7C58fiY2njl"
corr4 = aplica_tratamento(a4_1,a4_2,a4_3,"a4", 5, 20)
corrfim4,_ = dif_medias(corr4,"CorrenteRMS",'a4_m1_1',5,1,1)
vibinf4 = aplica_tratamento(a4_1,a4_2,a4_3,"a4", 5, 20)
vibinffim4,_ = dif_medias(vibinf4,"VibracaoCalotaInferiorRMS",'a4_m1_1',5,1,1)
vibsup4 = aplica_tratamento(a4_1,a4_2,a4_3,"a4", 5, 20)
vibsupfim4,_ = dif_medias(vibsup4,"VibracaoCalotaSuperiorRMS",'a4_m1_1',5,1,1)
vaz4 = aplica_tratamento(a4_1,a4_2,a4_3,"a4", 5, 20)
vazfim4,_ = dif_medias(vaz4,"Vazao",'a4_m1_1',5,1,1)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_A7xcJY44ODu" outputId="d88e9eee-e2b3-4e09-ab04-6bdac6ea1366"
plot_amostra(corrfim4, "CorrenteRMS", "Valor eficaz da corrente [A]")
plot_amostra(vibinffim4, "VibracaoCalotaInferiorRMS", "Valor eficaz da vibração inferior [g]")
plot_amostra(vibsupfim4, "VibracaoCalotaSuperiorRMS", "Valor eficaz da vibração superior [g]")
plot_amostra(vazfim4, "Vazao", "Vazão [Kg/h]")
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="3KIBBg_T4abs" outputId="2d92412d-5517-4166-b7c3-d64f2b963957"
rel_pct_cluster(corrfim4,"a4_1_N", "a4_2_A","a4_3_A")
rel_pct_cluster(vibinffim4,"a4_1_N", "a4_2_A","a4_3_A")
rel_pct_cluster(vibsupfim4,"a4_1_N", "a4_2_A","a4_3_A")
rel_pct_cluster(vazfim4,"a4_1_N", "a4_2_A","a4_3_A")
# + id="iWbDs5_S4ra_"
corr3 = aplica_tratamento(a3_1,a3_2,a3_3,"a3", 5, 20)
corrfim3,_ = dif_medias(corr3,"CorrenteRMS",'a3_m1_1',5,1,1)
vibinf3 = aplica_tratamento(a3_1,a3_2,a3_3,"a3", 5, 20)
vibinffim3,_ = dif_medias(vibinf3,"VibracaoCalotaInferiorRMS",'a3_m1_1',5,1,1)
vibsup3 = aplica_tratamento(a3_1,a3_2,a3_3,"a3", 5, 20)
vibsupfim3,_ = dif_medias(vibsup3,"VibracaoCalotaSuperiorRMS",'a3_m1_1',5,1,1)
vaz3 = aplica_tratamento(a3_1,a3_2,a3_3,"a3", 5, 20)
vazfim3,_ = dif_medias(vaz3,"Vazao",'a3_m1_1',5,1,1)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="PGxInQ1r6ad3" outputId="4c6ea6a2-3203-42a8-e36f-f86e6e90dd51"
plot_amostra(corrfim3, "CorrenteRMS", "Valor eficaz da corrente [A]")
plot_amostra(vibinffim3, "VibracaoCalotaInferiorRMS", "Valor eficaz da vibração inferior [g]")
plot_amostra(vibsupfim3, "VibracaoCalotaSuperiorRMS", "Valor eficaz da vibração superior [g]")
plot_amostra(vazfim3, "Vazao", "Vazão [Kg/h]")
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="m-9TlDcP6ad4" outputId="f0e6ec2c-a2e2-4c9c-c5a1-15eaf34f1c0d"
rel_pct_cluster(corrfim3,"a3_1_N", "a3_2_A","a3_3_A")
rel_pct_cluster(vibinffim3,"a3_1_N", "a3_2_A","a3_3_A")
rel_pct_cluster(vibsupfim3,"a3_1_N", "a3_2_A","a3_3_A")
rel_pct_cluster(vazfim3,"a3_1_N", "a3_2_A","a3_3_A")
# + id="fALpy7_x7aPg"
def calcular_wcss(data):
    """Return the within-cluster sum of squares (KMeans inertia) for k = 1..9.

    Used to draw an elbow curve for choosing the number of clusters.

    Side effect (kept for backward compatibility): `data` gains a
    'Clusters' column holding the labels of the last (k = 9) fit.
    """
    # Bug fix: the original fitted on `data` AFTER inserting 'Clusters',
    # so from k = 2 onward the previous iteration's labels leaked into the
    # feature space and distorted the WCSS. Fit only on the original
    # feature columns.
    feature_cols = [c for c in data.columns if c != 'Clusters']
    wcss = []
    for k in range(1, 10):
        kmeans = KMeans(n_clusters=k)
        kmeans.fit(X=data[feature_cols])
        data['Clusters'] = kmeans.labels_
        wcss.append(kmeans.inertia_)
    return wcss
# + id="i9CfWIrlQAgA"
amostra = aplica_tratamento(a2_1,a2_2,a2_3,'a2', 1, 20)
# + colab={"base_uri": "https://localhost:8080/", "height": 339} id="yS07p7GoLxW6" outputId="eb559ee6-e2df-472a-c17a-268d7f68f5f7"
warnings.simplefilter('ignore')
soma_quadrados = calcular_wcss(amostra[['CorrenteRMS_MA_1']])
plt.figure(figsize=(10,5))
plt.plot(soma_quadrados)
plt.xlabel('Número de clusters')
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 328} id="FLn4i2JmRUEJ" outputId="e048dfb5-e13c-4e3e-8879-a7ac5178bf73"
pd.DataFrame(soma_quadrados)
# + id="yJe3mhDRSGUT"
# pastadrive = '/content/drive/My Drive/Nicolas/2021-08 - Cluster dados/'
# pd.DataFrame(soma_quadrados).to_csv(f'{pastadrive}A2_D1_M1_N1_elbow.csv')
|
notebooks/artigo_kmeans.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example of using BigQuery and Pandas together
#
# This notebook illustrates a typical data science workflow that leverages the relative advantages of both Pandas and BigQuery:
# * Pandas for transformations and graphics.
# * BigQuery for scale
#
# ## Install library and extensions if needed
#
# On Notebook instances on Google Cloud, the BigQuery client library is already installed.
# #!python -m pip install google-cloud-bigquery
# %matplotlib inline
# #%load_ext google.cloud.bigquery
PROJECT='cloud-training-demos' # CHANGE THIS
#
# ## Problem
#
# We believe that if someone rents a bicycle for less than 10 minutes and returns the bicycle to the same station that they rented it at, it is likely that the bicycle has a problem. We'll call this a "bad" trip.
#
# We want to send a crew out to examine a few of the stations that had lots of bad trips to see if there are any systemic problems.
# ### Find stations with problem bikes
# As a first step, let's find which stations had the most "bad" trips in 2015
# %%bigquery badtrips --project $PROJECT
SELECT *, bad_trips / num_trips AS fraction_bad FROM (
SELECT
start_station_name
, SUM(IF(duration < 600 AND start_station_name = end_station_name, 1, 0)) AS bad_trips
, COUNT(start_station_name) as num_trips
FROM `bigquery-public-data`.london_bicycles.cycle_hire
WHERE EXTRACT(YEAR FROM start_date) = 2015
GROUP BY start_station_name
HAVING num_trips > 10
)
ORDER BY fraction_bad DESC
# ### Statistics
#
# Let's look at the statistics of the dataset to learn what kind of threshold of the fraction_bad is appropriate
badtrips.describe()
# Looks like the fraction_bad ranges from 0 to 0.4, but it is not clear how relevant this ratio is because the stations also vary quite dramatically. For example, the number of trips ranges from 11 to 95740. A fraction_bad of 0.4 is not terrible if num_trips=11.
#
# Let's look at a scatter plot to see if there is any clear trend here.
badtrips.plot.scatter('num_trips', 'fraction_bad');
# Let's zoom in a bit and add a line of best fit
import seaborn as sns
ax = sns.regplot(badtrips['num_trips'],badtrips['fraction_bad']);
ax.set_ylim(0, 0.05);
# Indeed, we see that higher fraction_bad are associated with lower num_trips. We need to use the fraction_bad carefully.
# ### Banding
# Let's pick the 5 worst of the really busy stations, 5 of the next most busy, etc.
# We can do this by creating
# four different bands based on each station's num_trips quantile and, within each band, finding the 5 worst stations.
# +
stations_to_examine = []
import pandas as pd

# Split stations into four num_trips quantile bands (0.2-0.4, 0.4-0.6,
# 0.6-0.8, 0.8-1.0) and keep the 5 stations with the highest fraction_bad
# in each band. The bottom 20% of stations (by num_trips) is never examined.
# NOTE(review): the strict `<` in the query also excludes the single
# busiest station (num_trips equal to the 1.0 quantile) -- confirm intended.
for band in range(1,5):
    min_trips = badtrips['num_trips'].quantile(0.2*(band))
    max_trips = badtrips['num_trips'].quantile(0.2*(band+1))
    query = 'num_trips >= {} and num_trips < {}'.format(min_trips, max_trips)
    print(query) # band
    stations = badtrips.query(query)
    # Top-5 worst stations of this band by fraction of bad trips.
    stations = stations.sort_values(by=['fraction_bad'], ascending=False)[:5]
    print(stations) # 5 worst
    stations_to_examine.append(stations)
    print()
# -
# Notice how, by banding, we are able to use lower thresholds for the busier stations. Had we chosen a single threshold, our crew would not have gotten to look at a diverse set of stations.
# ### Stations to examine
stations_to_examine = pd.concat(stations_to_examine)
stations_to_examine
# ### Write the stations to a new table in BigQuery
# !pip install pyarrow
# +
from google.cloud import bigquery

# Write the selected stations to a BigQuery table so they can be joined
# against the public cycle_stations table in a later cell.
bq = bigquery.Client(project=PROJECT)
table_id = '{}.ch05eu.bad_bikes'.format(PROJECT)
job = bq.load_table_from_dataframe(stations_to_examine, table_id)
job.result() # blocks and waits
print("Loaded {} rows into {}".format(job.output_rows, table_id))
# -
# ### Plot map for crew
# Add latitude and longitude through a join
# %%bigquery stations_to_examine --project $PROJECT
SELECT
start_station_name AS station_name
, num_trips
, fraction_bad
, latitude
, longitude
FROM ch05eu.bad_bikes AS bad
JOIN `bigquery-public-data`.london_bicycles.cycle_stations AS s
ON bad.start_station_name = s.name
# !pip install folium
import folium

# Center the map on London and drop one marker per flagged station,
# labeled with the station name, for the maintenance crew.
map_pts = folium.Map(location=[51.5, -0.15], zoom_start=12)
for idx, row in stations_to_examine.iterrows():
    folium.Marker( location=[row['latitude'], row['longitude']], popup=row['station_name'] ).add_to(map_pts)
# Display the interactive map (notebook cell output).
map_pts
# Copyright 2019 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License
|
05_devel/pandas.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Exp 8 analysis
#
# See `./informercial/Makefile` for experimental
# details.
# +
import os
import numpy as np
from IPython.display import Image
import matplotlib
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
import seaborn as sns
sns.set_style('ticks')
matplotlib.rcParams.update({'font.size': 16})
matplotlib.rc('axes', titlesize=16)
from infomercial.exp import meta_bandit
from infomercial.local_gym import bandit
from infomercial.exp.meta_bandit import load_checkpoint
import gym
# +
# # ls ../data/exp2*
# -
# # Load and process data
data_path ="/Users/qualia/Code/infomercial/data/"
exp_name = "exp8"
num_exps = 50
num_episodes = 10000
env_names = [
"BanditOneHigh2-v0",
"BanditOneHigh10-v0",
"BanditOneHigh121-v0",
"BanditOneHigh1000-v0",
"BanditHardAndSparse2-v0",
"BanditHardAndSparse10-v0",
"BanditHardAndSparse121-v0",
"BanditHardAndSparse1000-v0"
]
# +
# Gather traces by bandit: scores, Qs in a big numpy array (n_exp, n_episodes)
# Each dict maps env name -> a (num_episodes, num_exps) array; `best` maps
# env name -> the best arm as recorded in the last checkpoint read.
scores_E = {}
scores_R = {}
values_E = {}
values_R = {}
controlling = {}
actions = {}
best = {}
for env in env_names:
    # Preallocate the arrays for this env
    scores_E[env] = np.zeros((num_episodes, num_exps))
    scores_R[env] = np.zeros((num_episodes, num_exps))
    values_E[env] = np.zeros((num_episodes, num_exps))
    values_R[env] = np.zeros((num_episodes, num_exps))
    controlling[env] = np.zeros((num_episodes, num_exps))
    actions[env] = np.zeros((num_episodes, num_exps))
    best[env] = None
    # Load and repackage
    for n in range(num_exps):
        # Checkpoint files are 1-indexed on disk, hence n+1.
        result = load_checkpoint(os.path.join(data_path, f"{exp_name}_{env}_{n+1}.pkl"))
        scores_E[env][:, n] = result["scores_E"]
        scores_R[env][:, n] = result["scores_R"]
        values_E[env][:, n] = result["values_E"]
        values_R[env][:, n] = result["values_R"]
        controlling[env][:, n] = result["policies"]
        actions[env][:, n] = result["actions"]
        best[env] = result["best"]
# -
#scores_E
best
# +
# Gather stats by bandit: total R, p_best[-100:-1], Avg score
total_R = {}
for env in env_names:
total_R[env] = np.zeros(num_exps)
for n in range(num_exps):
total = scores_R[env][:, n].sum()
total_R[env][n] = total
# Est. prob. that the action was correct.
p_best = {}
for env in env_names:
b = best[env]
p_best[env] = np.zeros(num_episodes)
for i in range(num_episodes):
actions_i = actions[env][i,:]
p_best[env][i] = np.sum(actions_i == b) / actions_i.size
# Avg scores
avg_scores_E = {}
avg_scores_R = {}
for env in env_names:
avg_scores_E[env] = np.zeros(num_episodes)
avg_scores_R[env] = np.zeros(num_episodes)
for i in range(num_episodes):
s_E_i = scores_E[env][i,:]
s_R_i = scores_R[env][i,:]
avg_scores_E[env][i] = s_E_i.mean()
avg_scores_R[env][i] = s_R_i.mean()
# -
avg_scores_E[env].shape
# # Learning performance
#
# For each bandit separatly.
def plot_env_performance(plot_names):
    """Draw a 3-row summary figure for each bandit env in `plot_names`:
    per-experiment arm choices, probability the chosen arm was the best,
    and per-episode E/R scores on a log scale.

    Relies on module-level globals built in earlier cells: num_episodes,
    num_exps, best, actions, p_best, scores_E, scores_R.
    """
    episodes = list(range(num_episodes))
    # Threshold line drawn on the score panel; presumably the tie
    # threshold used during training -- confirm against experiment config.
    tie_threshold = 1e-8
    for env in plot_names:
        # ---------------------
        # Re-init figure
        fig = plt.figure(figsize=(5, 6))
        fig.suptitle(f"{env}")
        grid = plt.GridSpec(3, 1, wspace=0.3, hspace=0.6)
        # ---------------------
        # Plot actions: one translucent scatter per experiment.
        plt.subplot(grid[0, 0])
        b = best[env]
        for n in range(num_exps):
            a = actions[env][:, n]
            plt.scatter(episodes, a, color="black", alpha=.1, s=2, label="Bandit")
        # plot best arm as a dashed reference line
        plt.plot(episodes, np.repeat(b, np.max(episodes)+1), color="red", alpha=0.8, ls='--', linewidth=2)
        # NOTE: the y-limit uses `a` from the last loop iteration only.
        plt.ylim(-.1, np.max(a)+1.1)
        plt.ylabel("Arm choice")
        plt.xlabel("Episode")
        # ---------------------
        # plot p_best: fraction of experiments choosing the best arm.
        plt.subplot(grid[1, 0])
        plt.scatter(episodes, p_best[env], color="red", alpha=0.4, s=2, label="E")
        plt.ylim(0,1)
        plt.ylabel("p(best)")
        plt.xlabel("Episode")
        # --------------------
        # Plot scores (E in purple, R in grey).
        plt.subplot(grid[2, 0])
        for n in range(num_exps-1):
            s_E = scores_E[env][:,n]
            s_R = scores_R[env][:,n]
            plt.scatter(episodes, s_E, color="purple", alpha=0.01, s=.1)
            plt.scatter(episodes, s_R, color="grey", alpha=0.01, s=.1)
        # Last one w/ labels for the legend (avoids duplicate legend entries)
        s_E = scores_E[env][:,-1]
        s_R = scores_R[env][:,-1]
        plt.scatter(episodes, s_E, color="purple", alpha=0.01, s=.1, label="E")
        plt.scatter(episodes, s_R, color="grey", alpha=0.01, s=.1, label="R")
        # Add epsilon (the tie threshold) as a dashed reference line
        plt.plot(episodes, np.repeat(tie_threshold, np.max(episodes)+1),
                 color="violet", alpha=0.8, ls='--', linewidth=2)
        plt.ylabel("log score")
        plt.xlabel("Episode")
        plt.semilogy()
        plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        _ = sns.despine()
# ## One high
# +
plot_names = env_names[0:4]
print(plot_names)
plot_env_performance(plot_names)
# -
# ## Sparse
# +
plot_names = env_names[4:8]
print(plot_names)
plot_env_performance(plot_names)
# -
# # Performance summary
#
# For all bandits.
# +
# # For all bandits plot total reward,
# # with a line added to indicate
# # the max. exp. total value
# # Convert to df
# df_total_R = None
# sns.boxplot(x="env", y="total", data=df_total_R, palette="Set2")
# sns.swarmplot(x="env", y="total", data=df_total_R, color=".25")
# _ = sns.despine()
# +
# # For all bandits plot avg p_best for last 100 episodes
# # Convert to df
# df_p_pest = None
# sns.boxplot(x="env", y="p", data=df_p_pest, palette="Set2")
# sns.swarmplot(x="env", y="p", data=df_p_pest, color=".25")
# _ = sns.despine()
# -
|
notebooks/exp8_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
### distance dependent state transition demo
# In this model the transition probability depends on the distance
# between the latent locations associated with each discrete state.
# Specifically, each state k has a location, \ell_k \in R^J, and
# the probability of transitioning is
# Pr(z_t = k | z_{t-1} = k')
# \propto exp(-||\ell_k - \ell_{k'}||/L) if k \neq k'
# \propto p_kk if k = k'
# where L is a length scale that we treat as a hyperparameter.
#
# Here the observations are marked point processes.
# +
#import os
#os.chdir("/Users/scott/Projects/ssm-xinyi")
import autograd.numpy as np
import autograd.numpy.random as npr
import matplotlib.pyplot as plt
from scipy.linalg import orthogonal_procrustes, norm
#matplotlib inline
import seaborn as sns
sns.set_style("white")
sns.set_context("talk")
from ssm.models import HMM
from ssm.util import find_permutation
#### clusterless HMM
#### input data format: tensor T by D0 by 1+D1
#### spikes = datas[:, :, 0]
#### marks = datas[:, :, 1:]
### e.g. parameters
npr.seed(1)
# +
#### Generate an HMM
# Set the parameters of the HMM
T = 2000 # number of time bins
K = 6 # number of discrete states, e.g. location
### Poisson
D0 = 10 # number of tetrodes
D1 = 50 # max number of events (Poisson)
D2 = 3 # spike count dimension (per event 0 or 1) + two mark dimensions
D = (D0, D1, D2) # tuple
# Make an HMM with distance-dependent transitions and clusterless observations
true_hmm = HMM(K, D, transitions='distance', observations="clusterless")
# Sample some data from the HMM
z, y = true_hmm.sample(T)
# likelihood of the sample under the generating model (reference line for EM)
true_ll = true_hmm.log_probability(y)
spikes = np.sum(y[:, :, :, 0], axis=2) # T by D0
marks = y[:, :, :, 1:] # T by D0 by D2 by D3
### true parameters ############
L = true_hmm.transitions.L # length scale/smoothing
ell = true_hmm.transitions.ell # latent 2D space
log_p = true_hmm.params[1][1]
prr = np.exp(log_p) # diagonal prob
# Rebuild the transition matrix from the latent locations:
# off-diagonal log-probs decay with pairwise distance / L, the diagonal
# comes from log_p, then rows are normalized.
Ps_dist = np.sqrt(np.sum((ell[:, :, None] - ell[:, :, None].T) ** 2, axis = 1))
log_P = -Ps_dist / L
log_P += np.diag(log_p)
Ps = np.exp(log_P)
Ps /= Ps.sum(axis=1, keepdims=True)
# +
### plot simulated data
##### ground spike trains
plt.figure(figsize=(8, 5)) # width, height
plt.plot(spikes + 3 * np.arange(D0), '-', linewidth=5)
plt.xlim(0, 60)
plt.ylim(0, 3*D0+8)
plt.title("spiking activity", fontsize=32)
plt.ylabel('tetrode id', fontsize=32)
plt.xticks(np.arange(0, 60, step=20), fontsize=32)
plt.yticks([])
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.show()
# +
##### marked point processes
plt.figure(figsize=(12, 9)) # width, height
plt.subplot(411)
plt.imshow(np.transpose(marks[:, 2, :, 0]), cmap="Spectral", vmin=-1.5, vmax=1.5, aspect="auto")
plt.xlim(0, 60)
plt.ylim(0, D1/2)
plt.title("marks, tetrode = 1, dim = 1", fontsize=32)
plt.yticks([])
plt.xticks([])
plt.ylabel("spike", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(412)
plt.imshow(np.transpose(marks[:, 2, :, 1]), cmap="Spectral", vmin=-1.5, vmax=1.5, aspect="auto")
plt.xlim(0, 60)
plt.ylim(0, D1/2)
plt.title("marks, tetrode = 1, dim = 2", fontsize=32)
plt.yticks([]);
plt.xticks([]);
plt.ylabel("spike", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(413)
plt.imshow(np.transpose(marks[:, 3, :, 0]), cmap="Spectral", vmin=-1.5, vmax=1.5, aspect="auto")
plt.xlim(0, 60)
plt.ylim(0, D1/2)
plt.title("marks, tetrode = 2, dim = 1", fontsize=32)
plt.yticks([]);
plt.xticks([]);
plt.ylabel("spike", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(414)
plt.imshow(np.transpose(marks[:, 3, :, 1]), cmap="Spectral", vmin=-1.5, vmax=1.5, aspect="auto")
plt.xlim(0, 60)
plt.ylim(0, D1/2)
plt.title("marks, tetrode = 2, dim = 2", fontsize=32)
plt.yticks([]);
plt.xticks([]);
plt.ylabel("spike", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.show()
# +
###### mark space
plt.figure(figsize=(8*1.5, 6))
plt.subplot(121)
a1 = marks[:, 2, :, 0]
a2 = marks[:, 2, :, 1]
b1 = np.reshape(a1, (np.product(a1.shape),))
b2 = np.reshape(a2, (np.product(a1.shape),))
plt.plot(b1,b2,'.g')
plt.title("mark space, tetrode = 1", fontsize=32)
plt.xlabel('mark dim = 1', fontsize=32)
plt.ylabel('mark dim = 2', fontsize=32)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.yticks(np.arange(-3, 3.01, step=2), fontsize=28)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(122)
a1 = marks[:, 3, :, 0]
a2 = marks[:, 3, :, 1]
b1 = np.reshape(a1, (np.product(a1.shape),))
b2 = np.reshape(a2, (np.product(a1.shape),))
plt.plot(b1,b2,'.y')
plt.title("mark space, tetrode = 2", fontsize=32)
plt.xlabel('mark dim = 1', fontsize=32)
plt.ylabel('mark dim = 2', fontsize=32)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.yticks(np.arange(-3, 3.01, step=2), fontsize=28)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.show()
# +
### ############################################### ###
### Fit an HMM to this synthetic data ###
### ############################################### ###
N_iters = 30
hmm = HMM(K, D, transitions="distance", observations="clusterless")
hmm_lls = hmm.fit(y, method="em", num_em_iters=N_iters)
#hmm_lls = hmm.fit(y)
# Find a permutation of the states that best matches the true and inferred states
hmm.permute(find_permutation(z, hmm.most_likely_states(y)))
### AssertionError ssm\util\line 41
hmm_z = hmm.most_likely_states(y)
# +
##### ####################
### parameter estimates #################
L_hmm = hmm.transitions.L # length scale/smoothing
ell_hmm = hmm.transitions.ell # latent 2D space
log_p_hmm = hmm.params[1][1]
prr_hmm = np.exp(log_p_hmm) # diagonal prob
Ps_dist_hmm = np.sum((ell_hmm[:, :, None] - ell_hmm[:, :, None].T) ** 2, axis = 1)
log_P_hmm = -Ps_dist_hmm / L_hmm
log_P_hmm += np.diag(log_p_hmm)
Ps_hmm = np.exp(log_P_hmm)
Ps_hmm /= Ps_hmm.sum(axis=1, keepdims=True)
# +
##### ####################
##### plot fitted results
######### LogL
plt.figure(figsize=(8, 4))
plt.plot(hmm_lls, label="EM")
plt.plot([0, N_iters], true_ll * np.ones(2), ':k', label="True")
plt.xticks(np.arange(0, N_iters, step=40), fontsize=32)
plt.xlabel("EM iteration", fontsize=32)
plt.ylabel("log probability", fontsize=32)
plt.legend(loc="lower right", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.show()
# -
######## Zs
plt.figure(figsize=(8, 4))
plt.subplot(211)
plt.imshow(z[None,:], cmap='Paired', vmin=0, vmax=D0, aspect="auto")
plt.xlim(0, 60)
plt.ylabel("$z_{\\mathrm{true}}$", fontsize=32)
plt.yticks([])
plt.xticks([])
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(212)
plt.imshow(hmm_z[None,:], cmap='Paired', vmin=0, vmax=D0, aspect="auto")
plt.xlim(0, 60)
plt.ylabel("$z_{\\mathrm{inferred}}$", fontsize=32)
plt.yticks([])
plt.xticks(np.arange(0, 60, step=20), fontsize=32)
plt.xlabel("time bin", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.show()
# +
###### Ps
plt.figure(figsize=(12, 4))
plt.subplot(121)
plt.imshow(true_hmm.transitions.transition_matrix, cmap="autumn_r", vmin=0, vmax=1, aspect="equal")
plt.xlim(-.5,K-.5)
plt.ylim(K-.5,-.5)
plt.yticks(np.arange(0, K, step=2), fontsize=32)
plt.xticks(np.arange(0, K, step=2), fontsize=32)
plt.title("true \n state transition", fontsize=32)
plt.xlabel("$z_{\\mathrm{true},t+1}$", fontsize=32)
plt.ylabel("$z_{\\mathrm{true},t}$", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(122)
plt.imshow(hmm.transitions.transition_matrix, cmap="autumn_r", vmin=0, vmax=1, aspect="equal")
plt.xlim(-.5,K-.5)
plt.ylim(K-.5,-.5)
plt.yticks(np.arange(0, K, step=2), fontsize=32)
plt.xticks(np.arange(0, K, step=2), fontsize=32)
plt.title("inferred \n state transition", fontsize=32)
plt.xlabel("$z_{\\mathrm{inferred},t+1}$", fontsize=32)
plt.ylabel("$z_{\\mathrm{inferred},t}$", fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplots_adjust(top=0.88)
plt.show()
# +
##### ell
### orthogonal transformation
def orthogonal_transformation(A, B):
    """Align matrix A onto target B via scaled orthogonal Procrustes.

    Both matrices are mean-centered, the best rotation/reflection and
    scale mapping A onto B are found, and the transformed A is shifted
    to B's centroid. Returns the aligned copy of A.
    """
    # A: input matrix, B: target matrix (same shape).
    centered_a = A - A.mean(axis=0)
    centered_b = B - B.mean(axis=0)
    # R is the optimal orthogonal map; s = sum of singular values of
    # centered_a.T @ centered_b, from which the optimal scale follows.
    rotation, s = orthogonal_procrustes(centered_a, centered_b)
    scale = s / norm(centered_a) ** 2
    return scale * (centered_a @ rotation) + B.mean(axis=0)
ell_hmm_ot = orthogonal_transformation(ell_hmm, ell)
plt.figure(figsize=(12, 3))
plt.subplot(131)
plt.scatter(ell[:,0], ell[:,1],
c=range(K), cmap="Set1", marker="*", s=250, edgecolors='k', alpha=1)
plt.title("true \n latent space \n L = %.2f" % L, fontsize=32)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.yticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.gca().set_aspect('equal', adjustable='box')
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(132)
plt.scatter(ell_hmm[:,0], ell_hmm[:,1],
c=range(K), cmap="Set1", marker="o", s=250, edgecolors='k', alpha=1)
plt.title("inferred \n latent space \n L = %.2f" % L_hmm, fontsize=32)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.yticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.gca().set_aspect('equal', adjustable='box')
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplot(133)
plt.scatter(ell[:,0], ell[:,1],
c=range(K), cmap="Set1", marker="*", s=250, edgecolors='k', alpha=.8)
plt.scatter(ell_hmm_ot[:,0], ell_hmm_ot[:,1],
c=range(K), cmap="Set1", marker="o", s=250, edgecolors='k', alpha=.7)
plt.xlim(-3, 3)
plt.ylim(-3, 3)
plt.xticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.yticks(np.arange(-3, 3.01, step=2), fontsize=28)
plt.gca().set_aspect('equal', adjustable='box')
for spine in plt.gca().spines.values():
spine.set_visible(False)
plt.subplots_adjust(top=0.92)
plt.show()
# +
### ############################################################################ ###
### Use true ell and Ps, calculate likelihood curve for L of a simulated SWR ###
### ############################################################################ ###
import copy
#### simulate an SWR from the true model with a scaled-up L
T_swr = 500
L_swr = 5
swr_hmm = copy.deepcopy(true_hmm)
swr_hmm.transitions.L = L_swr
#### increase the firing rate a bit
#log_lambdas_swr = np.log(np.exp(hmm.params[2][0][:]) * 1.2)
#for state_i in range(K):
# swr_hmm.params[2][0][state_i] = log_lambdas_swr[state_i]
z_swr, y_swr = swr_hmm.sample(T_swr)
spikes_swr = np.sum(y_swr[:, :, :, 0], axis=2) # T by D0
marks_swr = y_swr[:, :, :, 1:] # T by D0 by D2 by D3
### find MLE for L
# Sweep the length scale L over a log-spaced grid and evaluate the fitted
# model's log-likelihood on the simulated SWR data, holding every other
# parameter fixed; the argmax is the MLE plotted below.
copy_hmm = copy.deepcopy(hmm)
list_L = np.exp(np.linspace(np.log(1e-16), np.log(10), num=100)) # log scale
list_ll = []
for num in list_L:
    copy_hmm.transitions.L = num
    list_ll.append(copy_hmm.log_probability(y_swr))
### plot log-likelihood for different values of L
fig=plt.figure(figsize=(12, 4))
plt.plot(list_L, list_ll)
plt.xlabel("length scale, $L$", fontsize=24)
plt.ylabel("log-likelihood", fontsize=24)
plt.ylim((np.max(list_ll)-100, np.max(list_ll)+30))
plt.title("L$_{MLE}$ = %.2f" % list_L[np.argmax(list_ll)], fontsize=32)
for spine in plt.gca().spines.values():
spine.set_visible(False)
|
notebooks/6 Distance-dependent HMM.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Ka9-BtIZhpgT"
# # Ungraded Lab: Denoising with a CNN Autoencoder
#
# In the final lab for this week, you will introduce noise to the Fashion MNIST dataset and train an autoencoder to reconstruct the original input images.
# + [markdown] id="k-WXDV3Tk6N6"
# ## Imports
# + id="3EXwoz-KHtWO"
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
# + [markdown] id="RX4GbMpwk8Y5"
# ## Prepare the Dataset
# + [markdown] id="OLuUvE7ieuAl"
# You will prepare the train and test sets a little differently this time. Instead of just normalizing the images, you will also introduce random noise and the generated images will be used as input to your model. The target or label will still be the clean images.
# + id="t9F7YsCNIKSA"
def map_image_with_noise(image, label):
    '''Normalize `image` to [0, 1] and return a (noisy, clean) pair.

    The noisy version (clean + 0.5 * Gaussian noise, clipped to [0, 1]) is the
    model input; the clean image is the reconstruction target. `label` is
    accepted for tf.data.map compatibility but ignored.
    '''
    clean = tf.cast(image, dtype=tf.float32)
    clean = clean / 255.0
    noise = 0.5 * tf.random.normal(shape=clean.shape)
    noisy = tf.clip_by_value(clean + noise, 0.0, 1.0)
    return noisy, clean
# + id="Ub3k-XfMeTol" colab={"base_uri": "https://localhost:8080/", "height": 345, "referenced_widgets": ["e522ddf9e1364f5a8bb2c4658f6aefff", "03abe1eee8a9495a99587b9e0b7552d9", "c8eda6f04a5d46d78b9fe6657056afb0", "9f71e1d3193d443aab7e3e2255202bb3", "<KEY>", "8f20bb2698e24e6bbdf68187760e23d5", "<KEY>", "<KEY>", "<KEY>", "621b9fe30e8e439480810d224813e623", "<KEY>", "669ca96b4f1f4366b1e4a4d9db896004", "a733e9b6075d4849840e8748c96a0bce", "<KEY>", "35b4309fe0a54c6097159eeae6f5be7b", "f6fd6a4052ae4711ab8216494f008f0f", "6f95927940b647c98c2be7264a7fec44", "37a8fc58c5304cae9cc9474e672a8a59", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "6603980b383e47d68f65da3b557ffd79", "<KEY>", "76eff27b7ec7447595afcfb4ead92684", "d8a5698f4d1749ac9a83897f2fe43e2b", "<KEY>", "4a0bd98335434ff191e787f830cac164", "<KEY>", "<KEY>", "fe7fdd1d0fd2481ea0aa9b7a2bc2038a", "<KEY>", "<KEY>", "9980b5c71b134ee5954d71a165dd8641", "7d62568e57be4a92b9154a14be62be93", "<KEY>", "e173635054254015bd144e5f95336c7e", "<KEY>", "64c804375ee94662880f5d9ee208b625", "06012e715b1b4e21a2772cd8548afaf6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "4aee3816fbe74ca1b0baa291ffa24b6a", "a3a4477bb74849419044ae965d2ef67f", "<KEY>", "<KEY>", "b964d7be5abd47deaca030ba6a2cafbb", "f7cc56a23c6941b4b76eaa6d061d968c", "<KEY>", "adcf517cdad94931b29028bb162c52fd", "3162edd768c249b4ac0f82603f4d8cf8", "172a2e5b8d494a5499189dedd0d1d3fb", "479823cd18344495a7671374223a271f"]} outputId="44add6d2-9953-48c4-9e38-3153a13773c2"
BATCH_SIZE = 128
SHUFFLE_BUFFER_SIZE = 1024
# Train pipeline: map to (noisy, clean) pairs, shuffle, batch, and repeat
# forever (fit() bounds each epoch with steps_per_epoch).
train_dataset = tfds.load('fashion_mnist', as_supervised=True, split="train")
train_dataset = train_dataset.map(map_image_with_noise)
train_dataset = train_dataset.shuffle(SHUFFLE_BUFFER_SIZE).batch(BATCH_SIZE).repeat()
# Test pipeline: same noisy/clean mapping, but no shuffling.
test_dataset = tfds.load('fashion_mnist', as_supervised=True, split="test")
test_dataset = test_dataset.map(map_image_with_noise)
test_dataset = test_dataset.batch(BATCH_SIZE).repeat()
# + [markdown] id="0dJ4QWDMk_Wd"
# ## Build the Model
# + [markdown] id="Z8t_TQP3fOLZ"
# You will use the same model from the previous lab.
# + [markdown] id="e_8SD8jRfVG7"
# <img src="https://drive.google.com/uc?export=view&id=15zh7bst9KKvciRdCvMAH7kXt3nNkABzO" width="75%" height="75%"/>
# + id="wxh8h-UMk2iL"
def encoder(inputs):
    '''Encoder path: two Conv2D (64 then 128 filters) + 2x2 max-pooling stages,
    halving the spatial resolution twice (28x28 -> 7x7).'''
    x = inputs
    for n_filters in (64, 128):
        x = tf.keras.layers.Conv2D(filters=n_filters, kernel_size=(3,3), activation='relu', padding='same')(x)
        x = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(x)
    return x
# + id="wRWmLA3VliDr"
def bottle_neck(inputs):
    '''Bottleneck: a 256-filter Conv2D, plus a 1-channel sigmoid projection
    used only to visualize the learned encoding.'''
    compressed = tf.keras.layers.Conv2D(filters=256, kernel_size=(3,3), activation='relu', padding='same')(inputs)
    viz = tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), activation='sigmoid', padding='same')(compressed)
    return compressed, viz
# + id="XZgLt5uAmArk"
def decoder(inputs):
    '''Decoder path: mirror the encoder with Conv2D (128 then 64 filters) +
    2x2 upsampling stages, then project to a single sigmoid channel.'''
    x = inputs
    for n_filters in (128, 64):
        x = tf.keras.layers.Conv2D(filters=n_filters, kernel_size=(3,3), activation='relu', padding='same')(x)
        x = tf.keras.layers.UpSampling2D(size=(2,2))(x)
    return tf.keras.layers.Conv2D(filters=1, kernel_size=(3,3), activation='sigmoid', padding='same')(x)
# + id="fQKwO64iiOYl"
def convolutional_auto_encoder():
    '''Build the denoising autoencoder.

    Returns a pair of models sharing the same input: the full
    encoder->bottleneck->decoder autoencoder, and a companion model that
    outputs the bottleneck's 1-channel visualization.
    '''
    inputs = tf.keras.layers.Input(shape=(28, 28, 1,))
    bottleneck_output, viz_output = bottle_neck(encoder(inputs))
    reconstruction = decoder(bottleneck_output)
    autoencoder = tf.keras.Model(inputs=inputs, outputs=reconstruction)
    viz_model = tf.keras.Model(inputs=inputs, outputs=viz_output)
    return autoencoder, viz_model
# + id="1MmS7r0tkuIf" colab={"base_uri": "https://localhost:8080/"} outputId="cd778b5d-b4ef-4706-b4f8-c43b3bc2c586"
convolutional_model, convolutional_encoder_model = convolutional_auto_encoder()
convolutional_model.summary()
# + [markdown] id="rCh0ea08lERp"
# ## Compile and Train the Model
# + id="J0Umj_xaiHL_" colab={"base_uri": "https://localhost:8080/"} outputId="b18a064d-b0a9-43ce-e923-a57ec343a59a"
# One pass over the 60k training images per epoch. NOTE(review): valid_steps
# also uses 60000 even though the test split has 10k images; repeat() makes
# this run, but 10000 // BATCH_SIZE may be what was intended -- confirm.
train_steps = 60000 // BATCH_SIZE
valid_steps = 60000 // BATCH_SIZE
convolutional_model.compile(optimizer=tf.keras.optimizers.Adam(), loss='binary_crossentropy')
conv_model_history = convolutional_model.fit(train_dataset, steps_per_epoch=train_steps, validation_data=test_dataset, validation_steps=valid_steps, epochs=40)
# + [markdown] id="npl9MHErlJa2"
# ## Display sample results
#
# Let's see if the model can generate the clean image from noisy inputs.
# + id="aqFR12I6fwBe"
def display_one_row(disp_images, offset, shape=(28, 28)):
    '''Plot each image of `disp_images` into consecutive cells of a 3x10
    subplot grid, starting at grid position `offset` + 1.'''
    for pos, img in enumerate(disp_images, start=offset + 1):
        plt.subplot(3, 10, pos)
        plt.xticks([])
        plt.yticks([])
        plt.imshow(np.reshape(img, shape), cmap='gray')
def display_results(disp_input_images, disp_encoded, disp_predicted, enc_shape=(8,4)):
    '''Show three rows: noisy inputs (top), bottleneck encodings (middle),
    and reconstructed outputs (bottom).'''
    plt.figure(figsize=(15, 5))
    rows = ((disp_input_images, 0, (28, 28)),
            (disp_encoded, 10, enc_shape),
            (disp_predicted, 20, (28, 28)))
    for images, offset, shape in rows:
        display_one_row(images, offset, shape=shape)
# + id="qtQyQRxRN_hH" colab={"base_uri": "https://localhost:8080/", "height": 297} outputId="4cc30dd6-dd56-451f-b5bd-d38f23c1abc4"
# take 1 batch of the dataset
test_dataset = test_dataset.take(1)
# take the (noisy) input images from the batch; the loop leaves the last
# batch's inputs in `output_samples`
output_samples = []
for input_image, image in tfds.as_numpy(test_dataset):
  output_samples = input_image
# pick 10 indices
idxs = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
# prepare test samples as a batch of 10 images
conv_output_samples = np.array(output_samples[idxs])
conv_output_samples = np.reshape(conv_output_samples, (10, 28, 28, 1))
# get the encoder ouput (1-channel bottleneck visualization)
encoded = convolutional_encoder_model.predict(conv_output_samples)
# get a prediction (denoised reconstruction) for the same samples
predicted = convolutional_model.predict(conv_output_samples)
# display the samples, encodings and decoded values!
display_results(conv_output_samples, encoded, predicted, enc_shape=(7,7))
|
Generative-deep learning-with-tensorflow/week-2/C4_W2_Lab_5_FashionMNIST_NoisyCNNAutoEncoder.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from PIL import Image
import os
def resize_image(input_image_path, output_image_path, size):
    """Resize the image at `input_image_path` and save it to `output_image_path`.

    Portrait images (width < height) are forced to 150x180 regardless of the
    requested `size`; landscape/square images use `size` as given.
    """
    original_image = Image.open(input_image_path)
    # Bug fix: the original read `resized_image.size` here, but `resized_image`
    # is only assigned below -- that raised NameError on every call. The intent
    # is to inspect the *source* image's dimensions.
    width, height = original_image.size
    if width < height:
        size = (150, 180)
    resized_image = original_image.resize(size)
    resized_image.save(output_image_path)
# Batch-resize every file in the test folder, writing each result alongside
# the original with an 'sm' prefix.
# NOTE(review): re-running this also picks up previously generated 'sm*'
# outputs (and hidden files like .DS_Store) -- consider filtering the listing.
for file in os.listdir('/Users/mila1/Downloads/test/'):
    resize_image(input_image_path='/Users/mila1/Downloads/test/'+file,
                 output_image_path='/Users/mila1/Downloads/test/sm'+file,
                 size=(300, 180))
# -
|
.ipynb_checkpoints/Untitled9-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# %pylab inline
# Sigvisa seismic-event inference demo (Python 2): propose and infer events
# around a known doublet using waveform correlation at station MKAR.
from sigvisa import Sigvisa
from sigvisa.source.event import get_event
from sigvisa.infer.coarse_to_fine_init import ModelSpec, EventRunSpec, do_coarse_to_fine, initialize_from, do_inference
from sigvisa.infer.correlations.event_proposal import correlation_location_proposal #, generate_historical_db
from sigvisa.infer.correlations.ar_correlation_model import estimate_ar, ar_advantage, iid_advantage
from sigvisa.graph.sigvisa_graph import SigvisaGraph
from sigvisa.treegp.gp import GPCov
import os, sys, traceback
import cPickle as pickle
# Candidate stations and historical event ids; `doublet` is the reference
# event whose waveform we correlate against.
stas = ['ASAR', 'KURK', 'MKAR', 'SONM', 'BVAR', 'FITZ', 'CTA', 'CMAR', 'WRA', 'ZALV', 'MJAR', 'AKTO', 'INK']
doublet = 5334939
evids = [5334501, 5334991, 5334726, 5335144, 5349684, 5335822, 5348178, 5334971, 5349536, 5335079, 5335116, 5335138, 5350499, 5336237, 5335425, 5335424, 5349441, 5336640, 5335577, 5350077, 5336889, 5335760, 5336967, 5337111, 533\
6015, 5337461, 5351821, 5351657, 5336724, 5351713, 5338302, 5338318, 5338388]
# -
# +
# Build a signal graph for the doublet at MKAR with GP-based template and
# wiggle models on raw 10 Hz signals; constrain event times to +/- 200 s
# around the doublet, and restrict correlation proposals to MK31.
doublet_ev = get_event(evid=doublet)
rs = EventRunSpec(evids=[doublet,], stas=["MKAR"], runids=(1,), disable_conflict_checking=False)
ms1 = ModelSpec(template_model_type="gp_lld", wiggle_family="db4_2.0_3_20.0", wiggle_model_type="gp_lld", max_hz=10.0, raw_signals=True)
#ms1 = ModelSpec(template_model_type="param", wiggle_family="iid", max_hz=10.0, raw_signals=True)
sg = rs.build_sg(ms1)
sg.event_end_time = doublet_ev.time + 200
sg.event_start_time = doublet_ev.time - 200
sg.correlation_proposal_stas=["MK31"]
# -
# +
#ev, evlp, (proposals, weights, posteriors, x) = correlation_location_proposal(sg)
# +
#print ev, evlp
#print weights
#print x
# +
from sigvisa.infer.run_mcmc import run_open_world_MH
# Run 1000 steps of open-world Metropolis-Hastings: event birth/death moves
# enabled, template birth/death disabled (template params still resampled).
run_open_world_MH(sg, steps=1000,
                  enable_event_openworld=True,
                  enable_event_moves=True,
                  enable_template_openworld=False,
                  enable_template_moves=True)
# -
# +
from sigvisa.infer.event_birthdeath import ev_birth_move_abstract
def correlation_prop_MKAR(sg, fix_result=None):
    # Thin wrapper so birth/death moves use the correlation-based location proposal.
    return correlation_location_proposal(sg, fix_result=fix_result)
# Attempt one correlation-guided event birth move (outcome not forced).
ev_birth_move_abstract(sg, location_proposal=correlation_location_proposal, #correlation_prop_MKAR,
                       proposal_includes_mb=False, use_correlation=True, force_outcome=None)
# +
print sg.current_log_p()
# Inspect template parameters for every arrival at MK31's first wave node.
wn = sg.station_waves["MK31"][0]
for arr in wn.arrivals():
    a, tg = wn.get_template_params_for_arrival(*arr)
    print arr
    print a
# +
def plot_debug_dists():
    # Plot each debug distribution recorded for the MK31 wave node, titling
    # each figure with the key and its argmax index.
    dd = sg.debug_dists["wave_MK31_BHZ_freq_0.8_4.5_1240246002.8"]
    for k in dd.keys():
        figure()
        plot(dd[k])
        title("%s %d" % (k, np.argmax(dd[k])))
plot_debug_dists()
# -
print sg.current_log_p()
print sg.current_log_p_breakdown()
# +
si=200
ei=900
s1 = wn.tssm.obs_var(1700)  # observation variance before the death move (compared with s2 below)
def plot_predicted():
    # Plot the observed MK31 signal against the state-space model's predicted
    # mean, with a +/- 2 sigma band from the observation variance. Uses the
    # module-level si/ei window. Commented lines are alternate diagnostics.
    f = figure(figsize=(20, 5))
    wn = sg.station_waves["MK31"][0]
    z1 = wn.tssm.prior_sample(1700, 0)
    #plot(z1[200:600])
    s1 = wn.tssm.obs_var(1700)
    m1 = wn.tssm.mean_obs(1700)
    #plot(m1[si:ei], lw=2)
    #plot(m1[si:ei]+np.sqrt(s1)[si:ei], lw=4)
    print wn.log_p()
    #u1 = wn.unexplained_kalman()
    figure(figsize=(20, 5))
    plot(wn.get_value()[si:ei])
    #plot(u1[si:ei])
    plot(m1[si:ei], lw=2)
    plt.fill_between(np.arange(ei-si), m1[si:ei]+2*np.sqrt(s1[si:ei]), m1[si:ei]-2*np.sqrt(s1[si:ei]), facecolor="green", alpha=0.2)
    #figure(figsize=(20, 5))
    #subtracted1 = wn.get_value() - m1
    #plot(subtracted1[si:ei])
    #print wn.nm.log_p(subtracted1)
plot_predicted()
# -
"""
from sigvisa.infer.run_mcmc import run_open_world_MH
run_open_world_MH(sg, steps=1000,
enable_event_openworld=False,
enable_event_moves=True,
enable_template_openworld=False,
enable_template_moves=True)
"""
print sg.all_nodes["1;P;MK31;:;:;amp_transfer"].get_value()
#sg.all_nodes["1;P;MK31;:;:;amp_transfer"].set_value(3.06387673875)
print sg.current_log_p()
plot_predicted()
#3.70620502114 [-2564.34738735]
from sigvisa.infer.event_birthdeath import ev_death_move_abstract
ev_death_move_abstract(sg, location_proposal=correlation_prop_MKAR,
proposal_includes_mb=False, use_correlation=True)
# +
print sg.current_log_p()
wn = sg.station_waves["MK31"][0]
for arr in wn.arrivals():
a, tg = wn.get_template_params_for_arrival(*arr)
print arr
print a
# +
s2 = wn.tssm.obs_var(1700)
plot_predicted()
plot_debug_dists()
# -
print sg.current_log_p()
print sg.current_log_p_breakdown()
# +
plot(np.sqrt(s1)[200:600])
plot(np.sqrt(s2)[200:600])
# -
|
notebooks/bayesian_correlation_evproposal_3_full_mh.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#hide
#skip
! [ -e /content ] && pip install -Uqq fastai # upgrade fastai on colab
# +
# default_exp learner
# -
#export
from fastai.data.all import *
from fastai.optimizer import *
from fastai.callback.core import *
import pickle
#hide
from nbdev.showdoc import *
#export
# Names re-exported by this module (nbdev convention); the Cancel*Exception
# classes are defined in fastai.callback.core and surfaced here for import.
_all_ = ['CancelStepException','CancelFitException','CancelEpochException','CancelTrainException','CancelValidException','CancelBatchException']
# # Learner, Metrics, and Basic Callbacks
#
# > Basic class for handling the training loop
# You probably want to jump directly to the definition of `Learner`.
# ## Utils function
#hide
#For tests
from torch.utils.data import TensorDataset
# +
#hide
def synth_dbunch(a=2, b=3, bs=16, n_train=10, n_valid=2, cuda=False):
    "A simple dataset where `x` is random and `y = a*x + b` plus some noise."
    def make_ds(n_batches):
        # Draw x first, then the noise, so the RNG consumption order is fixed.
        xs = torch.randn(int(bs*n_batches))
        ys = a*xs + b + 0.1*torch.randn(int(bs*n_batches))
        return TensorDataset(xs, ys)
    ds_train = make_ds(n_train)
    ds_valid = make_ds(n_valid)
    device = default_device() if cuda else None
    dl_train = TfmdDL(ds_train, bs=bs, shuffle=True, num_workers=0)
    dl_valid = TfmdDL(ds_valid, bs=bs, num_workers=0)
    return DataLoaders(dl_train, dl_valid, device=device)
class RegModel(Module):
    "A trivial regression model: `y = a*x + b` with scalar learnable parameters `a` and `b`."
    def __init__(self): self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
    def forward(self, x): return x*self.a + self.b
# -
# export
defaults.lr = 1e-3  # library-wide default learning rate used by `Learner`
# export
def replacing_yield(o, attr, val):
"Context manager to temporarily replace an attribute"
old = getattr(o,attr)
try: yield setattr(o,attr,val)
finally: setattr(o,attr,old)
# +
class _A:
    # Minimal fixture: exposes attribute `a` and a context manager that
    # temporarily replaces it via `replacing_yield`.
    def __init__(self, a): self.a = a
    @contextmanager
    def a_changed(self, v): return replacing_yield(self, 'a', v)
a = _A(42)
with a.a_changed(32):
    test_eq(a.a, 32)  # replaced inside the context...
test_eq(a.a, 42)      # ...and restored on exit
# -
#export
def mk_metric(m):
"Convert `m` to an `AvgMetric`, unless it's already a `Metric`"
if isinstance(m,type): m = m()
return m if isinstance(m, Metric) else AvgMetric(m)
# See the class `Metric` below for more information.
#export
def save_model(file, model, opt, with_opt=True, pickle_protocol=2):
"Save `model` to `file` along with `opt` (if available, and if `with_opt`)"
if rank_distrib(): return # don't save if child proc
if opt is None: with_opt=False
state = get_model(model).state_dict()
if with_opt: state = {'model': state, 'opt':opt.state_dict()}
torch.save(state, file, pickle_protocol=pickle_protocol)
# `file` can be a `Path` object, a string or an opened file object. `pickle_protocol` is passed along to `torch.save`
# export
def load_model(file, model, opt, with_opt=True, device=None, strict=True):
"Load `model` from `file` along with `opt` (if available, and if `with_opt`)"
distrib_barrier()
if isinstance(device, int): device = torch.device('cuda', device)
elif device is None: device = 'cpu'
state = torch.load(file, map_location=device)
hasopt = set(state)=={'model', 'opt'}
model_state = state['model'] if hasopt else state
get_model(model).load_state_dict(model_state, strict=strict)
if hasopt and with_opt:
try: opt.load_state_dict(state['opt'])
except:
if with_opt: warn("Could not load the optimizer state.")
elif with_opt: warn("Saved filed doesn't contain an optimizer state.")
# `file` can be a `Path` object, a string or an opened file object. If a `device` is passed, the model is loaded on it, otherwise it's loaded on the CPU.
#
# If `strict` is `True`, the file must exactly contain weights for every parameter key in `model`, if `strict` is `False`, only the keys that are in the saved model are loaded in `model`.
# export
def _try_concat(o):
try: return torch.cat(o)
except: return sum([L(o_[i,:] for i in range_of(o_)) for o_ in o], L())
#export
# Event sequences fired around an epoch, used by `Learner.__enter__`/`__exit__`
# and by code that drives validation outside `fit` (e.g. `get_preds`).
_before_epoch = [event.before_fit, event.before_epoch]
_after_epoch = [event.after_epoch, event.after_fit]
#export
class _ConstantFunc():
"Returns a function that returns `o`"
def __init__(self, o): self.o = o
def __call__(self, *args, **kwargs): return self.o
# ## Learner -
#export
# Human-readable outline of the training loop, consumed by
# `Learner.show_training_loop`: 'Start'/'End' entries control indentation,
# every other entry is a callback event name.
_loop = ['Start Fit', 'before_fit', 'Start Epoch Loop', 'before_epoch', 'Start Train', 'before_train',
         'Start Batch Loop', 'before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step',
         'after_step', 'after_cancel_batch', 'after_batch','End Batch Loop','End Train',
         'after_cancel_train', 'after_train', 'Start Valid', 'before_validate','Start Batch Loop',
         '**CBs same as train batch**', 'End Batch Loop', 'End Valid', 'after_cancel_validate',
         'after_validate', 'End Epoch Loop', 'after_cancel_epoch', 'after_epoch', 'End Fit',
         'after_cancel_fit', 'after_fit']
# +
# export
class Learner(GetAttr):
    "Group together a `model`, some `dls` and a `loss_func` to handle training (per-method docs are attached via `add_docs` below)."
    _default='model'  # GetAttr: unknown attribute lookups are delegated to `self.model`
    def __init__(self, dls, model, loss_func=None, opt_func=Adam, lr=defaults.lr, splitter=trainable_params, cbs=None,
                 metrics=None, path=None, model_dir='models', wd=None, wd_bn_bias=False, train_bn=True,
                 moms=(0.95,0.85,0.95)):
        path = Path(path) if path is not None else getattr(dls, 'path', Path('.'))
        if loss_func is None:
            # fall back to the loss function attached to the training dataset
            loss_func = getattr(dls.train_ds, 'loss_func', None)
            assert loss_func is not None, "Could not infer loss function from the data, please pass a loss function."
        self.dls,self.model = dls,model
        store_attr(but='dls,model,cbs')
        self.training,self.create_mbar,self.logger,self.opt,self.cbs = False,True,print,None,L()
        self.add_cbs(L(defaults.callbacks)+L(cbs))
        self("after_create")
    # The `metrics` setter normalizes every entry to a `Metric` via `mk_metric`.
    @property
    def metrics(self): return self._metrics
    @metrics.setter
    def metrics(self,v): self._metrics = L(v).map(mk_metric)
    # --- Callback management: each cb is registered as an attribute by name ---
    def _grab_cbs(self, cb_cls): return L(cb for cb in self.cbs if isinstance(cb, cb_cls))
    def add_cbs(self, cbs):
        L(cbs).map(self.add_cb)
        return self
    def remove_cbs(self, cbs):
        L(cbs).map(self.remove_cb)
        return self
    def add_cb(self, cb):
        if isinstance(cb, type): cb = cb()  # accept a class and instantiate it
        cb.learn = self
        setattr(self, cb.name, cb)
        self.cbs.append(cb)
        return self
    def remove_cb(self, cb):
        if isinstance(cb, type): self.remove_cbs(self._grab_cbs(cb))
        else:
            cb.learn = None
            if hasattr(self, cb.name): delattr(self, cb.name)
            if cb in self.cbs: self.cbs.remove(cb)
        return self
    @contextmanager
    def added_cbs(self, cbs):
        self.add_cbs(cbs)
        try: yield
        finally: self.remove_cbs(cbs)
    @contextmanager
    def removed_cbs(self, cbs):
        self.remove_cbs(cbs)
        try: yield self
        finally: self.add_cbs(cbs)
    def ordered_cbs(self, event): return [cb for cb in self.cbs.sorted('order') if hasattr(cb, event)]
    # Calling the learner with an event name dispatches it to every callback in order.
    def __call__(self, event_name): L(event_name).map(self._call_one)
    def _call_one(self, event_name):
        if not hasattr(event, event_name): raise Exception(f'missing {event_name}')
        for cb in self.cbs.sorted('order'): cb(event_name)
    # --- Optimizer setup ---
    def _bn_bias_state(self, with_bias): return norm_bias_params(self.model, with_bias).map(self.opt.state)
    def create_opt(self):
        self.opt = self.opt_func(self.splitter(self.model), lr=self.lr)
        if not self.wd_bn_bias:
            # exempt norm/bias params from weight decay
            for p in self._bn_bias_state(True ): p['do_wd'] = False
        if self.train_bn:
            # keep norm layers training even when their param group is frozen
            for p in self._bn_bias_state(False): p['force_train'] = True
    # --- Core training loop ---
    def _split(self, b):
        # split a batch into inputs (xb) and targets (yb) based on dls.n_inp
        i = getattr(self.dls, 'n_inp', 1 if len(b)==1 else len(b)-1)
        self.xb,self.yb = b[:i],b[i:]
    def _with_events(self, f, event_type, ex, final=noop):
        # run `f` bracketed by before_/after_ events; `ex` (a Cancel*Exception)
        # skips the rest of `f` but still fires after_cancel_/after_ events.
        try: self(f'before_{event_type}'); f()
        except ex: self(f'after_cancel_{event_type}')
        self(f'after_{event_type}'); final()
    def all_batches(self):
        self.n_iter = len(self.dl)
        for o in enumerate(self.dl): self.one_batch(*o)
    def _do_one_batch(self):
        self.pred = self.model(*self.xb)
        self('after_pred')
        if len(self.yb):
            self.loss_grad = self.loss_func(self.pred, *self.yb)
            self.loss = self.loss_grad.clone()
        self('after_loss')
        # validation (or unlabeled batch): stop before backward/step
        if not self.training or not len(self.yb): return
        self('before_backward')
        self.loss_grad.backward()
        self._with_events(self.opt.step, 'step', CancelStepException)
        self.opt.zero_grad()
    def one_batch(self, i, b):
        self.iter = i
        b_on_device = to_device(b, device=self.dls.device) if self.dls.device is not None else b
        self._split(b_on_device)
        self._with_events(self._do_one_batch, 'batch', CancelBatchException)
    def _do_epoch_train(self):
        self.dl = self.dls.train
        self._with_events(self.all_batches, 'train', CancelTrainException)
    def _do_epoch_validate(self, ds_idx=1, dl=None):
        if dl is None: dl = self.dls[ds_idx]
        self.dl = dl
        with torch.no_grad(): self._with_events(self.all_batches, 'validate', CancelValidException)
    def _do_epoch(self):
        self._do_epoch_train()
        self._do_epoch_validate()
    def _do_fit(self):
        for epoch in range(self.n_epoch):
            self.epoch=epoch
            self._with_events(self._do_epoch, 'epoch', CancelEpochException)
    def fit(self, n_epoch, lr=None, wd=None, cbs=None, reset_opt=False):
        with self.added_cbs(cbs):
            if reset_opt or not self.opt: self.create_opt()
            if wd is None: wd = self.wd
            if wd is not None: self.opt.set_hypers(wd=wd)
            self.opt.set_hypers(lr=self.lr if lr is None else lr)
            self.n_epoch = n_epoch
            self._with_events(self._do_fit, 'fit', CancelFitException, self._end_cleanup)
    def _end_cleanup(self): self.dl,self.xb,self.yb,self.pred,self.loss = None,(None,),(None,),None,None
    def __enter__(self): self(_before_epoch); return self
    def __exit__(self, exc_type, exc_value, tb): self(_after_epoch)
    # --- Inference / evaluation ---
    def validation_context(self, cbs=None, inner=False):
        cms = [self.no_logging(),self.no_mbar()]
        if cbs: cms.append(self.added_cbs(cbs))
        if not inner: cms.append(self)
        return ContextManagers(cms)
    def validate(self, ds_idx=1, dl=None, cbs=None):
        if dl is None: dl = self.dls[ds_idx]
        with self.validation_context(cbs=cbs): self._do_epoch_validate(ds_idx, dl)
        return getattr(self, 'final_record', None)
    @delegates(GatherPredsCallback.__init__)
    def get_preds(self, ds_idx=1, dl=None, with_input=False, with_decoded=False, with_loss=False, act=None,
                  inner=False, reorder=True, cbs=None, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffled=False, drop_last=False)
        else:
            try: len(dl)
            except TypeError as e:
                raise TypeError("`dl` is something other than a single `DataLoader` object")
        if reorder and hasattr(dl, 'get_idxs'):
            # freeze the current item order so results can be un-shuffled below
            idxs = dl.get_idxs()
            dl = dl.new(get_idxs = _ConstantFunc(idxs))
        cb = GatherPredsCallback(with_input=with_input, with_loss=with_loss, **kwargs)
        ctx_mgrs = self.validation_context(cbs=L(cbs)+[cb], inner=inner)
        if with_loss: ctx_mgrs.append(self.loss_not_reduced())
        with ContextManagers(ctx_mgrs):
            self._do_epoch_validate(dl=dl)
            if act is None: act = getattr(self.loss_func, 'activation', noop)
            res = cb.all_tensors()
            pred_i = 1 if with_input else 0
            if res[pred_i] is not None:
                res[pred_i] = act(res[pred_i])
                if with_decoded: res.insert(pred_i+2, getattr(self.loss_func, 'decodes', noop)(res[pred_i]))
            if reorder and hasattr(dl, 'get_idxs'): res = nested_reorder(res, tensor(idxs).argsort())
            return tuple(res)
        # NOTE(review): the line below is unreachable -- it follows the `return`
        # inside the `with` block above; cleanup likely belongs in a `finally`.
        self._end_cleanup()
    def predict(self, item, rm_type_tfms=None, with_input=False):
        dl = self.dls.test_dl([item], rm_type_tfms=rm_type_tfms, num_workers=0)
        inp,preds,_,dec_preds = self.get_preds(dl=dl, with_input=True, with_decoded=True)
        i = getattr(self.dls, 'n_inp', -1)
        inp = (inp,) if i==1 else tuplify(inp)
        dec = self.dls.decode_batch(inp + tuplify(dec_preds))[0]
        dec_inp,dec_targ = map(detuplify, [dec[:i],dec[i:]])
        res = dec_targ,dec_preds[0],preds[0]
        if with_input: res = (dec_inp,) + res
        return res
    def show_results(self, ds_idx=1, dl=None, max_n=9, shuffle=True, **kwargs):
        if dl is None: dl = self.dls[ds_idx].new(shuffle=shuffle)
        b = dl.one_batch()
        _,_,preds = self.get_preds(dl=[b], with_decoded=True)
        self.dls.show_results(b, preds, max_n=max_n, **kwargs)
    def show_training_loop(self):
        indent = 0
        for s in _loop:
            if s.startswith('Start'): print(f'{" "*indent}{s}'); indent += 2
            elif s.startswith('End'): indent -= 2; print(f'{" "*indent}{s}')
            else: print(f'{" "*indent} - {s:15}:', self.ordered_cbs(s))
    # --- Context-manager helpers built on `replacing_yield` ---
    @contextmanager
    def no_logging(self): return replacing_yield(self, 'logger', noop)
    @contextmanager
    def no_mbar(self): return replacing_yield(self, 'create_mbar', False)
    @contextmanager
    def loss_not_reduced(self):
        if hasattr(self.loss_func, 'reduction'): return replacing_yield(self.loss_func, 'reduction', 'none')
        else: return replacing_yield(self, 'loss_func', partial(self.loss_func, reduction='none'))
    def to_detach(self,b,cpu=True,gather=True):
        return self.dl.to_detach(b,cpu,gather) if hasattr(getattr(self,'dl',None),'to_detach') else to_detach(b,cpu,gather)
# Convenience accessors: `learn.x`/`learn.y` return the (detuplified) current
# input and target batch stored in `xb`/`yb`.
Learner.x,Learner.y = add_props(lambda i,x: detuplify((x.xb,x.yb)[i]))
# -
#export
# Attach docstrings to `Learner` and its public methods (nbdev's `add_docs`
# keeps the code cell compact while still documenting the API).
add_docs(Learner, "Group together a `model`, some `dls` and a `loss_func` to handle training",
         add_cbs="Add `cbs` to the list of `Callback` and register `self` as their learner",
         add_cb="Add `cb` to the list of `Callback` and register `self` as their learner",
         remove_cbs="Remove `cbs` from the list of `Callback` and deregister `self` as their learner",
         remove_cb="Add `cb` from the list of `Callback` and deregister `self` as their learner",
         added_cbs="Context manage that temporarily adds `cbs`",
         removed_cbs="Context manage that temporarily removes `cbs`",
         ordered_cbs="List of `Callback`s, in order, for an `event` in the training loop",
         create_opt="Create an optimizer with default hyper-parameters",
         one_batch="Train or evaluate `self.model` on batch `(xb,yb)`",
         all_batches="Train or evaluate `self.model` on all the batches of `self.dl`",
         fit="Fit `self.model` for `n_epoch` using `cbs`. Optionally `reset_opt`.",
         validate="Validate on `dl` with potential new `cbs`.",
         get_preds="Get the predictions and targets on the `ds_idx`-th dbunchset or `dl`, optionally `with_input` and `with_loss`",
         predict="Prediction on `item`, fully decoded, loss function decoded and probabilities",
         validation_context="A `ContextManagers` suitable for validation, with optional `cbs`",
         show_results="Show some predictions on `ds_idx`-th dataset or `dl`",
         show_training_loop="Show each step in the training loop",
         no_logging="Context manager to temporarily remove `logger`",
         no_mbar="Context manager to temporarily prevent the master progress bar from being created",
         loss_not_reduced="A context manager to evaluate `loss_func` with reduction set to none.",
         to_detach="Calls `to_detach` if `self.dl` provides a `.to_detach` function otherwise calls global `to_detach`",
         __call__="Call `event_name` for all `Callback`s in `self.cbs`"
)
show_doc(Learner)
# `opt_func` will be used to create an optimizer when `Learner.fit` is called, with `lr` as a default learning rate. `splitter` is a function that takes `self.model` and returns a list of parameter groups (or just one parameter group if there are no different parameter groups). The default is `trainable_params`, which returns all trainable parameters of the model.
#
# `cbs` is one or a list of `Callback`s to pass to the `Learner`. `Callback`s are used for every tweak of the training loop. Each `Callback` is registered as an attribute of `Learner` (with camel case). At creation, all the callbacks in `defaults.callbacks` (`TrainEvalCallback`, `Recorder` and `ProgressCallback`) are associated to the `Learner`.
#
# `metrics` is an optional list of metrics, that can be either functions or `Metric`s (see below).
#
# `path` and `model_dir` are used to save and/or load models. Often `path` will be inferred from `dls`, but you can override it or pass a `Path` object to `model_dir`. Make sure you can write in `path/model_dir`!
#
# `wd` is the default weight decay used when training the model; `moms`, the default momentums used in `Learner.fit_one_cycle`. `wd_bn_bias` controls if weight decay is applied to `BatchNorm` layers and bias.
#
# Lastly, `train_bn` controls if `BatchNorm` layers are trained even when they are supposed to be frozen according to the `splitter`. Our empirical experiments have shown that it's the best behavior for those layers in transfer learning.
# ### PyTorch interop
# You can use regular PyTorch functionality for most of the arguments of the `Learner`, although the experience will be smoother with pure fastai objects and you will be able to use the full functionality of the library. The expectation is that the training loop will work smoothly even if you did not use fastai end to end. What you might lose are interpretation objects or showing functionality. The list below explains how to use plain PyTorch objects for all the arguments and what you might lose.
#
# The most important is `opt_func`. If you are not using a fastai optimizer, you will need to write a function that wraps your PyTorch optimizer in an `OptimWrapper`. See the [optimizer module](http://docs.fast.ai/optimizer) for more details. This is to ensure the library's schedulers/freeze API work with your code.
#
# - `dls` is a `DataLoaders` object, that you can create from standard PyTorch dataloaders. By doing so, you will lose all showing functionality like `show_batch`/`show_results`. You can check the [data block API](http://docs.fast.ai/tutorial.datablock) or the [mid-level data API tutorial](http://docs.fast.ai/tutorial.pets) to learn how to use fastai to gather your data!
# - `model` is a standard PyTorch model. You can use anyone you like, just make sure it accepts the number of inputs you have in your `DataLoaders` and returns as many outputs as you have targets.
# - `loss_func` can be any loss function you like. It needs to be one of fastai's if you want to use `Learn.predict` or `Learn.get_preds`, or you will have to implement special methods (see more details after the `BaseLoss` documentation).
# ### Training loop
# Now let's look at the main thing the `Learner` class implements: the training loop.
#export
# Register the minimal default callback set once; guarded so re-running the
# cell (or re-importing) doesn't clobber a user-modified defaults.callbacks.
if not hasattr(defaults, 'callbacks'): defaults.callbacks = [TrainEvalCallback]
show_doc(Learner.fit)
# Uses `lr` and `wd` if they are provided, otherwise use the defaults values given by the `lr` and `wd` attributes of `Learner`.
# All the examples use `synth_learner` which is a simple `Learner` training a linear regression model.
#hide
def synth_learner(n_train=10, n_valid=2, cuda=False, lr=defaults.lr, **kwargs):
data = synth_dbunch(n_train=n_train,n_valid=n_valid, cuda=cuda)
return Learner(data, RegModel(), loss_func=MSELossFlat(), lr=lr, **kwargs)
#Training a few epochs should make the model better
learn = synth_learner(lr=0.1)
learn(_before_epoch)
learn.model = learn.model.cpu()
# loss on one batch before training...
xb,yb = learn.dls.one_batch()
init_loss = learn.loss_func(learn.model(xb), yb)
learn.fit(10)
# ...should beat the loss on a batch after 10 epochs
xb,yb = learn.dls.one_batch()
final_loss = learn.loss_func(learn.model(xb), yb)
assert final_loss < init_loss, (final_loss,init_loss)
# +
#hide
class TestTrainEvalCallback(Callback):
    # Checks the bookkeeping done by TrainEvalCallback: pct_train/train_iter
    # progress counters, device placement, and train/eval mode flags.
    run_after,run_valid = TrainEvalCallback,False
    def before_fit(self):
        test_eq([self.pct_train,self.train_iter], [0., 0])
        self.old_pct_train,self.old_train_iter = self.pct_train,self.train_iter
    def before_batch(self): test_eq(next(self.parameters()).device, find_device(self.xb))
    def after_batch(self):
        assert self.training
        test_eq(self.pct_train , self.old_pct_train+1/(self.n_iter*self.n_epoch))
        test_eq(self.train_iter, self.old_train_iter+1)
        self.old_pct_train,self.old_train_iter = self.pct_train,self.train_iter
    def before_train(self):
        assert self.training and self.model.training
        test_eq(self.pct_train, self.epoch/self.n_epoch)
        self.old_pct_train = self.pct_train
    def before_validate(self):
        assert not self.training and not self.model.training
learn = synth_learner(cbs=TestTrainEvalCallback)
learn.fit(1)
#Check order is properly taken into account
learn.cbs = L(reversed(learn.cbs))
# -
#hide
#cuda
#Check model is put on the GPU if needed
# Requires a CUDA device; TestTrainEvalCallback's before_batch verifies placement.
learn = synth_learner(cbs=TestTrainEvalCallback, cuda=True)
learn.fit(1)
# +
#hide
#Check wd is not applied on bn/bias when option wd_bn_bias=False
class _TstModel(nn.Module):
def __init__(self):
super().__init__()
self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
self.tst = nn.Sequential(nn.Linear(4,5), nn.BatchNorm1d(3))
self.tst[0].bias.data,self.tst[1].bias.data = torch.randn(5),torch.randn(3)
def forward(self, x): return x * self.a + self.b
class _PutGrad(Callback):
    "Before each optimizer step, fill every gradient of `learn.tst` with ones"
    def before_step(self):
        for param in self.learn.tst.parameters():
            param.grad = torch.ones_like(param.data)
# With unit grads and decoupled wd=1, a parameter with wd applied moves differently from -lr each step
learn = synth_learner(n_train=5, opt_func = partial(SGD, wd=1, decouple_wd=True), cbs=_PutGrad)
learn.model = _TstModel()
init = [p.clone() for p in learn.tst.parameters()]
learn.fit(1, lr=1e-2)
end = list(learn.tst.parameters())
# Weight of the linear layer got weight decay; bias/bn params (1,2,3) did not (wd_bn_bias=False)
assert not torch.allclose(end[0]-init[0], -0.05 * torch.ones_like(end[0]))
for i in [1,2,3]: test_close(end[i]-init[i], -0.05 * torch.ones_like(end[i]))
# -
show_doc(Learner.one_batch)
# This is an internal method called by `Learner.fit`. If passed, `i` is the index of this iteration in the epoch. In training mode, this does a full training step on the batch (compute predictions, loss, gradients, update the model parameters and zero the gradients). In validation mode, it stops at the loss computation. Training or validation is controlled internally by the `TrainEvalCallback` through the `training` attribute.
#
# Nothing is returned, but the attributes `x`, `y`, `pred`, `loss` of the `Learner` are set with the proper values:
b = learn.dls.one_batch()
learn.one_batch(0, b)
test_eq(learn.x, b[0])
test_eq(learn.y, b[1])
out = learn.model(learn.x)
test_eq(learn.pred, out)
test_eq(learn.loss, learn.loss_func(out, b[1]))
#hide
class VerboseCallback(Callback):
    "Callback that prints the name of each event called"
    def __call__(self, event_name):
        # Print first, then dispatch to the normal Callback machinery
        print(event_name)
        super().__call__(event_name)
#hide
class TestOneBatch(VerboseCallback):
    "Test callback verifying every step of `one_batch` on the synthetic regression model y = a*x + b"
    def __init__(self, xb, yb, i):
        # Remember the batch and iteration index we expect to see
        self.save_xb,self.save_yb,self.i = xb,yb,i
        self.old_pred,self.old_loss = None,tensor(0.)
    def before_batch(self):
        self.old_a,self.old_b = self.a.data.clone(),self.b.data.clone()
        test_eq(self.iter, self.i)
        test_eq(self.save_xb, *self.xb)
        test_eq(self.save_yb, *self.yb)
        # `pred` must not have been touched yet
        if hasattr(self.learn, 'pred'): test_eq(self.pred, self.old_pred)
    def after_pred(self):
        self.old_pred = self.pred
        test_eq(self.pred, self.a.data * self.x + self.b.data)
        test_eq(self.loss, self.old_loss)
    def after_loss(self):
        self.old_loss = self.loss
        test_eq(self.loss, self.loss_func(self.old_pred, self.save_yb))
        # Gradients must still be zero before the backward pass
        for p in self.parameters():
            if not hasattr(p, 'grad') or p.grad is not None: test_eq(p.grad, tensor([0.]))
    def before_step(self):
        # Analytic MSE gradients for y = a*x + b
        self.grad_a = (2 * self.x * (self.pred.data - self.y)).mean()
        self.grad_b = 2 * (self.pred.data - self.y).mean()
        test_close(self.a.grad.data, self.grad_a)
        test_close(self.b.grad.data, self.grad_b)
        test_eq(self.a.data, self.old_a)
        test_eq(self.b.data, self.old_b)
    def after_step(self):
        # SGD update: param -= lr * grad
        test_close(self.a.data, self.old_a - self.lr * self.grad_a)
        test_close(self.b.data, self.old_b - self.lr * self.grad_b)
        self.old_a,self.old_b = self.a.data.clone(),self.b.data.clone()
        test_close(self.a.grad.data, self.grad_a)
        test_close(self.b.grad.data, self.grad_b)
    def after_batch(self):
        # Gradients are zeroed at the end of the training step
        for p in self.parameters(): test_eq(p.grad, tensor([0.]))
#hide
# Drive `one_batch` manually (without fit) and check the exact sequence of events fired
learn = synth_learner()
b = learn.dls.one_batch()
learn = synth_learner(cbs=TestOneBatch(*b, 42), lr=1e-2)
#Remove train/eval
learn.cbs = learn.cbs[1:]
#Setup
learn.loss,learn.training = tensor(0.),True
learn.opt = SGD(learn.parameters(), lr=learn.lr)
learn.model.train()
batch_events = ['before_batch', 'after_pred', 'after_loss', 'before_backward', 'before_step', 'after_step', 'after_batch']
test_stdout(lambda: learn.one_batch(42, b), '\n'.join(batch_events))
test_stdout(lambda: learn.one_batch(42, b), '\n'.join(batch_events)) #Check it works for a second batch
show_doc(Learner.all_batches)
# +
#hide
# Check `all_batches` fires the full batch event cycle once per training batch
learn = synth_learner(n_train=5, cbs=VerboseCallback())
learn.opt = SGD(learn.parameters(), lr=learn.lr)
with redirect_stdout(io.StringIO()):
    learn(_before_epoch)
    learn.epoch,learn.dl = 0,learn.dls.train
    learn('before_train')
test_stdout(learn.all_batches, '\n'.join(batch_events * 5))
test_eq(learn.train_iter, 5)
# Validation batches stop after the loss: no backward/step events
valid_events = ['before_batch', 'after_pred', 'after_loss', 'after_batch']
with redirect_stdout(io.StringIO()):
    learn.dl = learn.dls.valid
    learn('before_validate')
test_stdout(learn.all_batches, '\n'.join(valid_events * 2))
test_eq(learn.train_iter, 5)
# -
#hide
learn = synth_learner(n_train=5, cbs=VerboseCallback())
test_stdout(lambda: learn(_before_epoch), 'before_fit\nbefore_epoch')
test_eq(learn.loss, tensor(0.))
#hide
learn.opt = SGD(learn.parameters(), lr=learn.lr)
learn.epoch = 0
test_stdout(lambda: learn._do_epoch_train(), '\n'.join(['before_train'] + batch_events * 5 + ['after_train']))
#hide
test_stdout(learn._do_epoch_validate, '\n'.join(['before_validate'] + valid_events * 2+ ['after_validate']))
show_doc(Learner.create_opt)
# This method is called internally to create the optimizer, the hyper-parameters are then adjusted by what you pass to `Learner.fit` or your particular schedulers (see `callback.schedule`).
learn = synth_learner(n_train=5, cbs=VerboseCallback())
assert learn.opt is None
learn.create_opt()
assert learn.opt is not None
test_eq(learn.opt.hypers[0]['lr'], learn.lr)
# ### Callback handling
# We only describe the basic functionality linked to `Callback`s here. To learn more about `Callback`s and how to write them, check the [callback.core](http://docs.fast.ai/callback.core) module documentation.
#
# Let's first see how the `Callback`s become attributes of `Learner`:
# +
#Test init with callbacks
class TstCallback(Callback):
    def batch_begin(self): self.learn.a = self.a + 1
# A fresh Learner always gets a TrainEvalCallback, exposed as the `train_eval` attribute
tst_learn = synth_learner()
test_eq(len(tst_learn.cbs), 1)
assert isinstance(tst_learn.cbs[0], TrainEvalCallback)
assert hasattr(tst_learn, ('train_eval'))
# Extra callbacks are appended and exposed under their snake-cased name (`tst`)
tst_learn = synth_learner(cbs=TstCallback())
test_eq(len(tst_learn.cbs), 2)
assert isinstance(tst_learn.cbs[1], TstCallback)
assert hasattr(tst_learn, ('tst'))
# -
show_doc(Learner.__call__)
# This is how the `Callback`s are called internally. For instance a `VerboseCallback` just prints the event names (can be useful for debugging):
learn = synth_learner(cbs=VerboseCallback())
learn('after_fit')
show_doc(Learner.add_cb)
# Adding a callback registers it in `cbs`, exposes it as an attribute, and sets its `learn` back-reference
learn = synth_learner()
learn.add_cb(TestTrainEvalCallback())
test_eq(len(learn.cbs), 2)
assert isinstance(learn.cbs[1], TestTrainEvalCallback)
test_eq(learn.train_eval.learn, learn)
show_doc(Learner.add_cbs)
learn.add_cbs([TestTrainEvalCallback(), TestTrainEvalCallback()])
test_eq(len(learn.cbs), 4)
show_doc(Learner.added_cbs)
# `added_cbs` is a context manager: the callback is only attached inside the `with` block
learn = synth_learner()
test_eq(len(learn.cbs), 1)
with learn.added_cbs(TestTrainEvalCallback()):
    test_eq(len(learn.cbs), 2)
show_doc(Learner.ordered_cbs)
# By order, we mean using the internal ordering of the `Callback`s (see `callback.core` for more information on how it works).
learn = synth_learner()
learn.add_cb(TestTrainEvalCallback())
learn.ordered_cbs('before_fit')
show_doc(Learner.remove_cb)
# Removing a callback detaches it (clears its `learn` reference) and drops its attribute
learn = synth_learner()
learn.add_cb(TestTrainEvalCallback())
cb = learn.cbs[1]
learn.remove_cb(learn.cbs[1])
test_eq(len(learn.cbs), 1)
assert cb.learn is None
assert not getattr(learn,'test_train_eval',None)
# `cb` can simply be the class of the `Callback` we want to remove (in which case all instances of that callback are removed).
learn = synth_learner()
learn.add_cbs([TestTrainEvalCallback(), TestTrainEvalCallback()])
learn.remove_cb(TestTrainEvalCallback)
test_eq(len(learn.cbs), 1)
assert not getattr(learn,'test_train_eval',None)
show_doc(Learner.remove_cbs)
# Elements of `cbs` can either be types of callbacks or actual callbacks of the `Learner`.
learn = synth_learner()
learn.add_cbs([TestTrainEvalCallback() for _ in range(3)])
cb = learn.cbs[1]
learn.remove_cbs(learn.cbs[1:])
test_eq(len(learn.cbs), 1)
show_doc(Learner.removed_cbs)
# Elements of `cbs` can either be types of callbacks or actual callbacks of the `Learner`.
# `removed_cbs` is a context manager: the callback is re-attached when the block exits
learn = synth_learner()
learn.add_cb(TestTrainEvalCallback())
with learn.removed_cbs(learn.cbs[1]):
    test_eq(len(learn.cbs), 1)
test_eq(len(learn.cbs), 2)
show_doc(Learner.show_training_loop)
# At each step, callbacks are shown in order, which can help debugging.
learn = synth_learner()
learn.show_training_loop()
#export
def _before_batch_cb(f, self):
xb,yb = f(self, self.xb, self.yb)
self.learn.xb,self.learn.yb = xb,yb
#export
def before_batch_cb(f):
    "Shortcut for creating a Callback on the `before_batch` event, which takes and returns `xb,yb`"
    # `f` receives (self, xb, yb) and must return the new (xb, yb); see `_before_batch_cb`
    return Callback(before_batch=partial(_before_batch_cb, f))
# In order to change the data passed to your model, you will generally want to hook into the `before_batch` event, like so:
class TstCallback(Callback):
    def before_batch(self):
        # Assign through `self.learn` so the learner's attributes are actually updated
        self.learn.xb = self.xb + 1000
        self.learn.yb = self.yb - 1000
# Since that is so common, we provide the `before_batch_cb` decorator to make it easier.
@before_batch_cb
def cb(self, xb, yb): return xb+1000,yb-1000
# ### Serializing
#export
@patch
@delegates(save_model)
def save(self:Learner, file, **kwargs):
    "Save model and optimizer state (if `with_opt`) to `self.path/self.model_dir/file`"
    # Resolve `file` relative to the learner's model directory, adding the .pth extension
    file = join_path_file(file, self.path/self.model_dir, ext='.pth')
    save_model(file, self.model, getattr(self,'opt',None), **kwargs)
    return file
# `file` can be a `Path`, a `string` or a buffer. `pickle_protocol` is passed along to `torch.save`.
#export
@patch
@delegates(load_model)
def load(self:Learner, file, device=None, **kwargs):
    "Load model and optimizer state (if `with_opt`) from `self.path/self.model_dir/file` using `device`"
    # Default to the DataLoaders' device so the model lands where the data is
    if device is None and hasattr(self.dls, 'device'): device = self.dls.device
    # The optimizer must exist before its state can be restored
    if self.opt is None: self.create_opt()
    file = join_path_file(file, self.path/self.model_dir, ext='.pth')
    load_model(file, self.model, self.opt, device=device, **kwargs)
    return self
# `file` can be a `Path`, a `string` or a buffer. Use `device` to load the model/optimizer state on a device different from the one it was saved.
# Round-trip: saving then loading restores model params and optimizer state
with tempfile.TemporaryDirectory() as d:
    learn = synth_learner(path=d)
    learn.fit(1)
    #Test save created a file
    learn.save('tmp')
    assert (Path(d)/'models/tmp.pth').exists()
    #Test load did load the model
    learn1 = synth_learner(path=d)
    learn1 = learn1.load('tmp')
    test_eq(learn.a, learn1.a)
    test_eq(learn.b, learn1.b)
    test_eq(learn.opt.state_dict(), learn1.opt.state_dict())
#hide
#Test load works when the model is saved without opt
with tempfile.TemporaryDirectory() as d:
    learn = synth_learner(path=d)
    learn.fit(1)
    learn.save('tmp', with_opt=False)
    learn1 = synth_learner(path=d)
    learn1 = learn1.load('tmp', with_opt=False)
    test_eq(learn.a, learn1.a)
    test_eq(learn.b, learn1.b)
    test_ne(learn.opt.state_dict(), learn1.opt.state_dict())
#export
@patch
def export(self:Learner, fname='export.pkl', pickle_module=pickle, pickle_protocol=2):
    "Export the content of `self` without the items and the optimizer state for inference"
    if rank_distrib(): return # don't export if child proc
    self._end_cleanup()
    # Temporarily strip the data and optimizer so the pickle stays small, restoring them after
    old_dbunch = self.dls
    self.dls = self.dls.new_empty()
    state = self.opt.state_dict() if self.opt is not None else None
    self.opt = None
    with warnings.catch_warnings():
        #To avoid the warning that come from PyTorch about model not being checked
        warnings.simplefilter("ignore")
        torch.save(self, self.path/fname, pickle_module=pickle_module, pickle_protocol=pickle_protocol)
    # Restore optimizer (with its saved state) and the original DataLoaders
    self.create_opt()
    if state is not None: self.opt.load_state_dict(state)
    self.dls = old_dbunch
# The `Learner` is saved in `self.path/fname`, using `pickle_protocol`. Note that serialization in Python saves the names of functions, not the code itself. Therefore, any custom code you have for models, data transformation, loss function etc... should be put in a module that you will import in your training environment before exporting, and in your deployment environment before loading it.
#export
def load_learner(fname, cpu=True, pickle_module=pickle):
    "Load a `Learner` object in `fname`, optionally putting it on the `cpu`"
    distrib_barrier()
    # NOTE(review): `torch.load` unpickles arbitrary objects — only load files you trust
    res = torch.load(fname, map_location='cpu' if cpu else None, pickle_module=pickle_module)
    # Drop back to full precision if the learner was exported in mixed precision
    if hasattr(res, 'to_fp32'): res = res.to_fp32()
    if cpu: res.dls.cpu()
    return res
# > Warning: `load_learner` requires all your custom code be in the exact same place as when exporting your `Learner` (the main script, or the module you imported it from).
# ### DataLoader aware `to_detach` -
# fastai provides `to_detach` which by default detachs tensor gradients, and gathers (calling `maybe_gather`) tensors from all ranks if running in distributed data parallel (DDP) mode.
#
# When running in DDP mode all ranks need to have the same batch size, and `DistributedDL` takes care of padding batches as needed; however when gathering all tensors (e.g. for calculating metrics, inference, etc.) we need to discard the padded items. `DistributedDL` provides a method `to_detach` that removes padding appropriately.
#
# Calling `to_detach_from_dl` with `learn` as a learner will attempt to find a `to_detach` method in the learner's last used `DataLoader` `dl` and use that one if found, otherwise it will resort to the vanilla `to_detach`.
#export
def to_detach_from_dl(learn:(Learner,NoneType),b:object,cpu:bool=True,gather:bool=True):
    "Detach `b` via the learner's last-used `DataLoader.to_detach` when available, else the vanilla `to_detach`"
    dl = getattr(learn, 'dl', None)
    if hasattr(dl, 'to_detach'): return dl.to_detach(b, cpu, gather)
    return to_detach(b, cpu, gather)
#hide
# With no `dl` (or a dl without `to_detach`) the vanilla behavior is used; a custom
# `dl.to_detach` takes over when present
learn = synth_learner()
test_eq(to_detach_from_dl(learn,Tensor([123])),Tensor([123]))
learn.dl = learn.dls[0]
test_eq(to_detach_from_dl(learn,Tensor([123])),Tensor([123]))
learn.dl.to_detach = lambda b,cpu,gather: b-100
test_eq(to_detach_from_dl(learn,Tensor([123.])),Tensor([23.]))
# ## Metrics -
#export
@docs
class Metric():
    "Blueprint for defining a metric"
    # Subclasses override `reset`/`accumulate` and must implement `value`
    def reset(self): pass
    def accumulate(self, learn): pass
    @property
    def value(self): raise NotImplementedError
    @property
    def name(self): return class2attr(self, 'Metric')
    _docs = dict(
        reset="Reset inner state to prepare for new computation",
        name="Name of the `Metric`, camel-cased and with Metric removed",
        accumulate="Use `learn` to update the state with new results",
        value="The value of the metric")
show_doc(Metric, title_level=3)
# Metrics can be simple averages (like accuracy) but sometimes their computation is a little bit more complex and can't be averaged over batches (like precision or recall), which is why we need a special class for them. For simple functions that can be computed as averages over batches, we can use the class `AvgMetric`, otherwise you'll need to implement the following methods.
#
# > Note: If your <code>Metric</code> has state depending on tensors, don't forget to store it on the CPU to avoid any potential memory leaks.
show_doc(Metric.reset)
show_doc(Metric.accumulate)
show_doc(Metric.value, name='Metric.value')
show_doc(Metric.name, name='Metric.name')
#export
def _maybe_reduce(val):
    "Average `val` across all processes when running distributed; otherwise return it untouched"
    if num_distrib() <= 1: return val
    # Work on a clone so the caller's tensor is not mutated by the in-place all_reduce
    val = val.clone()
    torch.distributed.all_reduce(val, op=torch.distributed.ReduceOp.SUM)
    val /= num_distrib()
    return val
#export
class AvgMetric(Metric):
    "Average the values of `func` taking into account potential different batch sizes"
    def __init__(self, func): self.func = func
    def reset(self):
        "Clear the running total and sample count"
        self.total,self.count = 0.,0
    def accumulate(self, learn):
        "Add `func(pred, *yb)` for the current batch, weighted by its size"
        bs = find_bs(learn.yb)
        self.count += bs
        self.total += learn.to_detach(self.func(learn.pred, *learn.yb))*bs
    @property
    def value(self):
        "Batch-size-weighted mean so far (`None` before any batch)"
        return None if self.count == 0 else self.total/self.count
    @property
    def name(self):
        "Name of the wrapped function (unwrapping a `partial` if needed)"
        return getattr(self.func, 'func', self.func).__name__
show_doc(AvgMetric, title_level=3)
# Accumulating over equal batches gives the plain mean
learn = synth_learner()
tst = AvgMetric(lambda x,y: (x-y).abs().mean())
t,u = torch.randn(100),torch.randn(100)
tst.reset()
for i in range(0,100,25):
    learn.pred,learn.yb = t[i:i+25],(u[i:i+25],)
    tst.accumulate(learn)
test_close(tst.value, (t-u).abs().mean())
#hide
#With varying batch size
tst.reset()
splits = [0, 30, 50, 60, 100]
for i in range(len(splits )-1):
    learn.pred,learn.yb = t[splits[i]:splits[i+1]],(u[splits[i]:splits[i+1]],)
    tst.accumulate(learn)
test_close(tst.value, (t-u).abs().mean())
#export
class AvgLoss(Metric):
    "Average the losses taking into account potential different batch sizes"
    def reset(self):
        "Clear the running total and sample count"
        self.total,self.count = 0.,0
    def accumulate(self, learn):
        "Add the current batch loss, weighted by the batch size"
        bs = find_bs(learn.yb)
        self.count += bs
        self.total += learn.to_detach(learn.loss.mean())*bs
    @property
    def value(self):
        "Batch-size-weighted mean loss so far (`None` before any batch)"
        return None if self.count == 0 else self.total/self.count
    @property
    def name(self): return "loss"
show_doc(AvgLoss, title_level=3)
# Accumulating over equal batches gives the plain mean of the losses
tst = AvgLoss()
t = torch.randn(100)
tst.reset()
for i in range(0,100,25):
    learn.yb,learn.loss = t[i:i+25],t[i:i+25].mean()
    tst.accumulate(learn)
test_close(tst.value, t.mean())
#hide
#With varying batch size
tst.reset()
splits = [0, 30, 50, 60, 100]
for i in range(len(splits )-1):
    learn.yb,learn.loss = t[splits[i]:splits[i+1]],t[splits[i]:splits[i+1]].mean()
    tst.accumulate(learn)
test_close(tst.value, t.mean())
#export
class AvgSmoothLoss(Metric):
    "Smooth average of the losses (exponentially weighted with `beta`)"
    def __init__(self, beta=0.98): self.beta = beta
    def reset(self):
        "Restart the exponential moving average"
        self.count,self.val = 0,tensor(0.)
    def accumulate(self, learn):
        "Blend the current batch loss into the moving average"
        self.count += 1
        batch_loss = to_detach(learn.loss.mean(), gather=False)
        self.val = torch.lerp(batch_loss, self.val, self.beta)
    @property
    def value(self):
        "Debiased moving average (corrects the zero initialization bias)"
        return self.val/(1-self.beta**self.count)
show_doc(AvgSmoothLoss, title_level=3)
# Recompute the debiased exponential moving average by hand and compare at each step
tst = AvgSmoothLoss()
t = torch.randn(100)
tst.reset()
val = tensor(0.)
for i in range(4):
    learn.loss = t[i*25:(i+1)*25].mean()
    tst.accumulate(learn)
    val = val*0.98 + t[i*25:(i+1)*25].mean()*(1-0.98)
    test_close(val/(1-0.98**(i+1)), tst.value)
#export
class ValueMetric(Metric):
    "Use to include a pre-calculated metric value (for instance calculated in a `Callback`) and returned by `func`"
    def __init__(self, func, metric_name=None): store_attr('func, metric_name')
    @property
    def value(self): return self.func()
    @property
    def name(self):
        # Fall back to the function's name when no explicit metric name was given
        return self.metric_name if self.metric_name else self.func.__name__
show_doc(ValueMetric, title_level=3)
# +
def metric_value_fn(): return 5e-3
vm = ValueMetric(metric_value_fn, 'custom_value_metric')
test_eq(vm.value, 5e-3)
test_eq(vm.name, 'custom_value_metric')
vm = ValueMetric(metric_value_fn)
test_eq(vm.name, 'metric_value_fn')
# -
# ## Recorder --
#export
from fastprogress.fastprogress import format_time
#export
def _maybe_item(t):
t = t.value
try: return t.item()
except: return t
#export
class Recorder(Callback):
    "Callback that registers statistics (lr, loss and metrics) during training"
    # Attributes saved/restored with the learner's state
    _stateattrs=('lrs','iters','losses','values')
    remove_on_fetch,order = True,50
    def __init__(self, add_time=True, train_metrics=False, valid_metrics=True, beta=0.98):
        store_attr('add_time,train_metrics,valid_metrics')
        self.loss,self.smooth_loss = AvgLoss(),AvgSmoothLoss(beta=beta)
    def before_fit(self):
        "Prepare state for training"
        self.lrs,self.iters,self.losses,self.values = [],[],[],[]
        names = self.metrics.attrgot('name')
        # Build the column headers depending on which metric sets are tracked
        if self.train_metrics and self.valid_metrics:
            names = L('loss') + names
            names = names.map('train_{}') + names.map('valid_{}')
        elif self.valid_metrics: names = L('train_loss', 'valid_loss') + names
        else: names = L('train_loss') + names
        if self.add_time: names.append('time')
        self.metric_names = 'epoch'+names
        self.smooth_loss.reset()
    def after_batch(self):
        "Update all metrics and records lr and smooth loss in training"
        # Skip batches without targets (e.g. inference)
        if len(self.yb) == 0: return
        mets = self._train_mets if self.training else self._valid_mets
        for met in mets: met.accumulate(self.learn)
        if not self.training: return
        # lr of the last param group; smoothed loss exposed on the learner as well
        self.lrs.append(self.opt.hypers[-1]['lr'])
        self.losses.append(self.smooth_loss.value)
        self.learn.smooth_loss = self.smooth_loss.value
    def before_epoch(self):
        "Set timer if `self.add_time=True`"
        self.cancel_train,self.cancel_valid = False,False
        if self.add_time: self.start_epoch = time.time()
        self.log = L(getattr(self, 'epoch', 0))
    def before_train (self): self._train_mets[1:].map(Self.reset())
    def before_validate(self): self._valid_mets.map(Self.reset())
    def after_train (self): self.log += self._train_mets.map(_maybe_item)
    def after_validate(self): self.log += self._valid_mets.map(_maybe_item)
    def after_cancel_train(self): self.cancel_train = True
    def after_cancel_validate(self): self.cancel_valid = True
    def after_epoch(self):
        "Store and log the loss/metric values"
        # `final_record` excludes the epoch number; time is appended only to the printed log
        self.learn.final_record = self.log[1:].copy()
        self.values.append(self.learn.final_record)
        if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
        self.logger(self.log)
        self.iters.append(self.smooth_loss.count)
    @property
    def _train_mets(self):
        if getattr(self, 'cancel_train', False): return L()
        # The smoothed loss is always tracked in training; extra metrics only if train_metrics
        return L(self.smooth_loss) + (self.metrics if self.train_metrics else L())
    @property
    def _valid_mets(self):
        if getattr(self, 'cancel_valid', False): return L()
        # Note the precedence: (loss + metrics) if valid_metrics else nothing at all
        return (L(self.loss) + self.metrics if self.valid_metrics else L())
    def plot_loss(self, skip_start=5, with_valid=True):
        plt.plot(list(range(skip_start, len(self.losses))), self.losses[skip_start:], label='train')
        if with_valid:
            # Drop validation points recorded before `skip_start` training iterations
            idx = (np.array(self.iters)<skip_start).sum()
            plt.plot(self.iters[idx:], L(self.values[idx:]).itemgot(1), label='valid')
            plt.legend()
# +
#export
# NOTE: the attribute stored by `Recorder.__init__` is `train_metrics`
# (there is no `training_metrics`), so the doc below references `train_metrics`.
add_docs(Recorder,
         before_train = "Reset loss and metrics state",
         after_train = "Log loss and metric values on the training set (if `self.train_metrics=True`)",
         before_validate = "Reset loss and metrics state",
         after_validate = "Log loss and metric values on the validation set",
         after_cancel_train = "Ignore training metrics for this epoch",
         after_cancel_validate = "Ignore validation metrics for this epoch",
         plot_loss = "Plot the losses from `skip_start` and onward")

if Recorder not in defaults.callbacks: defaults.callbacks.append(Recorder)
# -
# By default, metrics are computed on the validation set only, although that can be changed by adjusting `train_metrics` and `valid_metrics`. `beta` is the weight used to compute the exponentially weighted average of the losses (which gives the `smooth_loss` attribute to `Learner`).
#
# The `logger` attribute of a `Learner` determines what happens to those metrics. By default, it just print them:
#Test printed output
def tst_metric(out, targ): return F.mse_loss(out, targ)
learn = synth_learner(n_train=5, metrics=tst_metric)
# pat = r"[tensor\(\d.\d*\), tensor\(\d.\d*\), tensor\(\d.\d*\), 'dd:dd']"
# Expected stdout: [epoch, train_loss, valid_loss, metric, 'mm:ss']
pat = r"\[\d, \d+.\d+, \d+.\d+, \d+.\d+, '\d\d:\d\d'\]"
test_stdout(lambda: learn.fit(1), pat, regex=True)
#hide
class TestRecorderCallback(Callback):
    "Test callback checking the `Recorder`'s bookkeeping (lrs, smoothed loss, metric counts, log)"
    # Run right after Recorder (order=50) so its per-event updates are visible
    order=51
    def before_fit(self):
        self.train_metrics,self.add_time = self.recorder.train_metrics,self.recorder.add_time
        self.beta = self.recorder.smooth_loss.beta
        for m in self.metrics: assert isinstance(m, Metric)
        test_eq(self.recorder.smooth_loss.val, 0.)
        #To test what the recorder logs, we use a custom logger function.
        self.learn.logger = self.test_log
        self.old_smooth,self.count = tensor(0.),0
    def after_batch(self):
        if self.training:
            self.count += 1
            test_eq(len(self.recorder.lrs), self.count)
            test_eq(self.recorder.lrs[-1], self.opt.hypers[-1]['lr'])
            test_eq(len(self.recorder.losses), self.count)
            # Recompute the debiased EMA by hand and compare to the recorder
            smooth = (1 - self.beta**(self.count-1)) * self.old_smooth * self.beta + self.loss * (1-self.beta)
            smooth /= 1 - self.beta**self.count
            test_close(self.recorder.losses[-1], smooth, eps=1e-4)
            test_close(self.smooth_loss, smooth, eps=1e-4)
            self.old_smooth = self.smooth_loss
        self.bs += find_bs(self.yb)
        if not self.training: test_eq(self.recorder.loss.count, self.bs)
        if self.train_metrics or not self.training:
            for m in self.metrics: test_eq(m.count, self.bs)
        self.losses.append(self.loss.detach().cpu())
    def before_epoch(self):
        if self.add_time: self.start_epoch = time.time()
        self.log = [self.epoch]
    def before_train(self):
        self.bs = 0
        self.losses = []
        # All training metrics must have been reset to zero counts
        for m in self.recorder._train_mets: test_eq(m.count, self.bs)
    def after_train(self):
        mean = tensor(self.losses).mean()
        self.log += [self.smooth_loss, mean] if self.train_metrics else [self.smooth_loss]
        test_close(self.log, self.recorder.log)
        self.losses = []
    def before_validate(self):
        self.bs = 0
        self.losses = []
        for m in [self.recorder.loss] + self.metrics: test_eq(m.count, self.bs)
    def test_log(self, log):
        # Replaces learn.logger: checks the final logged row against our own bookkeeping
        res = tensor(self.losses).mean()
        self.log += [res, res]
        if self.add_time: self.log.append(format_time(time.time() - self.start_epoch))
        test_close(log[:-1], self.log[:-1])
        test_eq(log[-1], self.log[-1])
# +
#hide
# Check metric_names columns for the three configurations of train/valid metrics and time
learn = synth_learner(n_train=5, metrics = tst_metric, cbs = TestRecorderCallback)
learn.fit(1)
test_eq(learn.recorder.metric_names, ['epoch', 'train_loss', 'valid_loss', 'tst_metric', 'time'])
learn = synth_learner(n_train=5, metrics = tst_metric, cbs = TestRecorderCallback)
learn.recorder.train_metrics=True
learn.fit(1)
test_eq(learn.recorder.metric_names,
        ['epoch', 'train_loss', 'train_tst_metric', 'valid_loss', 'valid_tst_metric', 'time'])
learn = synth_learner(n_train=5, metrics = tst_metric, cbs = TestRecorderCallback)
learn.recorder.add_time=False
learn.fit(1)
test_eq(learn.recorder.metric_names, ['epoch', 'train_loss', 'valid_loss', 'tst_metric'])
# -
#hide
#Test numpy metric
def tst_metric_np(out, targ): return F.mse_loss(out, targ).numpy()
learn = synth_learner(n_train=5, metrics=tst_metric_np)
learn.fit(1)
# ### Internals
show_doc(Recorder.before_fit)
show_doc(Recorder.before_epoch)
show_doc(Recorder.before_validate)
show_doc(Recorder.after_batch)
show_doc(Recorder.after_epoch)
# ### Plotting tools
show_doc(Recorder.plot_loss)
#hide
learn.recorder.plot_loss(skip_start=1)
# ## Inference functions
show_doc(Learner.validate)
#Test result
# `validate` returns [loss, *metrics]; with an MSE metric both entries are equal here
learn = synth_learner(n_train=5, metrics=tst_metric)
res = learn.validate()
test_eq(res[0], res[1])
x,y = learn.dls.valid_ds.tensors
test_close(res[0], F.mse_loss(learn.model(x), y), 1e-3)
#hide
#Test other dl
res = learn.validate(dl=learn.dls.train)
test_eq(res[0], res[1])
x,y = learn.dls.train_ds.tensors
test_close(res[0], F.mse_loss(learn.model(x), y), 1e-3)
show_doc(Learner.get_preds)
# `with_decoded` will also return the decoded predictions using the <code>decodes</code> function of the loss function (if it exists). For instance, fastai's `CrossEntropyFlat` takes the argmax or predictions in its decodes.
#
# Depending on the `loss_func` attribute of `Learner`, an activation function will be picked automatically so that the predictions make sense. For instance if the loss is a case of cross-entropy, a softmax will be applied, or if the loss is binary cross entropy with logits, a sigmoid will be applied. If you want to make sure a certain activation function is applied, you can pass it with `act`.
#
# `save_preds` and `save_targs` should be used when your predictions are too big to fit all in memory. Give a `Path` object that points to a folder where the predictions and targets will be saved.
#
# `concat_dim` is the batch dimension, where all the tensors will be concatenated.
#
# `inner` is an internal attribute that tells `get_preds` it's called internally, inside another training loop, to avoid recursion errors.
# > Note: If you want to use the option `with_loss=True` on a custom loss function, make sure you have implemented a `reduction` attribute that supports 'none'
# +
#Test result
learn = synth_learner(n_train=5, metrics=tst_metric)
preds,targs = learn.get_preds()
x,y = learn.dls.valid_ds.tensors
test_eq(targs, y)
test_close(preds, learn.model(x))
# An explicit activation function is applied to the raw model output
preds,targs = learn.get_preds(act = torch.sigmoid)
test_eq(targs, y)
test_close(preds, torch.sigmoid(learn.model(x)))
# -
#hide
#Test get_preds work with ds not evenly divisible by bs
learn = synth_learner(n_train=2.5, metrics=tst_metric)
preds,targs = learn.get_preds(ds_idx=0)
# +
#hide
#Test other dataset
x = torch.randn(16*5)
y = 2*x + 3 + 0.1*torch.randn(16*5)
dl = TfmdDL(TensorDataset(x, y), bs=16)
preds,targs = learn.get_preds(dl=dl)
test_eq(targs, y)
test_close(preds, learn.model(x))
#Test with loss
preds,targs,losses = learn.get_preds(dl=dl, with_loss=True)
test_eq(targs, y)
test_close(preds, learn.model(x))
test_close(losses, F.mse_loss(preds, targs, reduction='none'))
#Test with inputs
inps,preds,targs = learn.get_preds(dl=dl, with_input=True)
test_eq(inps,x)
test_eq(targs, y)
test_close(preds, learn.model(x))
# -
#hide
#Test with no target
learn = synth_learner(n_train=5)
x = torch.randn(16*5)
dl = TfmdDL(TensorDataset(x), bs=16)
preds,targs = learn.get_preds(dl=dl)
assert targs is None
# +
#hide
#Test with targets that are tuples
def _fake_loss(x,y,z,reduction=None): return F.mse_loss(x,y)
learn = synth_learner(n_train=5)
x = torch.randn(16*5)
y = 2*x + 3 + 0.1*torch.randn(16*5)
learn.dls.n_inp=1
learn.loss_func = _fake_loss
dl = TfmdDL(TensorDataset(x, y, y), bs=16)
preds,targs = learn.get_preds(dl=dl)
test_eq(targs, [y,y])
# +
#hide
#Test with inputs that are tuples
class _TupleModel(Module):
    # Wraps a model so it accepts two inputs but only forwards the first
    def __init__(self, model): self.model=model
    def forward(self, x1, x2): return self.model(x1)
learn = synth_learner(n_train=5)
#learn.dls.n_inp=2
x = torch.randn(16*5)
y = 2*x + 3 + 0.1*torch.randn(16*5)
learn.model = _TupleModel(learn.model)
learn.dls = DataLoaders(TfmdDL(TensorDataset(x, x, y), bs=16),TfmdDL(TensorDataset(x, x, y), bs=16))
inps,preds,targs = learn.get_preds(ds_idx=0, with_input=True)
test_eq(inps, [x,x])
t = learn.get_preds(ds_idx=0, with_input=True)
# -
#hide
#Test auto activation function is picked
# A BCE-with-logits loss implies a sigmoid activation on the predictions
learn = synth_learner(n_train=5)
learn.loss_func = BCEWithLogitsLossFlat()
x = torch.randn(16*5)
y = 2*x + 3 + 0.1*torch.randn(16*5)
dl = TfmdDL(TensorDataset(x, y), bs=16)
preds,targs = learn.get_preds(dl=dl)
test_close(preds, torch.sigmoid(learn.model(x)))
#hide
#Test reorder is done
# Even with a shuffled dl, results come back in the original dataset order
learn = synth_learner(n_train=5)
x = torch.randn(16*5)
y = 2*x + 3 + 0.1*torch.randn(16*5)
dl = TfmdDL(TensorDataset(x, y), bs=16, shuffle=True)
preds,targs = learn.get_preds(dl=dl)
test_eq(targs, y)
#hide
inps,preds,targs = learn.get_preds(ds_idx=0, with_input=True)
tst = learn.get_preds(ds_idx=0, with_input=True, with_decoded=True)
show_doc(Learner.predict)
# It returns a tuple of three elements with, in reverse order,
# - the prediction from the model, potentially passed through the activation of the loss function (if it has one)
# - the decoded prediction, using the potential <code>decodes</code> method from it
# - the fully decoded prediction, using the transforms used to build the `Datasets`/`DataLoaders`
# `rm_type_tfms` is a deprecated argument that should not be used and will be removed in a future version. `with_input` will add the decoded inputs to the result.
# +
class _FakeLossFunc(Module):
    # Loss with a custom activation (+1) and decodes (*2) to trace `predict`'s pipeline
    reduction = 'none'
    def forward(self, x, y): return F.mse_loss(x,y)
    def activation(self, x): return x+1
    def decodes(self, x): return 2*x
class _Add1(Transform):
    # Dataset transform whose decode (-1) is the last step of full decoding
    def encodes(self, x): return x+1
    def decodes(self, x): return x-1
learn = synth_learner(n_train=5)
dl = TfmdDL(Datasets(torch.arange(50), tfms = [L(), [_Add1()]]))
learn.dls = DataLoaders(dl, dl)
learn.loss_func = _FakeLossFunc()
inp = tensor([2.])
out = learn.model(inp).detach()+1 #applying model + activation
dec = 2*out #decodes from loss function
full_dec = dec-1 #decodes from _Add1
test_eq(learn.predict(inp), [full_dec,dec,out])
test_eq(learn.predict(inp, with_input=True), [inp,full_dec,dec,out])
# -
show_doc(Learner.show_results)
# Will show `max_n` samples (unless the batch size of `ds_idx` or `dl` is less than `max_n`, in which case it will show as many samples) and `shuffle` the data unless you pass `false` to that flag. `kwargs` are application-dependent.
#
# We can't show an example on our synthetic `Learner`, but check all the beginners tutorials which will show you how that method works across applications.
# The last functions in this section are used internally for inference, but should be less useful to you.
show_doc(Learner.no_logging)
# Inside `no_logging` nothing is printed; the default `print` logger is restored afterwards
learn = synth_learner(n_train=5, metrics=tst_metric)
with learn.no_logging():
    test_stdout(lambda: learn.fit(1), '')
test_eq(learn.logger, print)
show_doc(Learner.loss_not_reduced)
# This requires your loss function to either have a `reduction` attribute or a `reduction` argument (like all fastai and PyTorch loss functions).
#hide
# `loss_not_reduced` temporarily switches reduction to 'none' and restores it on exit
test_eq(learn.loss_func.reduction, 'mean')
with learn.loss_not_reduced():
    test_eq(learn.loss_func.reduction, 'none')
    x,y = learn.dls.one_batch()
    p = learn.model(x)
    losses = learn.loss_func(p, y)
    test_eq(losses.shape, y.shape)
    test_eq(losses, F.mse_loss(p,y, reduction='none'))
test_eq(learn.loss_func.reduction, 'mean')
# ## Transfer learning
# +
#export
@patch
def freeze_to(self:Learner, n):
    # Delegates to the optimizer; clears its state since frozen params no longer train
    if self.opt is None: self.create_opt()
    self.opt.freeze_to(n)
    self.opt.clear_state()
@patch
def freeze(self:Learner): self.freeze_to(-1)
@patch
def unfreeze(self:Learner): self.freeze_to(0)
add_docs(Learner,
         freeze_to="Freeze parameter groups up to `n`",
         freeze="Freeze up to last parameter group",
         unfreeze="Unfreeze the entire model")
# +
#hide
class _TstModel(nn.Module):
def __init__(self):
super().__init__()
self.a,self.b = nn.Parameter(torch.randn(1)),nn.Parameter(torch.randn(1))
self.tst = nn.Sequential(nn.Linear(4,5), nn.BatchNorm1d(3))
self.tst[0].bias.data,self.tst[1].bias.data = torch.randn(5),torch.randn(3)
def forward(self, x): return x * self.a + self.b
class _PutGrad(Callback):
    "Callback that fills unit gradients into every trainable parameter of `learn.tst`."
    def before_step(self):
        trainable = (p for p in self.learn.tst.parameters() if p.requires_grad)
        for p in trainable:
            p.grad = torch.ones_like(p.data)
def _splitter(m): return [list(m.tst[0].parameters()), list(m.tst[1].parameters()), [m.a,m.b]]
# With `_PutGrad` every trainable param gets grad == 1, so after one epoch of
# plain SGD at lr=1e-2 each trained param moves by exactly -lr * n_batches = -0.05.
learn = synth_learner(n_train=5, opt_func = partial(SGD), cbs=_PutGrad, splitter=_splitter, lr=1e-2)
learn.model = _TstModel()
learn.freeze()
init = [p.clone() for p in learn.tst.parameters()]
learn.fit(1, wd=0.)
end = list(learn.tst.parameters())
#linear was not trained
for i in [0,1]: test_close(end[i],init[i])
#bn was trained even frozen since `train_bn=True` by default
for i in [2,3]: test_close(end[i]-init[i], -0.05 * torch.ones_like(end[i]))
# +
#hide
# Same setup as above but with `train_bn=False`: now a full freeze stops
# batch-norm from training too, and freeze_to/unfreeze progressively re-enable groups.
learn = synth_learner(n_train=5, opt_func = partial(SGD), cbs=_PutGrad, splitter=_splitter, train_bn=False, lr=1e-2)
learn.model = _TstModel()
learn.freeze()
init = [p.clone() for p in learn.tst.parameters()]
learn.fit(1, wd=0.)
end = list(learn.tst.parameters())
#linear and bn were not trained
for i in range(4): test_close(end[i],init[i])
# Unfreeze the last two groups (bn head + scalars); the linear head stays frozen.
learn.freeze_to(-2)
init = [p.clone() for p in learn.tst.parameters()]
learn.fit(1, wd=0.)
end = list(learn.tst.parameters())
#linear was not trained
for i in [0,1]: test_close(end[i],init[i])
#bn was trained
for i in [2,3]: test_close(end[i]-init[i], -0.05 * torch.ones_like(end[i]))
learn.unfreeze()
init = [p.clone() for p in learn.tst.parameters()]
learn.fit(1, wd=0.)
end = list(learn.tst.parameters())
#linear and bn were trained
for i in range(4): test_close(end[i]-init[i], -0.05 * torch.ones_like(end[i]), 1e-3)
# -
# ## TTA
#export
@patch
def tta(self:Learner, ds_idx=1, dl=None, n=4, item_tfms=None, batch_tfms=None, beta=0.25, use_max=False):
    "Return predictions on the `ds_idx` dataset or `dl` using Test Time Augmentation"
    if dl is None: dl = self.dls[ds_idx]
    # Optionally swap in new item/batch transforms for the augmented passes.
    if item_tfms is not None or batch_tfms is not None: dl = dl.new(after_item=item_tfms, after_batch=batch_tfms)
    try:
        self(_before_epoch)
        # split_idx=0 makes the dataset apply its *training* transforms (the augmentation).
        with dl.dataset.set_split_idx(0), self.no_mbar():
            if hasattr(self,'progress'): self.progress.mbar = master_bar(list(range(n)))
            aug_preds = []
            # Run `n` augmented prediction passes, collecting each as a leading-dim slice.
            for i in self.progress.mbar if hasattr(self,'progress') else range(n):
                self.epoch = i #To keep track of progress on mbar since the progress callback will use self.epoch
                aug_preds.append(self.get_preds(dl=dl, inner=True)[0][None])
        aug_preds = torch.cat(aug_preds)
        # Reduce the n augmented passes to a single prediction per item.
        aug_preds = aug_preds.max(0)[0] if use_max else aug_preds.mean(0)
        self.epoch = n
        # split_idx=1: one final non-augmented (validation-transform) pass.
        with dl.dataset.set_split_idx(1): preds,targs = self.get_preds(dl=dl, inner=True)
    finally: self(event.after_fit)
    if use_max: return torch.stack([preds, aug_preds], 0).max(0)[0],targs
    # beta=None returns both; otherwise linearly interpolate augmented vs plain preds.
    preds = (aug_preds,preds) if beta is None else torch.lerp(aug_preds, preds, beta)
    return preds,targs
# In practice, we get the predictions `n` times with the transforms of the training set and average those. The final predictions are `(1-beta)` multiplied by this average + `beta` multiplied by the predictions obtained with the transforms of the dataset. Set `beta` to `None` to get a tuple of the predictions and tta results. You can also use the maximum of all predictions instead of an average by setting `use_max=True`.
#
# If you want to use new transforms, you can pass them with `item_tfms` and `batch_tfms`.
#hide
# Smoke-test `Learner.tta` on a synthetic learner.
learn = synth_learner()
dl = TfmdDL(Datasets(torch.arange(50), [noop,noop]))
learn.dls = DataLoaders(dl, dl)
preds,targs = learn.tta()
# Bug fix: the original `assert len(preds),len(targs)` parsed as
# `assert len(preds), "<message>"`, so `len(targs)` was never checked.
assert len(preds) == len(targs)
# ## Export -
#hide
from nbdev.export import notebook2script
notebook2script()
|
nbs/13a_learner.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction
#
#
# The selection effects assign a probability of observing a system.
#
# We determine if we can confidently select a system by choosing a signal-to-noise ratio (SNR), for which we often use 8.
#
# The SNR depends on the individual component masses, the distance and the orientation of the binary compared to the gravitational wave detector.
#
# By sampling uniformly over the possible orientations of the system
# we can assign the fraction of the time the system at that distance can be observed. This fraction is the probability.
#
#
# The SNR also depends on the sensitivity of the detector
#
# combined we need to pass to the function
#
# detection_probability(m1, m2, redshift, distance, snr_threshold,sensitivity='design')
#
# If you use this pipeline we would appreciate it if you cite
# Selection effects ; https://arxiv.org/pdf/1711.06287
#
# # Paths
# +
import os
# Resolve the notebook directory so the helper scripts can be imported
# from the adjacent PythonScripts/ folder.
pathNoteBook = os.getcwd()
pathClassCOMPAS = pathNoteBook + '/PythonScripts/'
# -
# # Imports
import numpy as np
import sys
#custom scripts
sys.path.append(pathClassCOMPAS)
import selection_effects
# # Quick example
# Example: detection probability for a 40+40 Msun binary at z=0.1 with O1 sensitivity.
m1 = 40 #Msun
m2 = 40 #Msun
redshift = 0.1
distance = 463.4 # Mpc quick estimate for illustration purposes
# code uses astropy to convert
# redshift to luminosity distance
snr_threshold = 8
sensitivity = 'O1'
P = selection_effects.detection_probability(m1, m2, redshift, distance, snr_threshold,sensitivity=sensitivity)
print(P)
|
postProcessing/Folders/CosmicIntegration/3_SelectionEffects.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **Chapter 4 – Training Linear Models**
# _This notebook contains all the sample code and solutions to the exercises in chapter 4._
# # Setup
# First, let's import a few common modules, ensure MatplotLib plots figures inline and prepare a function to save the figures. We also check that Python 3.5 or later is installed (although Python 2.x may work, it is deprecated so we strongly recommend you use Python 3 instead), as well as Scikit-Learn ≥0.20.
# +
# Python ≥3.5 is required
import sys
assert sys.version_info >= (3, 5)
# Scikit-Learn ≥0.20 is required
import sklearn
assert sklearn.__version__ >= "0.20"
# Common imports
import numpy as np
import os
# to make this notebook's output stable across runs
np.random.seed(42)
# To plot pretty figures
# %matplotlib inline
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.rc('axes', labelsize=14)
mpl.rc('xtick', labelsize=12)
mpl.rc('ytick', labelsize=12)
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "training_linear_models"
IMAGES_PATH = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID)
os.makedirs(IMAGES_PATH, exist_ok=True)
def save_fig(fig_id, tight_layout=True, fig_extension="png", resolution=300):
    """Save the current Matplotlib figure as IMAGES_PATH/<fig_id>.<fig_extension>."""
    filename = fig_id + "." + fig_extension
    path = os.path.join(IMAGES_PATH, filename)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format=fig_extension, dpi=resolution)
# Ignore useless warnings (see SciPy issue #5998)
import warnings
warnings.filterwarnings(action="ignore", message="^internal gelsd")
# -
# # Linear regression using the Normal Equation
# +
import numpy as np
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
# -
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([0, 2, 0, 15])
save_fig("generated_data_plot")
plt.show()
X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
theta_best
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new] # add x0 = 1 to each instance
y_predict = X_new_b.dot(theta_best)
y_predict
plt.plot(X_new, y_predict, "r-")
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.show()
# The figure in the book actually corresponds to the following code, with a legend and axis labels:
plt.plot(X_new, y_predict, "r-", linewidth=2, label="Predictions")
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 2, 0, 15])
save_fig("linear_model_predictions")
plt.show()
# +
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
# -
lin_reg.predict(X_new)
# The `LinearRegression` class is based on the `scipy.linalg.lstsq()` function (the name stands for "least squares"), which you could call directly:
theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6)
theta_best_svd
# This function computes $\mathbf{X}^+\mathbf{y}$, where $\mathbf{X}^{+}$ is the _pseudoinverse_ of $\mathbf{X}$ (specifically the Moore-Penrose inverse). You can use `np.linalg.pinv()` to compute the pseudoinverse directly:
np.linalg.pinv(X_b).dot(y)
# # Linear regression using batch gradient descent
# +
eta = 0.1 # learning rate
n_iterations = 1000
m = 100
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = theta - eta * gradients
# -
theta
X_new_b.dot(theta)
# +
theta_path_bgd = []
def plot_gradient_descent(theta, eta, theta_path=None):
    # Run batch gradient descent from `theta` with learning rate `eta`,
    # plotting the model line for the first 10 iterations.
    # Reads the notebook globals X, y, X_b, X_new, X_new_b.
    m = len(X_b)
    plt.plot(X, y, "b.")
    n_iterations = 1000
    for iteration in range(n_iterations):
        if iteration < 10:
            y_predict = X_new_b.dot(theta)
            # First line red/dashed, subsequent ones blue/solid.
            style = "b-" if iteration > 0 else "r--"
            plt.plot(X_new, y_predict, style)
        gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
        theta = theta - eta * gradients
        # Optionally record the full trajectory for the comparison plot.
        if theta_path is not None:
            theta_path.append(theta)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 2, 0, 15])
    plt.title(r"$\eta = {}$".format(eta), fontsize=16)
# +
np.random.seed(42)
theta = np.random.randn(2,1) # random initialization
plt.figure(figsize=(10,4))
plt.subplot(131); plot_gradient_descent(theta, eta=0.02)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(132); plot_gradient_descent(theta, eta=0.1, theta_path=theta_path_bgd)
plt.subplot(133); plot_gradient_descent(theta, eta=0.5)
save_fig("gradient_descent_plot")
plt.show()
# -
# # Stochastic Gradient Descent
theta_path_sgd = []
m = len(X_b)
np.random.seed(42)
# +
n_epochs = 50
t0, t1 = 5, 50 # learning schedule hyperparameters
def learning_schedule(t):
    # Decaying learning rate eta(t) = t0 / (t + t1); t0, t1 are globals set above.
    return t0 / (t + t1)
theta = np.random.randn(2,1) # random initialization
for epoch in range(n_epochs):
for i in range(m):
if epoch == 0 and i < 20: # not shown in the book
y_predict = X_new_b.dot(theta) # not shown
style = "b-" if i > 0 else "r--" # not shown
plt.plot(X_new, y_predict, style) # not shown
random_index = np.random.randint(m)
xi = X_b[random_index:random_index+1]
yi = y[random_index:random_index+1]
gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(epoch * m + i)
theta = theta - eta * gradients
theta_path_sgd.append(theta) # not shown
plt.plot(X, y, "b.") # not shown
plt.xlabel("$x_1$", fontsize=18) # not shown
plt.ylabel("$y$", rotation=0, fontsize=18) # not shown
plt.axis([0, 2, 0, 15]) # not shown
save_fig("sgd_plot") # not shown
plt.show() # not shown
# -
theta
# +
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1, random_state=42)
sgd_reg.fit(X, y.ravel())
# -
sgd_reg.intercept_, sgd_reg.coef_
# # Mini-batch gradient descent
# +
theta_path_mgd = []
n_iterations = 50
minibatch_size = 20
np.random.seed(42)
theta = np.random.randn(2,1) # random initialization
t0, t1 = 200, 1000
def learning_schedule(t):
    # Decaying learning rate eta(t) = t0 / (t + t1); here t0=200, t1=1000 (globals above).
    return t0 / (t + t1)
t = 0
for epoch in range(n_iterations):
shuffled_indices = np.random.permutation(m)
X_b_shuffled = X_b[shuffled_indices]
y_shuffled = y[shuffled_indices]
for i in range(0, m, minibatch_size):
t += 1
xi = X_b_shuffled[i:i+minibatch_size]
yi = y_shuffled[i:i+minibatch_size]
gradients = 2/minibatch_size * xi.T.dot(xi.dot(theta) - yi)
eta = learning_schedule(t)
theta = theta - eta * gradients
theta_path_mgd.append(theta)
# -
theta
theta_path_bgd = np.array(theta_path_bgd)
theta_path_sgd = np.array(theta_path_sgd)
theta_path_mgd = np.array(theta_path_mgd)
plt.figure(figsize=(7,4))
plt.plot(theta_path_sgd[:, 0], theta_path_sgd[:, 1], "r-s", linewidth=1, label="Stochastic")
plt.plot(theta_path_mgd[:, 0], theta_path_mgd[:, 1], "g-+", linewidth=2, label="Mini-batch")
plt.plot(theta_path_bgd[:, 0], theta_path_bgd[:, 1], "b-o", linewidth=3, label="Batch")
plt.legend(loc="upper left", fontsize=16)
plt.xlabel(r"$\theta_0$", fontsize=20)
plt.ylabel(r"$\theta_1$ ", fontsize=20, rotation=0)
plt.axis([2.5, 4.5, 2.3, 3.9])
save_fig("gradient_descent_paths_plot")
plt.show()
# # Polynomial regression
# +
import numpy as np
import numpy.random as rnd
np.random.seed(42)
# -
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X**2 + X + 2 + np.random.randn(m, 1)
plt.plot(X, y, "b.")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_data_plot")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias=False)
X_poly = poly_features.fit_transform(X)
X[0]
X_poly[0]
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
X_new=np.linspace(-3, 3, 100).reshape(100, 1)
X_new_poly = poly_features.transform(X_new)
y_new = lin_reg.predict(X_new_poly)
plt.plot(X, y, "b.")
plt.plot(X_new, y_new, "r-", linewidth=2, label="Predictions")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.legend(loc="upper left", fontsize=14)
plt.axis([-3, 3, 0, 10])
save_fig("quadratic_predictions_plot")
plt.show()
# +
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
for style, width, degree in (("g-", 1, 300), ("b--", 2, 2), ("r-+", 2, 1)):
polybig_features = PolynomialFeatures(degree=degree, include_bias=False)
std_scaler = StandardScaler()
lin_reg = LinearRegression()
polynomial_regression = Pipeline([
("poly_features", polybig_features),
("std_scaler", std_scaler),
("lin_reg", lin_reg),
])
polynomial_regression.fit(X, y)
y_newbig = polynomial_regression.predict(X_new)
plt.plot(X_new, y_newbig, style, label=str(degree), linewidth=width)
plt.plot(X, y, "b.", linewidth=3)
plt.legend(loc="upper left")
plt.xlabel("$x_1$", fontsize=18)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.axis([-3, 3, 0, 10])
save_fig("high_degree_polynomials_plot")
plt.show()
# +
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    # Plot train/validation RMSE as a function of training-set size: refit
    # `model` on growing prefixes of the training split and record both errors.
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        # Training error is measured on the same m samples the model just saw.
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    plt.legend(loc="upper right", fontsize=14)   # not shown in the book
    plt.xlabel("Training set size", fontsize=14) # not shown
    plt.ylabel("RMSE", fontsize=14)              # not shown
# -
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
plt.axis([0, 80, 0, 3]) # not shown in the book
save_fig("underfitting_learning_curves_plot") # not shown
plt.show() # not shown
# +
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("lin_reg", LinearRegression()),
])
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3]) # not shown
save_fig("learning_curves_plot") # not shown
plt.show() # not shown
# -
# # Regularized models
np.random.seed(42)
m = 20
X = 3 * np.random.rand(m, 1)
y = 1 + 0.5 * X + np.random.randn(m, 1) / 1.5
X_new = np.linspace(0, 3, 100).reshape(100, 1)
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver="cholesky", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
ridge_reg = Ridge(alpha=1, solver="sag", random_state=42)
ridge_reg.fit(X, y)
ridge_reg.predict([[1.5]])
# +
from sklearn.linear_model import Ridge
def plot_model(model_class, polynomial, alphas, **model_kargs):
    # Fit and plot `model_class` for each regularization strength in `alphas`;
    # alpha == 0 falls back to an unregularized LinearRegression.
    # Reads the notebook globals X, y, X_new.
    for alpha, style in zip(alphas, ("b-", "g--", "r:")):
        model = model_class(alpha, **model_kargs) if alpha > 0 else LinearRegression()
        if polynomial:
            # Wrap in degree-10 polynomial features + scaling to show
            # regularization taming a flexible model.
            model = Pipeline([
                    ("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
                    ("std_scaler", StandardScaler()),
                    ("regul_reg", model),
                ])
        model.fit(X, y)
        y_new_regul = model.predict(X_new)
        lw = 2 if alpha > 0 else 1
        plt.plot(X_new, y_new_regul, style, linewidth=lw, label=r"$\alpha = {}$".format(alpha))
    plt.plot(X, y, "b.", linewidth=3)
    plt.legend(loc="upper left", fontsize=15)
    plt.xlabel("$x_1$", fontsize=18)
    plt.axis([0, 3, 0, 4])
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Ridge, polynomial=False, alphas=(0, 10, 100), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Ridge, polynomial=True, alphas=(0, 10**-5, 1), random_state=42)
save_fig("ridge_regression_plot")
plt.show()
# -
# **Note**: to be future-proof, we set `max_iter=1000` and `tol=1e-3` because these will be the default values in Scikit-Learn 0.21.
sgd_reg = SGDRegressor(penalty="l2", max_iter=1000, tol=1e-3, random_state=42)
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
# +
from sklearn.linear_model import Lasso
plt.figure(figsize=(8,4))
plt.subplot(121)
plot_model(Lasso, polynomial=False, alphas=(0, 0.1, 1), random_state=42)
plt.ylabel("$y$", rotation=0, fontsize=18)
plt.subplot(122)
plot_model(Lasso, polynomial=True, alphas=(0, 10**-7, 1), random_state=42)
save_fig("lasso_regression_plot")
plt.show()
# -
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5, random_state=42)
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
# +
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1)
X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10)
# -
# Early stopping example:
# +
from sklearn.base import clone
poly_scaler = Pipeline([
("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
("std_scaler", StandardScaler())
])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
sgd_reg = SGDRegressor(max_iter=1, tol=-np.infty, warm_start=True,
penalty=None, learning_rate="constant", eta0=0.0005, random_state=42)
minimum_val_error = float("inf")
best_epoch = None
best_model = None
for epoch in range(1000):
sgd_reg.fit(X_train_poly_scaled, y_train) # continues where it left off
y_val_predict = sgd_reg.predict(X_val_poly_scaled)
val_error = mean_squared_error(y_val, y_val_predict)
if val_error < minimum_val_error:
minimum_val_error = val_error
best_epoch = epoch
best_model = clone(sgd_reg)
# -
# Create the graph:
# +
sgd_reg = SGDRegressor(max_iter=1, tol=-np.infty, warm_start=True,
penalty=None, learning_rate="constant", eta0=0.0005, random_state=42)
n_epochs = 500
train_errors, val_errors = [], []
for epoch in range(n_epochs):
sgd_reg.fit(X_train_poly_scaled, y_train)
y_train_predict = sgd_reg.predict(X_train_poly_scaled)
y_val_predict = sgd_reg.predict(X_val_poly_scaled)
train_errors.append(mean_squared_error(y_train, y_train_predict))
val_errors.append(mean_squared_error(y_val, y_val_predict))
best_epoch = np.argmin(val_errors)
best_val_rmse = np.sqrt(val_errors[best_epoch])
plt.annotate('Best model',
xy=(best_epoch, best_val_rmse),
xytext=(best_epoch, best_val_rmse + 1),
ha="center",
arrowprops=dict(facecolor='black', shrink=0.05),
fontsize=16,
)
best_val_rmse -= 0.03 # just to make the graph look better
plt.plot([0, n_epochs], [best_val_rmse, best_val_rmse], "k:", linewidth=2)
plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="Validation set")
plt.plot(np.sqrt(train_errors), "r--", linewidth=2, label="Training set")
plt.legend(loc="upper right", fontsize=14)
plt.xlabel("Epoch", fontsize=14)
plt.ylabel("RMSE", fontsize=14)
save_fig("early_stopping_plot")
plt.show()
# -
best_epoch, best_model
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# +
t1a, t1b, t2a, t2b = -1, 3, -1.5, 1.5
# ignoring bias term
t1s = np.linspace(t1a, t1b, 500)
t2s = np.linspace(t2a, t2b, 500)
t1, t2 = np.meshgrid(t1s, t2s)
T = np.c_[t1.ravel(), t2.ravel()]
Xr = np.array([[-1, 1], [-0.3, -1], [1, 0.1]])
yr = 2 * Xr[:, :1] + 0.5 * Xr[:, 1:]
J = (1/len(Xr) * np.sum((T.dot(Xr.T) - yr.T)**2, axis=1)).reshape(t1.shape)
N1 = np.linalg.norm(T, ord=1, axis=1).reshape(t1.shape)
N2 = np.linalg.norm(T, ord=2, axis=1).reshape(t1.shape)
t_min_idx = np.unravel_index(np.argmin(J), J.shape)
t1_min, t2_min = t1[t_min_idx], t2[t_min_idx]
t_init = np.array([[0.25], [-1]])
# +
def bgd_path(theta, X, y, l1, l2, core = 1, eta = 0.1, n_iterations = 50):
    """Run batch gradient descent on the (optionally l1/l2-penalized) MSE cost.

    `core` scales the MSE term (core=0 descends on the penalty alone);
    returns the array of all visited theta values, initial point included.
    """
    history = [theta]
    for _ in range(n_iterations):
        mse_grad = core * 2/len(X) * X.T.dot(X.dot(theta) - y)
        penalty_grad = l1 * np.sign(theta) + 2 * l2 * theta
        theta = theta - eta * (mse_grad + penalty_grad)
        history.append(theta)
    return np.array(history)
plt.figure(figsize=(12, 8))
for i, N, l1, l2, title in ((0, N1, 0.5, 0, "Lasso"), (1, N2, 0, 0.1, "Ridge")):
JR = J + l1 * N1 + l2 * N2**2
tr_min_idx = np.unravel_index(np.argmin(JR), JR.shape)
t1r_min, t2r_min = t1[tr_min_idx], t2[tr_min_idx]
levelsJ=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(J) - np.min(J)) + np.min(J)
levelsJR=(np.exp(np.linspace(0, 1, 20)) - 1) * (np.max(JR) - np.min(JR)) + np.min(JR)
levelsN=np.linspace(0, np.max(N), 10)
path_J = bgd_path(t_init, Xr, yr, l1=0, l2=0)
path_JR = bgd_path(t_init, Xr, yr, l1, l2)
path_N = bgd_path(t_init, Xr, yr, np.sign(l1)/3, np.sign(l2), core=0)
plt.subplot(221 + i * 2)
plt.grid(True)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.contourf(t1, t2, J, levels=levelsJ, alpha=0.9)
plt.contour(t1, t2, N, levels=levelsN)
plt.plot(path_J[:, 0], path_J[:, 1], "w-o")
plt.plot(path_N[:, 0], path_N[:, 1], "y-^")
plt.plot(t1_min, t2_min, "rs")
plt.title(r"$\ell_{}$ penalty".format(i + 1), fontsize=16)
plt.axis([t1a, t1b, t2a, t2b])
if i == 1:
plt.xlabel(r"$\theta_1$", fontsize=20)
plt.ylabel(r"$\theta_2$", fontsize=20, rotation=0)
plt.subplot(222 + i * 2)
plt.grid(True)
plt.axhline(y=0, color='k')
plt.axvline(x=0, color='k')
plt.contourf(t1, t2, JR, levels=levelsJR, alpha=0.9)
plt.plot(path_JR[:, 0], path_JR[:, 1], "w-o")
plt.plot(t1r_min, t2r_min, "rs")
plt.title(title, fontsize=16)
plt.axis([t1a, t1b, t2a, t2b])
if i == 1:
plt.xlabel(r"$\theta_1$", fontsize=20)
save_fig("lasso_vs_ridge_plot")
plt.show()
# -
# # Logistic regression
t = np.linspace(-10, 10, 100)
sig = 1 / (1 + np.exp(-t))
plt.figure(figsize=(9, 3))
plt.plot([-10, 10], [0, 0], "k-")
plt.plot([-10, 10], [0.5, 0.5], "k:")
plt.plot([-10, 10], [1, 1], "k:")
plt.plot([0, 0], [-1.1, 1.1], "k-")
plt.plot(t, sig, "b-", linewidth=2, label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$")
plt.xlabel("t")
plt.legend(loc="upper left", fontsize=20)
plt.axis([-10, 10, -0.1, 1.1])
save_fig("logistic_function_plot")
plt.show()
from sklearn import datasets
iris = datasets.load_iris()
list(iris.keys())
print(iris.DESCR)
X = iris["data"][:, 3:] # petal width
y = (iris["target"] == 2).astype(np.int) # 1 if Iris-Virginica, else 0
# **Note**: To be future-proof we set `solver="lbfgs"` since this will be the default value in Scikit-Learn 0.22.
from sklearn.linear_model import LogisticRegression
log_reg = LogisticRegression(solver="lbfgs", random_state=42)
log_reg.fit(X, y)
# +
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
# -
# The figure in the book is actually a bit fancier:
# +
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]
plt.figure(figsize=(8, 3))
plt.plot(X[y==0], y[y==0], "bs")
plt.plot(X[y==1], y[y==1], "g^")
plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris-Virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris-Virginica")
plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center")
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.xlabel("Petal width (cm)", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 3, -0.02, 1.02])
save_fig("logistic_regression_plot")
plt.show()
# -
decision_boundary
log_reg.predict([[1.7], [1.5]])
# +
from sklearn.linear_model import LogisticRegression
X = iris["data"][:, (2, 3)] # petal length, petal width
y = (iris["target"] == 2).astype(np.int)
log_reg = LogisticRegression(solver="lbfgs", C=10**10, random_state=42)
log_reg.fit(X, y)
x0, x1 = np.meshgrid(
np.linspace(2.9, 7, 500).reshape(-1, 1),
np.linspace(0.8, 2.7, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = log_reg.predict_proba(X_new)
plt.figure(figsize=(10, 4))
plt.plot(X[y==0, 0], X[y==0, 1], "bs")
plt.plot(X[y==1, 0], X[y==1, 1], "g^")
zz = y_proba[:, 1].reshape(x0.shape)
contour = plt.contour(x0, x1, zz, cmap=plt.cm.brg)
left_right = np.array([2.9, 7])
boundary = -(log_reg.coef_[0][0] * left_right + log_reg.intercept_[0]) / log_reg.coef_[0][1]
plt.clabel(contour, inline=1, fontsize=12)
plt.plot(left_right, boundary, "k--", linewidth=3)
plt.text(3.5, 1.5, "Not Iris-Virginica", fontsize=14, color="b", ha="center")
plt.text(6.5, 2.3, "Iris-Virginica", fontsize=14, color="g", ha="center")
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.axis([2.9, 7, 0.8, 2.7])
save_fig("logistic_regression_contour_plot")
plt.show()
# +
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10, random_state=42)
softmax_reg.fit(X, y)
# +
x0, x1 = np.meshgrid(
np.linspace(0, 8, 500).reshape(-1, 1),
np.linspace(0, 3.5, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
y_proba = softmax_reg.predict_proba(X_new)
y_predict = softmax_reg.predict(X_new)
zz1 = y_proba[:, 1].reshape(x0.shape)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris-Virginica")
plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris-Versicolor")
plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris-Setosa")
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)
plt.clabel(contour, inline=1, fontsize=12)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 7, 0, 3.5])
save_fig("softmax_regression_contour_plot")
plt.show()
# -
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]])
# # Exercise solutions
# ## 1. to 11.
# See appendix A.
# ## 12. Batch Gradient Descent with early stopping for Softmax Regression
# (without using Scikit-Learn)
# Let's start by loading the data. We will just reuse the Iris dataset we loaded earlier.
X = iris["data"][:, (2, 3)] # petal length, petal width
y = iris["target"]
# We need to add the bias term for every instance ($x_0 = 1$):
X_with_bias = np.c_[np.ones([len(X), 1]), X]
# And let's set the random seed so the output of this exercise solution is reproducible:
np.random.seed(2042)
# The easiest option to split the dataset into a training set, a validation set and a test set would be to use Scikit-Learn's `train_test_split()` function, but the point of this exercise is to try understand the algorithms by implementing them manually. So here is one possible implementation:
# +
test_ratio = 0.2
validation_ratio = 0.2
total_size = len(X_with_bias)
test_size = int(total_size * test_ratio)
validation_size = int(total_size * validation_ratio)
train_size = total_size - test_size - validation_size
rnd_indices = np.random.permutation(total_size)
X_train = X_with_bias[rnd_indices[:train_size]]
y_train = y[rnd_indices[:train_size]]
X_valid = X_with_bias[rnd_indices[train_size:-test_size]]
y_valid = y[rnd_indices[train_size:-test_size]]
X_test = X_with_bias[rnd_indices[-test_size:]]
y_test = y[rnd_indices[-test_size:]]
# -
# The targets are currently class indices (0, 1 or 2), but we need target class probabilities to train the Softmax Regression model. Each instance will have target class probabilities equal to 0.0 for all classes except for the target class which will have a probability of 1.0 (in other words, the vector of class probabilities for any given instance is a one-hot vector). Let's write a small function to convert the vector of class indices into a matrix containing a one-hot vector for each instance:
def to_one_hot(y):
    """Convert a vector of class indices into an (m, n_classes) one-hot float matrix."""
    n_classes = y.max() + 1
    # Row k of the identity matrix is the one-hot vector for class k,
    # so fancy-indexing the identity by y yields the full one-hot matrix.
    return np.eye(n_classes)[y]
# Let's test this function on the first 10 instances:
y_train[:10]
to_one_hot(y_train[:10])
# Looks good, so let's create the target class probabilities matrix for the training set and the test set:
Y_train_one_hot = to_one_hot(y_train)
Y_valid_one_hot = to_one_hot(y_valid)
Y_test_one_hot = to_one_hot(y_test)
# Now let's implement the Softmax function. Recall that it is defined by the following equation:
#
# $\sigma\left(\mathbf{s}(\mathbf{x})\right)_k = \dfrac{\exp\left(s_k(\mathbf{x})\right)}{\sum\limits_{j=1}^{K}{\exp\left(s_j(\mathbf{x})\right)}}$
def softmax(logits):
    """Row-wise softmax of a (m, K) logits matrix.

    Subtracts the per-row maximum before exponentiating so that large
    logits do not overflow ``np.exp`` (the shift cancels in the ratio,
    so the result is mathematically unchanged).
    """
    shifted = logits - np.max(logits, axis=1, keepdims=True)
    exps = np.exp(shifted)
    exp_sums = np.sum(exps, axis=1, keepdims=True)
    return exps / exp_sums
# We are almost ready to start training. Let's define the number of inputs and outputs:
n_inputs = X_train.shape[1] # == 3 (2 features plus the bias term)
n_outputs = len(np.unique(y_train)) # == 3 (3 iris classes)
# Now here comes the hardest part: training! Theoretically, it's simple: it's just a matter of translating the math equations into Python code. But in practice, it can be quite tricky: in particular, it's easy to mix up the order of the terms, or the indices. You can even end up with code that looks like it's working but is actually not computing exactly the right thing. When unsure, you should write down the shape of each term in the equation and make sure the corresponding terms in your code match closely. It can also help to evaluate each term independently and print them out. The good news is that you won't have to do this everyday, since all this is well implemented by Scikit-Learn, but it will help you understand what's going on under the hood.
#
# So the equations we will need are the cost function:
#
# $J(\mathbf{\Theta}) =
# - \dfrac{1}{m}\sum\limits_{i=1}^{m}\sum\limits_{k=1}^{K}{y_k^{(i)}\log\left(\hat{p}_k^{(i)}\right)}$
#
# And the equation for the gradients:
#
# $\nabla_{\mathbf{\theta}^{(k)}} \, J(\mathbf{\Theta}) = \dfrac{1}{m} \sum\limits_{i=1}^{m}{ \left ( \hat{p}^{(i)}_k - y_k^{(i)} \right ) \mathbf{x}^{(i)}}$
#
# Note that $\log\left(\hat{p}_k^{(i)}\right)$ may not be computable if $\hat{p}_k^{(i)} = 0$. So we will add a tiny value $\epsilon$ to $\log\left(\hat{p}_k^{(i)}\right)$ to avoid getting `nan` values.
# +
# Batch gradient descent on the softmax cross-entropy loss.
eta = 0.01
n_iterations = 5001
m = len(X_train)
# epsilon guards log(0) when a predicted probability underflows to zero.
epsilon = 1e-7
# Random initialization of the parameter matrix, one column per class.
Theta = np.random.randn(n_inputs, n_outputs)
for iteration in range(n_iterations):
    logits = X_train.dot(Theta)
    Y_proba = softmax(logits)
    loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
    # Gradient of the cross-entropy w.r.t. the logits is simply p - y.
    error = Y_proba - Y_train_one_hot
    if iteration % 500 == 0:
        print(iteration, loss)
    gradients = 1/m * X_train.T.dot(error)
    Theta = Theta - eta * gradients
# -
# And that's it! The Softmax model is trained. Let's look at the model parameters:
Theta
# Let's make predictions for the validation set and check the accuracy score:
# +
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
# -
# Well, this model looks pretty good. For the sake of the exercise, let's add a bit of $\ell_2$ regularization. The following training code is similar to the one above, but the loss now has an additional $\ell_2$ penalty, and the gradients have the proper additional term (note that we don't regularize the first element of `Theta` since this corresponds to the bias term). Also, let's try increasing the learning rate `eta`.
# +
# Same training loop with an l2 penalty added to the loss and its
# derivative added to the gradients.  The first row of Theta (the bias
# term) is deliberately excluded from regularization.
eta = 0.1
n_iterations = 5001
m = len(X_train)
epsilon = 1e-7
alpha = 0.1 # regularization hyperparameter
Theta = np.random.randn(n_inputs, n_outputs)
for iteration in range(n_iterations):
    logits = X_train.dot(Theta)
    Y_proba = softmax(logits)
    xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
    l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
    loss = xentropy_loss + alpha * l2_loss
    error = Y_proba - Y_train_one_hot
    if iteration % 500 == 0:
        print(iteration, loss)
    # np.r_ stacks a zero row (no penalty on the bias) on top of the
    # l2 gradient alpha * Theta[1:].
    gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_outputs]), alpha * Theta[1:]]
    Theta = Theta - eta * gradients
# -
# Because of the additional $\ell_2$ penalty, the loss seems greater than earlier, but perhaps this model will perform better? Let's find out:
# +
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
# -
# Cool, perfect accuracy! We probably just got lucky with this validation set, but still, it's pleasant.
# Now let's add early stopping. For this we just need to measure the loss on the validation set at every iteration and stop when the error starts growing.
# +
# Regularized training plus early stopping: after every update the
# regularized loss is recomputed on the *validation* set, and training
# halts the first time that loss gets worse.
eta = 0.1
n_iterations = 5001
m = len(X_train)
epsilon = 1e-7
alpha = 0.1 # regularization hyperparameter
best_loss = np.infty
Theta = np.random.randn(n_inputs, n_outputs)
for iteration in range(n_iterations):
    # Gradient step on the training set (cross-entropy + l2 penalty,
    # bias row excluded from regularization).
    logits = X_train.dot(Theta)
    Y_proba = softmax(logits)
    xentropy_loss = -np.mean(np.sum(Y_train_one_hot * np.log(Y_proba + epsilon), axis=1))
    l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
    loss = xentropy_loss + alpha * l2_loss
    error = Y_proba - Y_train_one_hot
    gradients = 1/m * X_train.T.dot(error) + np.r_[np.zeros([1, n_outputs]), alpha * Theta[1:]]
    Theta = Theta - eta * gradients
    # Monitor the regularized loss on the validation set.
    logits = X_valid.dot(Theta)
    Y_proba = softmax(logits)
    xentropy_loss = -np.mean(np.sum(Y_valid_one_hot * np.log(Y_proba + epsilon), axis=1))
    l2_loss = 1/2 * np.sum(np.square(Theta[1:]))
    loss = xentropy_loss + alpha * l2_loss
    if iteration % 500 == 0:
        print(iteration, loss)
    # Stop as soon as the validation loss stops improving.
    if loss < best_loss:
        best_loss = loss
    else:
        print(iteration - 1, best_loss)
        print(iteration, loss, "early stopping!")
        break
# +
logits = X_valid.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_valid)
accuracy_score
# -
# Still perfect, but faster.
# Now let's plot the model's predictions on the whole dataset:
# +
x0, x1 = np.meshgrid(
np.linspace(0, 8, 500).reshape(-1, 1),
np.linspace(0, 3.5, 200).reshape(-1, 1),
)
X_new = np.c_[x0.ravel(), x1.ravel()]
X_new_with_bias = np.c_[np.ones([len(X_new), 1]), X_new]
logits = X_new_with_bias.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
zz1 = Y_proba[:, 1].reshape(x0.shape)
zz = y_predict.reshape(x0.shape)
plt.figure(figsize=(10, 4))
plt.plot(X[y==2, 0], X[y==2, 1], "g^", label="Iris-Virginica")
plt.plot(X[y==1, 0], X[y==1, 1], "bs", label="Iris-Versicolor")
plt.plot(X[y==0, 0], X[y==0, 1], "yo", label="Iris-Setosa")
from matplotlib.colors import ListedColormap
custom_cmap = ListedColormap(['#fafab0','#9898ff','#a0faa0'])
plt.contourf(x0, x1, zz, cmap=custom_cmap)
contour = plt.contour(x0, x1, zz1, cmap=plt.cm.brg)
plt.clabel(contour, inline=1, fontsize=12)
plt.xlabel("Petal length", fontsize=14)
plt.ylabel("Petal width", fontsize=14)
plt.legend(loc="upper left", fontsize=14)
plt.axis([0, 7, 0, 3.5])
plt.show()
# -
# And now let's measure the final model's accuracy on the test set:
# +
logits = X_test.dot(Theta)
Y_proba = softmax(logits)
y_predict = np.argmax(Y_proba, axis=1)
accuracy_score = np.mean(y_predict == y_test)
accuracy_score
# -
# Our perfect model turns out to have slight imperfections. This variability is likely due to the very small size of the dataset: depending on how you sample the training set, validation set and the test set, you can get quite different results. Try changing the random seed and running the code again a few times, you will see that the results will vary.
|
04_training_linear_models.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Test For `DriftCheckerEstimator`-`pydrift`
#
# We're going to test how it works with the famous titanic dataset
#
# # Dependencies
# +
import pandas as pd
from sklearn import set_config
from sklearn.model_selection import train_test_split
from sklearn.pipeline import make_pipeline
from sklearn.compose import make_column_transformer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score
from catboost import CatBoostClassifier
from pydrift import DriftCheckerEstimator
from pydrift.exceptions import ColumnsNotMatchException
from pydrift.constants import PATH_DATA, RANDOM_STATE
from pydrift.models import cat_features_fillna
from pydrift.exceptions import DriftEstimatorException
set_config(display='diagram')
# -
# # Read Data
df_titanic = pd.read_csv(PATH_DATA / 'titanic.csv')
# # Constants
DATA_LENGTH = df_titanic.shape[0]
TARGET = 'Survived'
# # Data Split
#
# 50% sample will give us a non-drift problem
#
# We drop Ticket and Cabin features because of cardinality
# +
X = df_titanic.drop(columns=['Ticket', 'Cabin', 'PassengerId', 'Name', TARGET])
y = df_titanic[TARGET]
cat_features = (X
.select_dtypes(include=['category', 'object'])
.columns)
X_filled = cat_features_fillna(X, cat_features)
X_train_filled, X_test_filled, y_train, y_test = train_test_split(
X_filled, y, test_size=.5, random_state=RANDOM_STATE, stratify=y
)
catboost_classifier = CatBoostClassifier(
num_trees=5,
max_depth=3,
cat_features=cat_features,
random_state=RANDOM_STATE,
verbose=False
)
# -
# # Build Pipeline With DriftCheckerEstimator
#
# Catboost as estimator
# +
pipeline_catboost_drift_checker = make_pipeline(
DriftCheckerEstimator(ml_classifier_model=catboost_classifier, column_names=X.columns)
)
display(pipeline_catboost_drift_checker)
# -
# # Let´s Fit And Predict
# +
pipeline_catboost_drift_checker.fit(X_train_filled, y_train)
y_score_train = pipeline_catboost_drift_checker.predict_proba(X_train_filled)[:, 1]
y_score_test = pipeline_catboost_drift_checker.predict_proba(X_test_filled)[:, 1]
auc_train = roc_auc_score(y_true=y_train, y_score=y_score_train)
auc_test = roc_auc_score(y_true=y_test, y_score=y_score_test)
print(f'AUC training data: {auc_train:.2f}')
print(f'AUC testing data: {auc_test:.2f}')
# -
# # Same With Logistic Regression Pipeline
# +
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=.5, random_state=RANDOM_STATE, stratify=y
)
categorical_pipeline = make_pipeline(
SimpleImputer(strategy='most_frequent'),
OrdinalEncoder()
)
column_transformer = make_column_transformer(
(categorical_pipeline, cat_features),
(SimpleImputer(strategy='median'), X_train.select_dtypes(include='number').columns)
)
pipeline_lr_drift_checker = make_pipeline(
column_transformer,
DriftCheckerEstimator(
ml_classifier_model=LogisticRegression(max_iter=1000,
random_state=RANDOM_STATE),
column_names=X.columns
)
)
display(pipeline_lr_drift_checker)
# -
# # Let´s Fit And Predict
# +
pipeline_lr_drift_checker.fit(X_train, y_train)
y_score_train = pipeline_lr_drift_checker.predict_proba(X_train)[:, 1]
y_score_test = pipeline_lr_drift_checker.predict_proba(X_test)[:, 1]
auc_train = roc_auc_score(y_true=y_train, y_score=y_score_train)
auc_test = roc_auc_score(y_true=y_test, y_score=y_score_test)
print(f'AUC training data: {auc_train:.3f}')
print(f'AUC testing data: {auc_test:.3f}')
# -
# # Ok, Now With Drifted Data
# +
# Rebuild the Titanic features, re-split, then construct artificially
# *drifted* train/test sets by filtering on Pclass and Fare so the two
# halves come from different distributions.
X = df_titanic.drop(columns=['Ticket', 'Cabin', 'PassengerId', 'Name', TARGET])
y = df_titanic[TARGET]
cat_features = (X
                .select_dtypes(include=['category', 'object'])
                .columns)
X_filled = cat_features_fillna(X, cat_features)
X_train_filled, X_test_filled, y_train, y_test = train_test_split(
    X_filled, y, test_size=.5, random_state=RANDOM_STATE, stratify=y
)
# Training side: keep rows with Pclass > 1 AND Fare > 10.
df_train_filled = pd.concat([X_train_filled, y_train], axis=1)
df_train_filled_drifted = df_train_filled[(df_train_filled['Pclass'] > 1) & (df_train_filled['Fare'] > 10)].copy()
X_train_filled_drifted = df_train_filled_drifted.drop(columns=TARGET)
y_train_filled_drifted = df_train_filled_drifted[TARGET]
# Test side: `~` binds only to the first comparison, so this selects
# (NOT (Pclass > 1)) AND (Fare > 10).
# NOTE(review): presumably this asymmetric mask is the intended way to
# induce drift -- confirm it is not a misplaced negation of the whole
# conjunction, i.e. ~((Pclass > 1) & (Fare > 10)).
df_test_filled = pd.concat([X_test_filled, y_test], axis=1)
df_test_filled_drifted = df_test_filled[~(df_test_filled['Pclass'] > 1) & (df_test_filled['Fare'] > 10)].copy()
X_test_filled_drifted = df_test_filled_drifted.drop(columns=TARGET)
y_test_filled_drifted = df_test_filled_drifted[TARGET]
# -
# # Let´s Try To Fit And Predict
#
# DriftEstimatorException tells you that there are some data drifts; you can access the `drifted_columns` attribute to check them, as we will do in the next cell
# +
pipeline_catboost_drift_checker.fit(X_train_filled_drifted, y_train_filled_drifted)
y_score_train = pipeline_catboost_drift_checker.predict_proba(X_train_filled_drifted)[:, 1]
try:
y_score_test = pipeline_catboost_drift_checker.predict_proba(X_test_filled_drifted)[:, 1]
auc_train = roc_auc_score(y_true=y_train_filled_drifted, y_score=y_score_train)
auc_test = roc_auc_score(y_true=y_test_filled_drifted, y_score=y_score_test)
print(f'AUC training data: {auc_train:.2f}')
print(f'AUC testing data: {auc_test:.2f}')
except DriftEstimatorException:
print('Drift found in pipeline_catboost_drift_checker')
# -
# # You Can Get Drifted Features From `DriftCheckerEstimator` Object
# +
drifted_features = (
pipeline_catboost_drift_checker
.named_steps['driftcheckerestimator']
.get_drifted_features()
)
drifted_features
# -
# # You Can Also Get High Cardinality Features
#
# None in this case
(
pipeline_catboost_drift_checker
.named_steps['driftcheckerestimator']
.get_high_cardinality_features()
)
|
notebooks/1-Titanic-DriftCheckerEstimator-Demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Imports and plotting preferences
import os
import numpy as np
from sklearn.mixture import GMM
from astropy.io import fits
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
sns.set(style='white', font_scale=1.6, palette='deep')
col = sns.color_palette()
# -
# Class to read, write, and sample from a mixture model.
class GaussianMixtureModel(object):
    """Read, write, and sample from a Gaussian mixture model.

    A lightweight container that decouples *sampling* from sklearn's GMM:
    a fitted sklearn model is persisted to a FITS file with ``save`` and
    can later be reloaded and sampled without sklearn at all.
    """

    def __init__(self, weights, means, covars, covtype):
        # weights: (n_components,); means: (n_components, n_dimensions);
        # covars: (n_components, n_dimensions, n_dimensions) for 'full'.
        self.weights = weights
        self.means = means
        self.covars = covars
        self.covtype = covtype
        self.n_components, self.n_dimensions = self.means.shape

    @staticmethod
    def save(model, filename):
        """Write a *fitted sklearn GMM* to a multi-extension FITS file.

        Note the trailing-underscore attributes: `model` must be an
        sklearn estimator here, not a GaussianMixtureModel instance.
        """
        hdus = fits.HDUList()
        hdr = fits.Header()
        hdr['covtype'] = model.covariance_type
        hdus.append(fits.ImageHDU(model.weights_, name='weights', header=hdr))
        hdus.append(fits.ImageHDU(model.means_, name='means'))
        hdus.append(fits.ImageHDU(model.covars_, name='covars'))
        hdus.writeto(filename, clobber=True)

    @staticmethod
    def load(filename):
        """Reconstruct a GaussianMixtureModel from a file written by save()."""
        hdus = fits.open(filename, memmap=False)
        hdr = hdus[0].header
        covtype = hdr['covtype']
        model = GaussianMixtureModel(
            hdus['weights'].data, hdus['means'].data, hdus['covars'].data, covtype)
        hdus.close()
        return model

    def sample(self, n_samples=1, random_state=None):
        """Draw `n_samples` points from the mixture (covtype 'full' only).

        Returns an ndarray of shape (n_samples, n_dimensions).
        """
        if self.covtype != 'full':
            # BUG FIX: the exception used to be *returned* rather than
            # raised, so callers silently received an exception object
            # in place of sample data.
            raise NotImplementedError(
                'covariance type "{0}" not implemented yet.'.format(self.covtype))
        # Code adapted from sklearn's GMM.sample()
        if random_state is None:
            random_state = np.random.RandomState()
        weight_cdf = np.cumsum(self.weights)
        X = np.empty((n_samples, self.n_dimensions))
        rand = random_state.rand(n_samples)
        # decide which component to use for each sample
        comps = weight_cdf.searchsorted(rand)
        # for each component, generate all needed samples at once
        for comp in range(self.n_components):
            # occurrences of current component in X
            comp_in_X = (comp == comps)
            # number of those occurrences
            num_comp_in_X = comp_in_X.sum()
            if num_comp_in_X > 0:
                X[comp_in_X] = random_state.multivariate_normal(
                    self.means[comp], self.covars[comp], num_comp_in_X)
        return X
# Use the Bayesian information criterion to determine how many components we need.
def getbic(X, ncomp=(3,)):
    """Return the Bayesian Information Criterion of full-covariance GMM
    fits to `X`, one value per component count in `ncomp`.

    `ncomp` now defaults to an immutable tuple rather than a list, to
    avoid the shared-mutable-default pitfall; any iterable of ints works.
    """
    return [GMM(n_components=nc, covariance_type="full").fit(X).bic(X)
            for nc in ncomp]
# ### Model the [OIII]/Hb, [OII]/Hb, [NII]/Hb, and [SII]/Hb emission-line sequences
# Output file name
forbid_mogfile = os.path.join(os.getenv('DESISIM'), 'data', 'forbidden_mogs.fits')
# Define the valid ranges. MoGs are sensitive to outliers.
oiiihbrange = (-1.5,1.0)
oiihbrange = (-1.2,1.2)
niihbrange = (-2.0,0.8)
siihbrange = (-2.0,0.4)
# +
# Read the catalogs of emission-line objects we will use to calibrate the forbidden emission-line sequences: SDSS
# (MPA/JHU DR7 catalog); HII region (Moustakas et al. 2010); and integrated spectral atlas data (from
# Moustakas & Kennicutt 2006).
atlas = fits.getdata(os.path.join(os.getenv('DESI_ROOT'),'data','atlas-emlines.fits.gz'),1)
sdss = fits.getdata(os.path.join(os.getenv('DESI_ROOT'),'data','sdss-emlines.fits.gz'),1)
hii = fits.getdata(os.path.join(os.getenv('DESI_ROOT'),'data','hii-emlines.fits.gz'),1)
atlas = atlas[np.where((atlas['OIIIHB']>oiiihbrange[0])*(atlas['OIIIHB']<oiiihbrange[1])*
(atlas['OIIHB']>oiihbrange[0])*(atlas['OIIHB']<oiihbrange[1])*
(atlas['NIIHB']>niihbrange[0])*(atlas['NIIHB']<niihbrange[1])*
(atlas['SIIHB']>siihbrange[0])*(atlas['SIIHB']<siihbrange[1]))[0]]
sdss = sdss[np.where((sdss['OIIIHB']>oiiihbrange[0])*(sdss['OIIIHB']<oiiihbrange[1])*
(sdss['OIIHB']>oiihbrange[0])*(sdss['OIIHB']<oiihbrange[1])*
(sdss['NIIHB']>niihbrange[0])*(sdss['NIIHB']<niihbrange[1])*
(sdss['SIIHB']>siihbrange[0])*(sdss['SIIHB']<siihbrange[1]))[0]]
hii = hii[np.where((hii['OIIIHB']>oiiihbrange[0])*(hii['OIIIHB']<oiiihbrange[1])*
(hii['OIIHB']>oiihbrange[0])*(hii['OIIHB']<oiihbrange[1])*
(hii['NIIHB']>niihbrange[0])*(hii['NIIHB']<niihbrange[1])*
(hii['SIIHB']>siihbrange[0])*(hii['SIIHB']<siihbrange[1]))[0]]
print('N(atlas)={}, N(HII)={}, N(SDSS)={}'.format(len(atlas), len(hii), len(sdss)))
# +
# Define some convenience variables. I decide here to not use the SDSS in the fitting.
#oiiihb = np.concatenate((sdss['OIIIHB'], atlas['OIIIHB'], hii['OIIIHB'])).flatten()
#oiihb = np.concatenate((sdss['OIIHB'], atlas['OIIHB'], hii['OIIHB'])).flatten()
#niihb = np.concatenate((sdss['NIIHB'], atlas['NIIHB'], hii['NIIHB'])).flatten()
#siihb = np.concatenate((sdss['SIIHB'], atlas['SIIHB'], hii['SIIHB'])).flatten()
oiiihb = np.concatenate((atlas['OIIIHB'], hii['OIIIHB'])).flatten()
oiihb = np.concatenate((atlas['OIIHB'], hii['OIIHB'])).flatten()
niihb = np.concatenate((atlas['NIIHB'], hii['NIIHB'])).flatten()
siihb = np.concatenate((atlas['SIIHB'], hii['SIIHB'])).flatten()
Xall = np.array([oiiihb, oiihb, niihb, siihb]).T
print('Total number of objects = {}'.format(len(oiiihb)))
# -
# Determine how many Gaussian components we need by looking at the Bayesian Information Criterion.
ncomp = np.arange(2,15)
bic = getbic(Xall, ncomp)
fig, ax = plt.subplots(1, 1, figsize=(8,5))
ax.plot(ncomp, bic, marker='s', ls='-')
ax.set_xlim((0,16))
ax.set_ylim((-1250,-650))
ax.set_xlabel('Number of Gaussian Components')
ax.set_ylabel('Bayesian Information Criterion')
plt.legend(labels=['Forbidden Line-ratios'])
plt.tight_layout()
plt.show()
# Model the distribution using a mixture of Gaussians and write out.
ncomp = 6 # from figure above
mog = GMM(n_components=ncomp, covariance_type="full").fit(Xall)
print('Writing {}'.format(forbid_mogfile))
GaussianMixtureModel.save(mog, forbid_mogfile)
# Reread the model.
mog = GaussianMixtureModel.load(forbid_mogfile)
samp = mog.sample(1000)
# +
# Now plot the emission-line sequences of interest. Show the data on the left-hand panels
# and random draws from the MoGs on the right-hand panels.
fig, ((ax1, ax2), (ax3, ax4), (ax5, ax6)) = plt.subplots(3, 2, figsize=(11,11),
sharex=True)
# [OII]/Hbeta vs [OIII]/Hbeta
ax1.plot(hii['OIIIHB'], hii['OIIHB'], 's', markersize=4, label='HII Regions')
ax1.plot(atlas['OIIIHB'], atlas['OIIHB'], '^', markersize=5, label='Integrated Spectra (z~0)')
sns.kdeplot(sdss['OIIIHB'], sdss['OIIHB'], clip=(oiiihbrange, oiihbrange), ax=ax1, gridsize=40,
cmap="Reds_r", shade=True, cut=0, shade_lowest=False, label='SDSS')
ax1.set_xlim(oiiihbrange)
ax1.set_ylim(oiihbrange)
ax1.set_ylabel(r'log$_{10}$ ([O II] $\lambda$3727/H$\beta$)')
ax2.plot(samp[:,0], samp[:,1], 'o', label='Random Draws', c=col[3], markersize=3)
ax2.set_xlim(oiiihbrange)
ax2.set_ylim(oiihbrange)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel(r'log$_{10}$ ([O II] $\lambda$3727/H$\beta$)')
# [NII]/Hbeta vs [OIII]/Hbeta
ax3.plot(hii['OIIIHB'], hii['NIIHB'], 's', markersize=4, label='HII Regions')
ax3.plot(atlas['OIIIHB'], atlas['NIIHB'], '^', markersize=5, label='Integrated Spectra (z~0)')
sns.kdeplot(sdss['OIIIHB'], sdss['NIIHB'], clip=(oiiihbrange, niihbrange), ax=ax3, gridsize=40,
cmap="Reds_r", shade=True, cut=0, shade_lowest=False, label='SDSS')
ax3.set_xlim(oiiihbrange)
ax3.set_ylim(niihbrange)
ax3.set_ylabel(r'log$_{10}$ ([N II] $\lambda$6584/H$\beta$)')
ax4.plot(samp[:,0], samp[:,2], 'o', label='Random Draws', c=col[3], markersize=3)
ax4.set_xlim(oiiihbrange)
ax4.set_ylim(niihbrange)
ax4.yaxis.tick_right()
ax4.yaxis.set_label_position("right")
ax4.set_ylabel(r'log$_{10}$ ([N II] $\lambda$6584/H$\beta$)')
# [SII]/Hbeta vs [OIII]/Hbeta
ax5.plot(hii['OIIIHB'], hii['SIIHB'], 's', markersize=4, label='HII Regions')
ax5.plot(atlas['OIIIHB'], atlas['SIIHB'], '^', markersize=5, label='Integrated Spectra (z~0)')
sns.kdeplot(sdss['OIIIHB'], sdss['SIIHB'], clip=(oiiihbrange, siihbrange), ax=ax5, gridsize=40,
cmap="Reds_r", shade=True, cut=0, shade_lowest=False, label='SDSS')
ax5.set_xlim(oiiihbrange)
ax5.set_ylim(siihbrange)
ax5.set_ylabel(r'log$_{10}$ ([S II] $\lambda$6716/H$\beta$)')
ax5.set_xlabel(r'log$_{10}$ ([O III] $\lambda$5007/H$\beta$)')
ax5.legend(loc='lower left', prop={'size': 14}, labelspacing=0.25, markerscale=2)
ax6.plot(samp[:,0], samp[:,3], 'o', label='Random Draws', c=col[3], markersize=3)
ax6.set_xlim(oiiihbrange)
ax6.set_ylim(siihbrange)
ax6.yaxis.tick_right()
ax6.yaxis.set_label_position("right")
ax6.set_ylabel(r'log$_{10}$ ([S II] $\lambda$6731/H$\beta$)')
ax6.set_xlabel(r'log$_{10}$ ([O III] $\lambda$5007/H$\beta$)')
ax6.legend(loc='lower left', prop={'size': 14}, labelspacing=0.25, markerscale=2)
fig.subplots_adjust(wspace=0.05, hspace=0.1)
plt.show()
# -
# Show that the AGES (BGS) sample obeys the same emission-line distributions.
meta = fits.getdata(os.path.join(os.getenv('DESI_BASIS_TEMPLATES'),'bgs_templates_v2.0.fits'),1)
# +
#fig, ax1 = plt.subplots(1, 1, figsize=(8,8))#, sharey=True)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(11,6))#, sharey=True)
# [OII]/Hbeta vs [OIII]/Hbeta
good = np.where((meta['OIIIHBETA']>-900)*(meta['OIIHBETA']>-900)*1)[0]
ax1.plot(samp[:,0], samp[:,1], 's', markersize=4)
ax1.plot(meta['OIIIHBETA'][good], meta['OIIHBETA'][good], '^', markersize=5)
ax1.set_xlim(oiiihbrange)
ax1.set_ylim(oiihbrange)
ax1.set_xlabel(r'log$_{10}$ ([O III] $\lambda$5007/H$\beta$)')
ax1.set_ylabel(r'log$_{10}$ ([O II] $\lambda$3727/H$\beta$)')
# [NII]/Hbeta vs [OIII]/Hbeta
good = np.where((meta['OIIIHBETA']>-900)*(meta['NIIHBETA']>-900)*1)[0]
ax2.plot(samp[:,0], samp[:,2], 's', markersize=4, label='Random Draws')
ax2.plot(meta['OIIIHBETA'][good], meta['NIIHBETA'][good], '^', markersize=5, label='AGES/BGS')
ax2.set_xlim(oiiihbrange)
ax2.set_ylim(niihbrange)
ax2.yaxis.tick_right()
ax2.yaxis.set_label_position("right")
ax2.set_ylabel(r'log$_{10}$ ([N II] $\lambda$6584/H$\beta$)')
ax2.set_xlabel(r'log$_{10}$ ([O III] $\lambda$5007/H$\beta$)')
ax2.legend(loc='lower left', prop={'size': 14}, labelspacing=0.25, markerscale=2)
ax2.annotate('', xy=(-0.1, 0.5), xytext=(-0.7, 0.55),
arrowprops=dict(facecolor='black', shrink=0.05))
ax2.text(-1.1, 0.5, 'AGN')
fig.subplots_adjust(wspace=0.1)
plt.show()
# -
# ### Calibrate the EW(H-beta) vs D(4000) sequence for the BGS using the AGES data.
# Output file name
bgs_mogfile = os.path.join(os.getenv('DESISIM'), 'data', 'bgs_mogs.fits')
# Read the BGS metadata.
meta = fits.getdata(os.path.join(os.getenv('DESI_BASIS_TEMPLATES'),'bgs_templates_v2.0.fits'),1)
meta.columns
# Show that the photometric and spectroscopic D(4000) values match nicely.
fig, ax = plt.subplots(1, 1, figsize=(8,7))
ax.plot(meta['D4000'], meta['D4000_SPEC'], marker='s', markersize=3, linestyle='None')
ax.plot([0.9,2.3], [0.9,2.3], color='k')
ax.set_xlim((0.8,2.4))
ax.set_ylim((0.8,2.4))
ax.set_xlabel('D$_{n}$(4000) [Photometric]')
ax.set_ylabel('D$_{n}$(4000) [Spectroscopic]')
plt.tight_layout()
plt.show()
# Separate out galaxies with well-measured H-beta vs upper limits.
good = np.where(meta['HBETA_LIMIT']==0)[0]
limit = np.where(meta['HBETA_LIMIT']==1)[0]
Xages = np.array((meta['D4000'][good], np.log10(meta['HBETA_EW'][good]))).T
Xageslim = np.array((meta['D4000'][limit], np.log10(meta['HBETA_EW'][limit]))).T
# Model the distribution with a simple polynomial.
bgscoeff = np.polyfit(Xages[:,0], Xages[:,1], 2)
print(bgscoeff)
ncomp = np.arange(2,7)
bic = getbic(Xages, ncomp)
fig, ax = plt.subplots(1, 1, figsize=(8,5))
ax.plot(ncomp, bic, marker='s', ls='-')
plt.legend(labels=[r'BGS EW(H$\beta$)-D$_{n}$(4000)'], loc='upper left')
ax.set_xlim((1,ncomp.max()+1))
ax.set_xlabel('Number of Gaussian Components')
ax.set_ylabel('Bayesian Information Criterion')
plt.tight_layout()
plt.show()
# Model the distribution using a mixture of Gaussians and write out.
ncomp = 3 # from figure above
mog = GMM(n_components=ncomp, covariance_type="full").fit(Xages)
print('Writing {}'.format(bgs_mogfile))
GaussianMixtureModel.save(mog, bgs_mogfile)
# Reread the model.
mog = GaussianMixtureModel.load(bgs_mogfile)
samp = mog.sample(1000)
# +
# Make a plot.
d4000axis = np.arange(1.0, 2.1, 0.1)
ewhbetafit = np.polyval(bgscoeff, d4000axis)
plt.scatter(Xages[:,0], Xages[:,1], c=col[0], label='AGES Detections')
plt.scatter(Xageslim[:,0], Xageslim[:,1], c=col[1], label='AGES Upper limits')
#plt.scatter(samp[:,0], samp[:,1], c=col[4], label='Random Draws')
plt.plot(d4000axis, ewhbetafit, lw=3, c=col[2])
plt.plot(d4000axis, ewhbetafit+0.25, ls='--', lw=3, c=col[2])
plt.plot(d4000axis, ewhbetafit-0.25, ls='--', lw=3, c=col[2])
plt.xlabel('D$_{n}$(4000)')
plt.ylabel(r'log$_{10}$ EW(H$\beta$) (A)')
plt.xlim((0.9,2.2))
plt.ylim((-1.5,2.5))
plt.legend()
plt.show()
# -
# ### Calibrate the EW([OII]) vs D(4000) sequence for DEEP2 ELGs.
# Read the ELG metadata.
meta = fits.getdata(os.path.join(os.getenv('DESI_BASIS_TEMPLATES'),'elg_templates_v2.0.fits'),1)
meta.columns
# Convenience variables
from desitarget.cuts import isELG
elg = isELG(gflux=10**(0.4*(22.5-meta['DECAM_G'])), rflux=10**(0.4*(22.5-meta['DECAM_R'])),
zflux=10**(0.4*(22.5-meta['DECAM_Z'])))
X = np.array((meta['D4000'], np.log10(meta['OII_3727_EW']))).T
Xelg = np.array((meta['D4000'][elg], np.log10(meta['OII_3727_EW'][elg]))).T
Xelg
# Model the distribution with a simple polynomial.
elgcoeff = np.polyfit(X[:,0], X[:,1], 2)
print(elgcoeff)
# +
# Make a plot.
d4000axis = np.arange(1.0, 2.1, 0.1)
ewoiifit = np.polyval(elgcoeff, d4000axis)
plt.scatter(X[:,0], X[:,1], c=col[0], label='All ELGs')
plt.scatter(Xelg[:,0], Xelg[:,1], c=col[1], marker='s', label='grz-Selected ELGs')
plt.plot(d4000axis, ewoiifit, lw=3, c=col[2])
plt.plot(d4000axis, ewoiifit+0.3, ls='--', lw=3, c=col[2])
plt.plot(d4000axis, ewoiifit-0.3, ls='--', lw=3, c=col[2])
plt.xlabel('D$_{n}$(4000)')
plt.ylabel(r'log$_{10}$ EW([O II] $\lambda3726,29$) (A)')
plt.xlim((0.9,2.2))
plt.ylim((0.0,3))
plt.legend()
plt.show()
|
doc/nb/calib-emline.ipynb
|