code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import json
import pandas as pd
import requests
from datetime import datetime
# Download the full CoinMap venue list (crypto-accepting businesses worldwide)
# in a single request; the payload nests the records under the 'venues' key.
response = requests.get('https://coinmap.org/api/v1/venues/')
data = response.json()
venues_array = data['venues']
# Peek at one record to see the available fields (lat, lon, category, ...).
print(venues_array[0])
# +
# Rough bounding-box pre-filter: keep only venues north of the southern tip
# of Hawaii and west of the eastern tip of Maine before the expensive
# per-record FCC lookups below.
southernmost_lat = 18.464825  # southern tip of Hawaii
easternmost_lon = -66.949471  # eastern tip of Maine
# This filtering step reduces the number of records from 22787 to 7379.
# This still means we have to call the API 7379 times. Unfortunately
# there are no further simple optimizations we can make, since it turns
# out that >6000 of the 7379 actually lie within the United States.
# BUG FIX: the coordinates were previously cast with int(), which truncates
# toward zero and wrongly drops venues near the boundary values (e.g. a
# venue at lat 18.7 became 18 and failed the >= 18.464825 test; lon -66.97
# became -66 and failed the <= -66.949471 test). Compare as floats instead.
venues_array = [ven for ven in venues_array if float(ven['lon']) <= easternmost_lon]
venues_array = [ven for ven in venues_array if float(ven['lat']) >= southernmost_lat]
print(len(venues_array))
venues_usa = []
for venue in venues_array:
    lat_param = venue['lat']
    lon_param = venue['lon']
    # Look up the census block for this coordinate pair; the FCC API returns
    # JSON whose State/County names are null outside the United States.
    response = requests.get(f'https://geo.fcc.gov/api/census/block/find?latitude={lat_param}&longitude={lon_param}&format=json')
    if response.status_code == 200:
        data = response.json()
        # Outside the USA the API returns None for state/county names, so we
        # skip the record. BUG FIX: the original compared str(value) against
        # the literal 'None', which would also match a place literally named
        # "None"; an identity check against None is the precise test.
        if data['State']['name'] is None or data['County']['name'] is None:
            continue
        # Venue is inside the USA: attach the state and county names to the
        # record and keep it. The str() casts remain as a defensive measure.
        venue['state'] = str(data['State']['name'])
        venue['county'] = str(data['County']['name'])
        venues_usa.append(venue)
    else:
        # Surface any failed lookups (rate limiting, transient errors).
        print(response.status_code, response)
print(len(venues_usa))
# +
# Since the continuous process of calling the API for each record is so expensive
# we will want to save the data as soon as possible. Although the data has been
# feature engineered to some degree, it is still 'raw', so we will save it to the
# raw directory immediately
df_venues_usa = pd.json_normalize(venues_usa)
df_venues_usa
df_venues_usa.to_csv('../../data/raw/COINMAP_DATA_USA', index=False)
# +
# Since the continuous process of calling the API for each record is so expensive
# we will want to save the data as soon as possible.
df_venues_usa = pd.json_normalize(venues_usa)
# 'created_on' arrives as a unix timestamp in seconds; we only keep the year.
df_venues_usa['created_on'] = pd.to_datetime(df_venues_usa['created_on'],unit='s')
df_venues_usa['year'] = df_venues_usa['created_on'].dt.year
# Drop identifier/unused columns now that 'year' has been extracted.
df_venues_usa.drop(['id','promoted','name', 'created_on', 'geolocation_degrees'], axis=1, inplace=True)
df_venues_usa
# +
# reorder columns so indices are to the left
df_venues_usa = df_venues_usa[['state', 'county', 'lat', 'lon', 'year', 'category']]
df_venues_usa.dtypes
# -
# NOTE(review): 'CRYTO' looks like a typo for 'CRYPTO' in this output path,
# but renaming it could break downstream readers — confirm before changing.
df_venues_usa.to_csv('../../data/interim/CRYTO_VENUES_USA', index=False)
# Sanity check: list the county of every retained venue.
for cnty in df_venues_usa['county']:
    print(cnty)
| notebooks/sandbox/coimap_etl.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QAzjnJoZnERD"
# Installing pre-requisites
# + colab={"base_uri": "https://localhost:8080/"} id="UvcH4D6Sm8DY" outputId="2b776130-4a70-4e96-ed0b-e7d86df2f2bb"
# !pip install pytorch-metric-learning
# !pip install faiss-gpu
# + colab={"base_uri": "https://localhost:8080/"} id="xiTtQcENnaAZ" outputId="c25ac121-8255-4438-fecc-b44e5f85bbd6"
# Mount Google Drive so the zipped dataset is reachable (Colab-only).
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="yQMMvxZYnZD3"
# ## PART 1: Importing datasets (both training and MNIST)
# + id="agnX7kf7nYtR"
import numpy as np
import pandas as pd
import os
from PIL import Image
import torch
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import Dataset
import torchvision.transforms as transforms
from torch.utils.data.sampler import SubsetRandomSampler
# + id="x21wSqLXnz2n"
# Shared preprocessing pipeline: upscale to 220x220, convert to a tensor,
# then map single-channel pixel values into [-1, 1] via mean/std of 0.5.
transform = transforms.Compose(
    [transforms.Resize(220),
     transforms.ToTensor(),
     transforms.Normalize(mean=[0.5], std=[0.5])])
# + colab={"base_uri": "https://localhost:8080/", "height": 568, "referenced_widgets": ["ab3da7b9815a4e0aad00f93320381614", "0f7f7a2c43924eba8a02836dc986f529", "e387958108ff443ba6a032dfd25ae0d2", "8713d5507d11456f9a243a8abadec1dc", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "c71ccc81279449c1833b861ef74929f0", "1426e6d102eb4d429f92a9f495057339", "d3a26e9933f4413dbacc62318778a4e9", "0fb1c397e6d5498ba239fe533c2478c0", "<KEY>", "d3a6e6fed23644d894b6cacd506729ab", "<KEY>", "<KEY>", "fa620950b5f4457ca3bdd17f6ede39de", "0ab01f2e171e4d28a9002cac1248f5a1", "23916bbad07143c79099294a2dc8fdf4", "9a5faf1dad1e4f948591c6ee17e46655", "<KEY>", "1493290193034dc5837a160d3cb6a817", "<KEY>", "117e92f9417a41a5b1e21199a3f1f53d", "8a66060e0628484e9b77ce707b9ea7bd", "a7832d303e59474d80e51e08bc936e91", "<KEY>", "a966d4c844d740aaa7f31363e53deaca", "<KEY>", "<KEY>", "<KEY>"]} id="scGyO0vNnYrO" outputId="90bfede5-d278-4e1e-da68-209a23eea8e6"
from torchvision import datasets
# The MNIST test split is the final evaluation set for this transfer task.
mnistTest = datasets.MNIST('.', train=False, download=True, transform=transform)
mnistTestLoader = torch.utils.data.DataLoader(mnistTest, batch_size=128)
# + colab={"base_uri": "https://localhost:8080/"} id="quYxWF02t0m_" outputId="5d740c74-28be-46fb-bfc1-ca5543dd472f"
# !unzip /content/drive/MyDrive/MIDAS/TASK3/mnistTask3.zip -d /content/mnistTASK
# + id="0zkXXsUunYoZ"
# Root folder of the custom (non-MNIST) training images, one folder per class.
pathToDataset = '/content/mnistTASK/mnistTask'
# + id="rOmcqEYZnYmX" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="7c93d745-14d0-4fbf-f63a-5f9ac9b00ccb"
# Walk the dataset directory: each sub-folder is a class label and every
# file inside it is one training image.
records = [
    (os.path.join(pathToDataset, folder, fileName), folder)
    for folder in os.listdir(pathToDataset)
    for fileName in os.listdir(os.path.join(pathToDataset, folder))
]
imagePath = [path for path, _ in records]
labels = [folder for _, folder in records]
data = pd.DataFrame({'Images': imagePath, 'Labels': labels})
data.head()
# + id="aXDjDGV8nYj6" colab={"base_uri": "https://localhost:8080/", "height": 195} outputId="62043a2b-5408-4e0e-d2ae-61ee21a7c258"
# Integer-encode the class-folder names for use as training targets.
labelEncoder = LabelEncoder()
data['Encoded Labels'] = labelEncoder.fit_transform(data['Labels'])
data.head()
# + id="K8gdEkUynYhu"
# Training configuration: batch size, hold-out fraction, and split seed.
batchSize = 128
validationSplit = 0.15
shuffleDataset = True
randomSeed = 17
# + id="iYHlzAtxnYfz"
# Index-based train/validation split (85/15), shuffled deterministically.
datasetSize = len(data)
indices = list(range(datasetSize))
split = int(np.floor(validationSplit*datasetSize))
if shuffleDataset:
    np.random.seed(randomSeed)
    np.random.shuffle(indices)
trainIndices, validationIndices = indices[split:], indices[:split]
# + id="poKKyIUPnYd1"
# Samplers restrict each DataLoader to its own disjoint index subset.
trainSampler = SubsetRandomSampler(trainIndices)
validationSampler = SubsetRandomSampler(validationIndices)
# + id="Rk-P0OVXnYbz"
class CustomDataset(Dataset):
    """Map-style dataset over a dataframe of image paths and encoded labels.

    Expects ``imageData`` to hold the columns 'Images' (file path),
    'Labels' (class-folder name) and 'Encoded Labels' (integer class id).
    """
    def __init__(self, imageData, imagePath, transform=None):
        self.imagePath = imagePath  # dataset root, used only for relative paths
        self.imageData = imageData
        self.transform = transform
    def __len__(self):
        # One sample per dataframe row.
        return len(self.imageData)
    def __getitem__(self, index):
        # BUG FIX: the 'Images' column already stores the full absolute path,
        # so the original os.path.join(root, label, abs_path) only worked
        # because os.path.join discards earlier components when a later one
        # is absolute. Use the stored path directly, and only join against
        # the root for relative file names.
        imageName = self.imageData.loc[index, 'Images']
        if not os.path.isabs(imageName):
            imageName = os.path.join(self.imagePath, self.imageData.loc[index, 'Labels'], imageName)
        image = Image.open(imageName).convert('L')
        # NOTE(review): downsampling to 32x32 here, before transforms.Resize(220),
        # throws away detail — presumably intentional; confirm.
        image = image.resize((32, 32))
        label = torch.tensor(self.imageData.loc[index, 'Encoded Labels'])
        if self.transform is not None:
            image = self.transform(image)
        return image, label
# + id="FybYSG_nnYZZ"
# Single dataset instance shared by both loaders below.
dataset = CustomDataset(data,pathToDataset,transform)
# + id="wdXpYoQDnYXQ"
# The samplers keep the train/validation index sets disjoint.
trainLoader = torch.utils.data.DataLoader(dataset, batch_size = batchSize, sampler = trainSampler)
validationLoader = torch.utils.data.DataLoader(dataset, batch_size = batchSize, sampler = validationSampler)
# + id="jf_c-RyTnYU5"
def displayImage(image):
    """Undo the Normalize(mean=0.5, std=0.5) transform and return a
    220x220 numpy array suitable for plt.imshow."""
    denormalized = image / 2 + 0.5
    return denormalized.numpy().reshape(220, 220)
# + id="rM07m0onnYS-" colab={"base_uri": "https://localhost:8080/", "height": 832} outputId="67a52fe4-03a3-4749-b126-912c8dbb3f03"
# Preview a 3x5 grid of training images with their (encoded) labels.
dataIterator = iter(trainLoader)
# BUG FIX: the `.next()` method was removed from DataLoader iterators in
# newer PyTorch releases; the builtin next() works across all versions.
images, labels = next(dataIterator)
figure, axis = plt.subplots(3, 5, figsize=(16, 16))
for i, ax in enumerate(axis.flat):
    with torch.no_grad():
        image, label = images[i], labels[i]
        ax.imshow(displayImage(image))
        ax.set(title=f"{label.item()}")
# + [markdown] id="xJBCo2h4oXUv"
# ## Part 2: Training from scratch and testing on MNIST
# + id="x2Td-JkPnYQ4"
#the pytorch metric learning library comes with inbuilt methods for triplet mining and computing triplet losses between anchor, positive class and negative class
from pytorch_metric_learning import losses, miners
from pytorch_metric_learning.distances import CosineSimilarity
from pytorch_metric_learning.reducers import ThresholdReducer
from pytorch_metric_learning.regularizers import LpRegularizer
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
# + id="ER69FF25nYOj" colab={"base_uri": "https://localhost:8080/"} outputId="dd49ac0c-875d-47d2-bd99-7675883e828c"
# Select GPU when available. BUG FIX: the original called
# torch.cuda.get_device_name(0) unconditionally, which raises on
# CPU-only machines; only query the GPU name if one actually exists.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
gpu_name = torch.cuda.get_device_name(0) if torch.cuda.is_available() else "none"
print("Training the model on: ", device, " Available GPU: ", gpu_name)
# + id="lNERzSganYMj"
class EmbeddingNetwork(nn.Module):
    """Convolutional network mapping 1x220x220 grayscale images to
    L2-normalised 128-dimensional embeddings for metric learning."""

    @staticmethod
    def _conv_block(in_channels, out_channels, pool=False, flatten=False):
        # A 1x1 conv followed by a 3x3 conv, each with BatchNorm + LeakyReLU;
        # optionally ends with a 3x3/stride-2 max-pool and a Flatten. Module
        # order matches the original Sequential blocks so saved state_dicts
        # still load.
        layers = [
            nn.Conv2d(in_channels, in_channels, (1, 1), stride=(1, 1)),
            nn.BatchNorm2d(in_channels),
            nn.LeakyReLU(0.001),
            nn.Conv2d(in_channels, out_channels, (3, 3), stride=(1, 1), padding=(1, 1)),
            nn.BatchNorm2d(out_channels),
            nn.LeakyReLU(0.001),
        ]
        if pool:
            layers.append(nn.MaxPool2d((3, 3), 2, padding=(1, 1)))
        if flatten:
            layers.append(nn.Flatten())
        return nn.Sequential(*layers)

    def __init__(self):
        super(EmbeddingNetwork, self).__init__()
        # Stem: 7x7/stride-2 conv plus pooling shrinks 220 -> 55 spatially.
        self.conv1 = nn.Sequential(
            nn.Conv2d(1, 64, (7, 7), stride=(2, 2), padding=(3, 3)),
            nn.BatchNorm2d(64),
            nn.LeakyReLU(0.001),
            nn.MaxPool2d((3, 3), 2, padding=(1, 1))
        )
        # Attribute names conv2..conv6 are preserved for checkpoint loading.
        self.conv2 = self._conv_block(64, 192, pool=True)
        self.conv3 = self._conv_block(192, 384, pool=True)
        self.conv4 = self._conv_block(384, 256)
        self.conv5 = self._conv_block(256, 256)
        self.conv6 = self._conv_block(256, 256, pool=True, flatten=True)
        # Head: 7*7*256 flattened features -> 128-d embedding.
        self.fullyConnected = nn.Sequential(
            nn.Linear(7 * 7 * 256, 32 * 128),
            nn.BatchNorm1d(32 * 128),
            nn.LeakyReLU(0.001),
            nn.Linear(32 * 128, 128)
        )

    def forward(self, x):
        for stage in (self.conv1, self.conv2, self.conv3,
                      self.conv4, self.conv5, self.conv6):
            x = stage(x)
        x = self.fullyConnected(x)
        # Project onto the unit hypersphere so distances between embeddings
        # are comparable.
        return torch.nn.functional.normalize(x, p=2, dim=-1)
# + id="s2Cs23r1nYK-" colab={"base_uri": "https://localhost:8080/"} outputId="243d3d17-3fce-44c0-c18e-3ecd4c3dd932"
# Instantiate the embedding model on the selected device and show its layers.
embeddingNetwork = EmbeddingNetwork().to(device)
print(embeddingNetwork)
# + id="dWrqFpN0nYIL"
def train(model, lossFunction, miningFunction, device, trainLoader, optimizer, epoch):
    """Run one metric-learning training epoch.

    For each batch: embed the inputs, mine informative pairs/triplets with
    miningFunction, minimise the resulting loss, and log progress every
    200 batches.
    """
    print("Training started for Epoch: ", epoch)
    model.train()
    for batchIndex, (batch, batchLabels) in enumerate(trainLoader):
        batch, batchLabels = batch.to(device), batchLabels.to(device)
        optimizer.zero_grad()
        embeddings = model(batch)
        minedPairs = miningFunction(embeddings, batchLabels)
        batchLoss = lossFunction(embeddings, batchLabels, minedPairs)
        batchLoss.backward()
        optimizer.step()
        if batchIndex % 200 == 0:
            print("Training Stats for Epoch {} Iteration {}: Loss= {}, Number of mined triplets {}".format(
                epoch, batchIndex, batchLoss, miningFunction.num_triplets))
# + id="gI5oFz9cnYGB"
# distance: this tells the model how to calculate the distance between the generated embeddings
distance = CosineSimilarity()
# Discard pairwise losses below 0.0 before averaging.
reducer = ThresholdReducer(low=0.0)
lossFunction = losses.TripletMarginLoss(margin = 0.2, distance = distance, reducer = reducer)
# Semi-hard mining: negatives farther than the positive but within the margin.
miningFunction = miners.TripletMarginMiner(margin = 0.2, distance = distance, type_of_triplets = "semi-hard")
# NOTE(review): lr=0.05 is unusually high for Adam — confirm it was intended.
optimizer = torch.optim.Adam(embeddingNetwork.parameters(), lr=0.05)
# + [markdown] id="5nuJcvuPoqy4"
# The validation split here is from the given dataset not the MNIST Dataset
# + id="CIusd5ZgnYD-"
def tester(maxValidationAccuracy):
    """Evaluate the current embedding network with a 13-NN classifier.

    Embeds the train and validation splits, fits k-NN on the training
    embeddings, scores on the validation embeddings, and checkpoints the
    embedding network whenever the accuracy improves. Returns the (possibly
    updated) best validation accuracy.
    """
    def _embed(loader):
        # Collect per-example embeddings and labels as flat lists.
        vectors, targets = [], []
        for batch, batchLabels in loader:
            batch, batchLabels = batch.to(device), batchLabels.to(device)
            output = embeddingNetwork(batch)
            vectors.extend(output.cpu().detach().numpy())
            targets.extend(batchLabels.cpu().detach().numpy())
        return vectors, targets

    with torch.no_grad():
        embeddingNetwork.eval()
        trainVectors, trainTargets = _embed(trainLoader)
        validationVectors, validationTargets = _embed(validationLoader)
    neigh = KNeighborsClassifier(n_neighbors=13)
    neigh.fit(trainVectors, trainTargets)
    prediction = neigh.predict(validationVectors)
    currentAccuracy = accuracy_score(validationTargets, prediction)
    print("Accuracy: ", currentAccuracy)
    if currentAccuracy > maxValidationAccuracy:
        maxValidationAccuracy = currentAccuracy
        print("New highest validation accuracy, saving the embedding model")
        torch.save(embeddingNetwork.state_dict(), "embeddingNetworkTask3.pt")
    return maxValidationAccuracy
# + id="KCRYWcUunYB1" colab={"base_uri": "https://localhost:8080/"} outputId="5a1ea146-bb5d-44d3-aa52-284a9c4be6be"
# Train for 80 epochs; tester() checkpoints the best weights to disk.
maxValidationAccuracy = 0
for epoch in range(1, 81):
    train(embeddingNetwork, lossFunction, miningFunction, device, trainLoader, optimizer, epoch)
    print("Training completed for the Epoch:", epoch)
    maxValidationAccuracy = tester(maxValidationAccuracy)
# + id="1p3oMEkKnX_5" colab={"base_uri": "https://localhost:8080/"} outputId="9a510f60-486b-4395-a235-a47fe1b0aa41"
print("Highest Validation Accuracy acheived during training: ", maxValidationAccuracy)
# + id="jf4vLA2BnX9m" colab={"base_uri": "https://localhost:8080/"} outputId="0b4d0eb6-9a8d-42d4-b79f-c917a79a476c"
# loading best validated weights into the classifier (fresh instance avoids
# carrying over any post-best training state)
embeddingNetwork = EmbeddingNetwork().to(device)
embeddingNetwork.load_state_dict(torch.load('embeddingNetworkTask3.pt'))
# + id="c-eXVwELpLkZ"
class classifierNet(nn.Module):
    """Classification head on top of a (frozen) embedding network.

    Maps a 128-d embedding through a 64-unit ReLU layer to 10 class
    log-probabilities, with dropout after both the embedding and the
    hidden layer.
    """
    def __init__(self, EmbeddingNet):
        super(classifierNet, self).__init__()
        self.embeddingLayer = EmbeddingNet
        self.linearLayer = nn.Sequential(nn.Linear(128, 64), nn.ReLU())
        self.classifierLayer = nn.Linear(64, 10)
        self.dropout = nn.Dropout(0.5)
    def forward(self, x):
        embedded = self.dropout(self.embeddingLayer(x))
        hidden = self.dropout(self.linearLayer(embedded))
        # log_softmax pairs with nn.NLLLoss during training.
        return F.log_softmax(self.classifierLayer(hidden), dim=1)
# + id="sYCf4xZLpMge" colab={"base_uri": "https://localhost:8080/"} outputId="d6981d14-591f-417f-cdf1-40cc2242eda8"
# Wrap the trained embedding network with the classification head.
classifier = classifierNet(embeddingNetwork).to(device)
print(classifier)
# + id="rJ45yilAuL7E"
# Freeze the pretrained embedding backbone; only the new head is trained.
for param in classifier.embeddingLayer.parameters():
    param.requires_grad = False
# + [markdown] id="w1IqZsBgvGtd"
# The code below trains the classifier on provided dataset and tests on MNIST dataset. Here the validation accuracy means the accuracy of the MNIST test set.
# + id="z5FxZt9801Uj"
# NLLLoss pairs with the log_softmax output of classifierNet.
criterion = nn.NLLLoss()
optimizer = torch.optim.Adam(classifier.parameters(), lr=0.01)
# + colab={"base_uri": "https://localhost:8080/"} id="0lWm0MHxuxHa" outputId="238a78e2-f82f-43ce-815e-8efb380c4b24"
# NOTE(review): range(1, numberOfEpochs) runs 14 epochs, not 15 — confirm intended.
numberOfEpochs = 15
validAccuracyMax = 0.0
validationLossTransfer = []
validationAccuracyTransfer = []
trainingLossTransfer = []
trainingAccuracyTransfer = []
totalSteps = len(trainLoader)
for epoch in range(1, numberOfEpochs):
    classifier.train()
    runningLoss = 0.0
    correct = 0
    total = 0
    print("Training started for Epoch: ",epoch)
    for batchIndex, (data, target) in enumerate(trainLoader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        outputs = classifier(data)
        loss = criterion(outputs,target)
        loss.backward()
        optimizer.step()
        runningLoss += loss.item()
        # Track running training accuracy from the argmax predictions.
        _, pred = torch.max(outputs, dim=1)
        correct += torch.sum(pred==target).item()
        total += target.size(0)
        if (batchIndex)%100 ==0:
            print("Epoch [{}/{}] Step [{}/{}] Loss: {:.4f}".format(epoch,numberOfEpochs,batchIndex,totalSteps,loss.item()))
    # Per-epoch training metrics.
    trainingAccuracyTransfer.append(100*correct/total)
    trainingLossTransfer.append(runningLoss/totalSteps)
    print("Training Accuracy: ",(100*correct/total))
    # Evaluate on the MNIST test set ("validation" in this notebook).
    batchLoss = 0
    totalV = 0
    correctV = 0
    with torch.no_grad():
        classifier.eval()
        for dataV, targetV in (mnistTestLoader):
            dataV, targetV = dataV.to(device), targetV.to(device)
            outputV = classifier(dataV)
            lossV = criterion(outputV,targetV)
            batchLoss += lossV.item()
            _, predV = torch.max(outputV, dim=1)
            correctV += torch.sum(predV==targetV).item()
            totalV += targetV.size(0)
        validationAccuracyTransfer.append(100*correctV/totalV)
        validationLossTransfer.append(batchLoss/len(mnistTestLoader))
        print("Validation Accuracy: ",(100*correctV/totalV))
        # Checkpoint whenever MNIST accuracy improves.
        if (100*correctV/totalV)>validAccuracyMax:
            validAccuracyMax = 100*correctV/totalV
            print("Validation accuracy improved, network improvement detected, saving network")
            torch.save(classifier.state_dict(), "mnistFinalWeightsTASK3.pt")
    # Restore training mode for the next epoch.
    classifier.train()
# + [markdown] id="Nr_BxEwEz_wc"
# Plotting the Accuracy and Loss on the MNIST Test set
# + colab={"base_uri": "https://localhost:8080/", "height": 610} id="J_INjJsguxEo" outputId="b00e964b-1c69-4fa6-f584-afd50786071b"
# MNIST test-set loss per epoch.
fig = plt.figure(figsize=(20,10))
plt.title("Validation Loss Plot")
plt.plot(validationLossTransfer, label='validation loss')
plt.xlabel('Number of epochs')
plt.ylabel('Loss')
plt.legend(loc='best')
# + colab={"base_uri": "https://localhost:8080/", "height": 610} id="uUE-Y9I2uxCN" outputId="668997f7-7a89-4fae-a364-c5135b9d1838"
# MNIST test-set accuracy per epoch.
fig = plt.figure(figsize=(20,10))
plt.title("Validation Accuracy Plot")
plt.plot(validationAccuracyTransfer, label='validation accuracy')
plt.xlabel('Number of epochs')
plt.ylabel('Accuracy')
plt.legend(loc='best')
# + id="5AUcGpQ5uw_3"
# + id="TsYPcSUeuw9L"
| SUB TASK 3/subtask3Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Machine Learning - Introdução
# #### APLICAÇÕES DE MACHINE LEARNING:
#
# ##### - Marketing:
# * Churn Analysis: Quais clientes tendem a abandonar a empresa?
# * Quais clientes irão responder a quais promoções?
# * Qual a combinação de produtos que mais vende?
# * Quais clientes irão comprar os mesmo sem ofertas?
# * Identificação dos consumidores alfa.
#
# ##### - Educação:
# * Quais alunos irão abandonar o curso e por quê?
# * Quais os alunos são mais fiéis?
# * Quais cursos são mais rentáveis?
# * Quais cursos, com quais características, atraem mais alunos?
#
# ##### - Recursos Humanos:
# * Qual o perfil de talentos é mais adequado para as vagas?
# * Qual o perfil de funcionários que abandonarão o emprego e quando?
# * Quais ações são efetivas na produtividade?
#
# ##### - Finanças/Contabilidade:
# * Prever a performance financeira da organização.
# * Mitigação de riscos futuros.
# * Fraude.
#
# ##### - Visão Computacional:
# * Veículos roubados (cidades inteligentes)
#
# #### TAREFAS:
#
# ##### - Classificação:
# * Descrever ou prever um atributo especial chamado classe.
# * Ex.: Prever uma fraude, descobrir a qual espécie um animal pertence, prever uma doença ou classificar um tipo de fungo.
#
# ##### - Regressão:
# * Um tipo de classificação, porém, enquanto na classificação a classe é um tipo de dado nominal ou categórico, na regressão a classe é numérica.
# * Ex.: Prever a altura de uma pessoa a partir do peso.
#
# ##### - Agrupamentos:
# * Não existe classe.
# * O Objetivo é criar grupos e atribuir às instâncias dos dados nestes grupos, a partir das características, ou atributos destas instância.
# * Ex.: Identificar grupos de clientes e direcionar campanhas de marketing específicas; Identificar tentativas de acesso a rede; Categorizar uma nova espécie entre outros.
#
# ##### - Regras de associação:
# * Buscam a relação entre itens. A aplicação clássica é em cestas de compras: quem comprou o produto A, também comprou o produto B. Porém têm ampla aplicação em diagnósticos de medicina, sensos, etc.
#
# #### SUPERVISIONADOS x NÃO SUPERVISIONADOS:
#
# - As tarefas de mineração de dados são ditas supervisionadas quando existe uma classe, ou um atributo especial com a qual se pode comparar o resultado.
#
# #### COMO MELHORAR UM MODELO?
#
# - Testando diferentes algoritmos.
# - Parametrizando algoritmos (hiper parâmetros)
# - Selecionando e tratando dados.
# - Seleção / Engenharia de atributos.
#
# #### TIPOS DE ALGORITMOS CLASSIFICADORES:
#
# - Árvores de Decisão:
# • Nodo raiz
# • Nodos Internos
# • Nodos Terminais
# • Algoritmo de Partição: grau de pureza
# - Regras
# - Naïve Bayes:
# • Baseado na teoria das probabilidades e que supõe que os atributos vão influenciar a classe de forma independente
# - Redes Bayesianas:
# • Uma Rede Bayesiana pode mostrar eventual dependência entre os atributos através de probabilidade condicional
# - Redes Neurais Artificiais e aprendizado Profundo (Próxima Seção)
# - Maquina de Vetor de Suporte
# - Métodos de Grupos:
# • Florestas Aleatórias
# • Boosting
# - Aprendizado Baseado em Instância:
# • Classificador do vizinho mais próximo (Nearest-neighbor)
#
# #### Maldição da dimensionalidade:
#
# - A inclusão de muitos (ou mais) atributos em um modelo, degradam sua performance!
# - Se existem muitos atributos, como saber quais são mais relevantes para o modelo?
# - Seleção de atributos:
# • Pode-se fazer “manualmente”
# • Alguns algoritmos já a fazem automaticamente
#
# #### CODIFICAÇÃO DE CATEGORIAS:
#
# #### Categorical Encoding:
# * Algoritmos entendem números
# * Categorical encoding é o processo de transformar categorias em números
# * Duas Formas:
# • Label encoding
# • One-hot encoding
#
# ##### - Label encoding:
# * Cada categoria recebe um número, normalmente em ordem alfabética
# * Problema: o algoritmo pode correlacionar os dados como uma ordem de grandeza!
#
# ##### One-hot encoding
# * Cada categoria é transformada em outro atributo: dummy variable
# * Um valor binário informa a ocorrência
#
# ##### Dummy Variable Trap:
# * O valor dos atributos se torna altamente previsível
# * Resultado, correlação entre as variáveis Independentes: multicolinearidade
# * Solução: Excluir um dos atributos!
#
# #### Qual usar?
#
# ##### - Label encoding:
# * Há ordem (Ex.: Programador Júnior, Pleno, Sênior, etc)
# * Grande número de categorias, não dá para usar o One-Hot Encoding
#
# ##### - One-hot encoding:
# * Não há ordem
# * Número de categorias é pequeno
#
# #### DIMENSIONAMENTO DE CATEGORIAS:
#
# - Processo de transformação de dados numéricos
# - Variáveis em escalas diferentes
# - Contribuem de forma desbalanceada para o modelo
# - Exemplo: Salário e Altura
# - Gradient Descent converge mais rapidamente para o mínimo local
# - Tipos:
# * Padronização (Z-score):
# • Dados aproximados da média (zero) e desvio padrão 1
# • Podem ser negativos
# • Não afeta outliers
# • Deve ser usado na maioria dos casos
# * Normalização (Min-Max):
# • Transforma para escala comum entre zero e 1
# • Usado em processamento de imagens e RNA
# • Quando não sabemos a distribuição dos dados
# • Quando precisam ser positivos
# • Algoritmos não "requerem" dados normais
# • Remove outliers pois impõe "limites"
# - Não vai necessariamente melhorar seu modelo!
# - Arvores de decisão não precisam de nenhum tipo
# - Não se aplica a atributos categóricos transformados
| Python/7_Machine-Learning-Introducao.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import io
import requests
import time
import random
# gets the hidden API key from the untracked secrets file
# BUG FIX: parsing the key out of Series.to_string() breaks if the key
# contains whitespace or gets truncated by pandas' display options;
# read the first cell of the column directly instead.
api_key = str(pd.read_csv('secrets.csv')['api_key'].iloc[0])
# gets data using user's parameters
def get_data(symbol, interval):
    """
    Signature: get_data(symbol, interval) -> 'DataFrame'
    Docstring:
    Retrieves up to two years of intraday market data for the selected
    symbol, one monthly slice per API call (24 calls total).
    Parameters
    ----------
    symbol : str
        The name of the equity of your choice. For example: symbol=GOOGL.
    interval : str
        Time interval between two consecutive data points in the time series.
        The following values are supported: 1min, 5min, 15min, 30min, 60min.
    Returns
    -------
    DataFrame
    Examples
    --------
    >>> get_data('GOOGL', '60min')
    """
    # main url of alphavantage and selection of features from user
    BASE_URL = 'https://www.alphavantage.co/query?'
    q = {
        'function': 'TIME_SERIES_INTRADAY_EXTENDED',
        'symbol': symbol,
        'interval': interval,
        'slice': 'year1month1',
        # BUG FIX: the key was a hard-coded placeholder; use the module-level
        # api_key loaded from secrets.csv, as the other endpoints do.
        'apikey': api_key,
    }
    df = pd.DataFrame()
    for y in range(1, 3):
        for m in range(1, 13):
            # the API serves the 2-year history as 24 one-month 'slices'
            q['slice'] = f'year{y}month{m}'
            # build the query string (previously a convoluted double join
            # with a shadowed loop variable)
            q_str = "&".join(f"{key}={value}" for key, value in q.items())
            url = BASE_URL + q_str
            print(url)
            # GET url; each slice arrives as CSV text
            response = requests.get(url)
            df = pd.concat([df, pd.read_csv(io.StringIO(response.content.decode('utf-8')))], axis=0)
            # the free API allows 5 calls per minute, so wait between calls
            time.sleep(60 / 5)
    # returns a dataframe with all slices stacked
    return df
# auto complete function for stocks
def auto_complete_stocks(x):
    """
    Signature: auto_complete_stocks(str) -> 'json'
    Docstring:
    Makes use of the auto-completion function of Alpha Vantage API.
    It takes the user's input and returns a json with the coincidences.
    Parameters
    ----------
    x : str
        A string containing part of the symbol or description of the equity.
        For example 'amaz' would return the symbol and description for AMZN stocks, etc.
    Returns
    -------
    json
    """
    # (removed an unused BASE_URL local — the f-string below already embeds
    # the full endpoint and the module-level api_key)
    url = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={x}&datatype=json&apikey={api_key}'
    response = requests.get(url).json()
    return response
# to fetch all updated stocks and ETFs supported
def get_supported_stocks():
    """
    Signature: get_supported_stocks() -> 'DataFrame'
    Docstring:
    Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
    See https://www.alphavantage.co/
    Returns
    -------
    DataFrame
    Examples
    --------
    >>> get_supported_stocks()
    """
    # (removed an unused BASE_URL local — the f-string already embeds the
    # full endpoint and the module-level api_key)
    url = f'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey={api_key}'
    response = requests.get(url)
    # the endpoint returns CSV text; parse it straight into a dataframe
    return pd.read_csv(io.StringIO(response.content.decode('utf-8')))
# to fetch all updated stocks and ETFs supported
# static version loading from .csv previously downloaded
def get_supported_stocks_static():
    """
    Signature: get_supported_stocks_static() -> (list, list, list)
    Docstring:
    Loads the supported list of stocks and ETFs from a previously
    downloaded .csv file instead of calling the Alpha Vantage API.
    Returns
    -------
    tuple of three lists: symbols, names, and 'SYMBOL - Name' display strings
    Examples
    --------
    >>> get_supported_stocks_static()
    """
    x = pd.read_csv('data/stocks_etfs_list.csv')
    l1 = x['symbol'].to_list()
    l2 = x['name'].to_list()
    # str() guards against NaN entries in the csv columns
    l3 = [str(i) + " - " + str(j) for i, j in zip(l1, l2)]
    return(l1, l2, l3)
| stocks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 64-bit (conda)
# name: python3
# ---
# # 02. Neural Network Classification with PyTorch
#
# ## TK - What we're going to cover
#
# TODO
# * Simple classification (reiterate PyTorch workflow)
# * Binary classification
# * Multi-class classification
# * Writing device agnostic code
# * Data on GPU
# * Model on GPU
# * More advanced classification (e.g. black and white images or FashionMNIST or FoodMNIST or something)
# * Visualizing a PyTorch model (all of the different connections)
#
#
# ## TK - How you can use this notebook
#
# ## TK - Architecture of a classification network
# ## TK - Make data
# +
from sklearn.datasets import make_moons
# Make 1000 samples
n_samples = 1000
# Create two interleaving half-moons (a binary classification toy dataset)
X, y = make_moons(n_samples,
                  noise=0.07,
                  random_state=42) # keep random state so we get the same values
# -
# Get dataframe of moons
import pandas as pd
moons = pd.DataFrame({"X1": X[:, 0],
                      "X2": X[:, 1],
                      "label": y
                      })
moons.head()
# Check different labels (expect a balanced 500/500 split)
moons.label.value_counts()
# Visualize with a plot
import matplotlib.pyplot as plt
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdYlBu)
# TK - why show data like this? This is a toy problem but it represents the major key of classification, you have some kind of data represented as numerical values and you'd like to build a model that's able to classify (separate red from blue) it.
# ## TK - Input and output shapes
# Check the shapes of our features and labels
X.shape, y.shape
# Check how many samples we have
len(X), len(y)
# View the first example of features and labels
X[0], y[0]
# +
# Turn data into tensors
# Otherwise this causes issues with computations later on
import torch
X = torch.from_numpy(X).type(torch.float)
y = torch.from_numpy(y).type(torch.float)
X[:5], y[:5]
# +
# Split data into train and test sets (80/20, seeded for reproducibility)
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=42)
len(X_train), len(X_test), len(y_train), len(y_test)
# -
# ## TK - Steps in modelling
#
# 1. Construct the model
# 2. Setup loss and optimizer
# 3. Create training/eval loop
# ### TK - Building a model
# +
from torch import nn

class MoonModelV0(nn.Module):
    """Two stacked linear layers (2 -> 10 -> 1); returns raw logits for
    binary classification (no activation between or after the layers)."""
    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(in_features=2, out_features=10)  # 2 input features -> 10 hidden units
        self.layer_2 = nn.Linear(in_features=10, out_features=1)  # hidden units -> single logit
    def forward(self, x):
        hidden = self.layer_1(x)
        return self.layer_2(hidden)
model_0 = MoonModelV0()
model_0
# +
# TODO, show case the same model with nn.Sequential
# NOTE: this rebinds model_0, so the nn.Sequential version (not the class
# above) is what actually gets trained below.
model_0 = nn.Sequential(
    nn.Linear(in_features=2, out_features=10),
    nn.Linear(in_features=10, out_features=1)
)
model_0
# -
# ### TK - Setup loss and optimizer
# TK - Explain both of these - https://discuss.pytorch.org/t/bceloss-vs-bcewithlogitsloss/33586/4
# loss_fn = nn.BCELoss() # BCELoss = no sigmoid built-in
# BCEWithLogitsLoss applies the sigmoid internally, which is numerically
# more stable than sigmoid followed by BCELoss.
loss_fn = nn.BCEWithLogitsLoss() # BCEWithLogitsLoss = Sigmoid built-in
optimizer = torch.optim.SGD(params=model_0.parameters(), lr=0.1)
# Calculate accuracy (a classification metric)
def accuracy_fn(y_true, y_pred):
    """Return the percentage (0-100) of predictions that exactly match the labels."""
    matches = torch.eq(y_true, y_pred).sum().item()
    return (matches / len(y_pred)) * 100
# ### TK - Train model
# TK - clean this up for different data types (could be far clearer what's going on)
# Standard PyTorch train/eval loop: forward -> loss -> zero grad -> backward -> step.
torch.manual_seed(42)
epochs = 100
for epoch in range(epochs):
    # 1. Forward pass
    y_pred = model_0(X_train).squeeze()  # this won't work unless X & y are tensors
    # 2. Calculate loss/accuracy
    # loss = loss_fn(torch.sigmoid(y_pred), y) # Using nn.BCELoss you need torch.sigmoid()
    loss = loss_fn(y_pred, y_train)  # using nn.BCEWithLogitsLoss you *don't* need torch.sigmoid()
    acc = accuracy_fn(y_true=y_train,
                      y_pred=torch.round(torch.sigmoid(y_pred)))  # logits -> probs -> hard labels
    # 3. Optimizer zero grad
    optimizer.zero_grad()
    # 4. Loss backwards
    loss.backward()
    # 5. Optimizer step
    optimizer.step()
    # Testing (inference_mode disables autograd for a faster forward pass)
    model_0.eval()
    with torch.inference_mode():
        test_pred = model_0(X_test).squeeze()
        test_loss = loss_fn(test_pred, y_test)
        test_acc = accuracy_fn(y_true=y_test,
                               y_pred=torch.round(torch.sigmoid(test_pred)))
    # Print out what's happening
    if epoch % 10 == 0:
        print(f"Epoch: {epoch} | Loss: {loss:.5f}, Accuracy: {acc:.2f}% | Test loss: {test_loss:.5f}, Test acc: {test_acc:.2f}%")
# ## TK - Plot model predictions
import numpy as np


def plot_decision_boundary(model, X, y):
    """Plot `model`'s predicted class regions over a 2D grid plus the data points.

    Handles both binary (single-logit) and multi-class (C-logit) models; the
    branch on the number of unique labels in `y` selects the conversion.
    """
    # Source - https://madewithml.com/courses/foundations/neural-networks/
    # (with modifications)
    # Build a 101x101 grid spanning the data range with a small margin.
    x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1
    y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1
    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101),
                         np.linspace(y_min, y_max, 101))
    # Flatten the grid into an (101*101, 2) float tensor of query points.
    X_test = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()
    model.eval()
    with torch.inference_mode():
        y_preds = model(X_test)
    # Test for multi-class
    if len(torch.unique(y)) > 2:
        y_pred = torch.softmax(y_preds, dim=1).argmax(dim=1)  # multi-class
    else:
        y_pred = torch.round(torch.sigmoid(y_preds))  # binary
    # Reshape preds back to the grid shape and plot filled contours.
    y_pred = y_pred.reshape(xx.shape).detach().numpy()
    plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)
    plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)
    plt.xlim(xx.min(), xx.max())
    plt.ylim(yy.min(), yy.max())
# Plot decision boundaries for training and test sets, side by side.
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_decision_boundary(model_0, X_train, y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_decision_boundary(model_0, X_test, y_test)
# ## TK - Improving a model
#
# TODO
# * Add layers
# * Add units
# * Train for longer...
#
# +
class MoonModelV1(nn.Module):
    """Three stacked linear layers (still no non-linearity between them).

    Produces raw logits of shape (batch, 1).
    """

    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(in_features=2, out_features=10)
        self.layer_2 = nn.Linear(in_features=10, out_features=10)
        self.layer_3 = nn.Linear(in_features=10, out_features=1)

    def forward(self, x):  # always make sure forward is spelt correctly!
        # Step-by-step pass through the three layers; equivalent to
        # chaining them in a single nested expression.
        z = self.layer_1(x)
        z = self.layer_2(z)
        z = self.layer_3(z)
        return z
model_1 = MoonModelV1()
model_1
# -
# loss_fn = nn.BCELoss() # Requires sigmoid on input
loss_fn = nn.BCEWithLogitsLoss()  # Does not require sigmoid on input
optimizer = torch.optim.SGD(model_1.parameters(), lr=0.1)
# NOTE(review): this loop trains on the full dataset (X, y) rather than the
# train split — presumably intentional for the demo, but verify.
epochs = 1000  # Train for longer
for epoch in range(epochs):
    # 1. Forward pass
    y_pred = model_1(X).squeeze()
    # 2. Calculate loss/accuracy
    loss = loss_fn(y_pred, y)
    acc = accuracy_fn(y_true=y,
                      y_pred=torch.round(torch.sigmoid(y_pred)))
    # 3. Optimizer zero grad
    optimizer.zero_grad()
    # 4. Loss backwards
    loss.backward()
    # 5. Optimizer step
    optimizer.step()
    # Print outputs
    if epoch % 100 == 0:
        print(f"Epoch: {epoch} | Loss: {loss:.5f}, Accuracy: {acc:.2f}%")
# Plot decision boundaries for training and test sets
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_decision_boundary(model_1, X_train, y_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_decision_boundary(model_1, X_test, y_test)
# Pretty good... but still not where we'd like it.
#
# Our model is drawing straight lines... but we'd like the lines to be curved, what can we do?
#
# How about we check our model on linear data, what does that do?
# +
# Create some data (same as notebook 01)
# Ground-truth linear relationship: y = weight * x + bias.
weight = 0.7
bias = 0.3
start = 0
end = 1
step = 0.02
# TK - change these variable names so they don't interfere with the original moons data
# Create data
X_regression = torch.arange(start, end, step).unsqueeze(dim=1)  # column vector of inputs
y_regression = weight * X_regression + bias
X_regression[:5], y_regression[:5]
# +
# Create train/test split
train_split = int(0.8 * len(X_regression))  # 80% of data used for training set
X_train_regression, y_train_regression = X_regression[:train_split], \
    y_regression[:train_split]
X_test_regression, y_test_regression = X_regression[train_split:], \
    y_regression[train_split:]
print(len(X_train_regression),
      len(y_train_regression),
      len(X_test_regression),
      len(y_test_regression))
# -
# TK - put this in helper function file...
# NOTE: the default arguments bind to the module-level moons split
# (X_train/y_train/X_test/y_test) at definition time; the regression cells
# below pass their own data explicitly.
def plot_predictions(train_data=X_train,
                     train_labels=y_train,
                     test_data=X_test,
                     test_labels=y_test,
                     predictions=None):
    """
    Plots training data, test data and compares predictions.
    """
    plt.figure(figsize=(10, 7))
    # Plot training data in blue
    plt.scatter(train_data, train_labels, c="b", s=4, label="Training data")
    # Plot test data in green
    plt.scatter(test_data, test_labels, c="g", s=4, label="Testing data")
    if predictions is not None:
        # Plot the predictions in red (predictions were made on the test data)
        plt.scatter(test_data, predictions, c="r", s=4, label="Predictions")
    # Show the legend
    plt.legend(prop={"size": 14});
plot_predictions(train_data=X_train_regression,
                 train_labels=y_train_regression,
                 test_data=X_test_regression,
                 test_labels=y_test_regression
                 );
# +
# Same architecture as model_1 (but using nn.Sequential)
model_2 = nn.Sequential(
    nn.Linear(in_features=1, out_features=10),
    nn.Linear(in_features=10, out_features=10),
    nn.Linear(in_features=10, out_features=1)
)
model_2
# -
# Loss and optimizer (L1Loss = mean absolute error, suited to regression)
loss_fn = nn.L1Loss()
optimizer = torch.optim.SGD(model_2.parameters(), lr=0.1)
# Train the model
torch.manual_seed(42)
epochs = 1000
for epoch in range(epochs):
    # 1. Forward pass
    y_pred = model_2(X_train_regression)
    # 2. Calculate loss
    loss = loss_fn(y_pred, y_train_regression)
    # 3. Optimizer zero grad
    optimizer.zero_grad()
    # 4. Loss backwards
    loss.backward()
    # 5. Optimizer step
    optimizer.step()
    # Testing
    model_2.eval()
    with torch.inference_mode():
        test_pred = model_2(X_test_regression)
        test_loss = loss_fn(test_pred, y_test_regression)
    # Print out what's happening
    if epoch % 100 == 0:
        print(f"Epoch: {epoch} | Train loss: {loss:.5f}, Test loss: {test_loss:.5f}")
model_2.eval()
with torch.inference_mode():
    y_preds = model_2(X_test_regression)
plot_predictions(train_data=X_train_regression,
                 train_labels=y_train_regression,
                 test_data=X_test_regression,
                 test_labels=y_test_regression,
                 predictions=y_preds.detach().numpy());
# ## TK - The missing piece: non-linearity
#
# Our model can draw straight lines, thanks to its linear layers.
#
# But how about we give it the capacity to draw non-straight (non-linear) lines?
#
# Let's try.
# +
# Make data (rebinds X and y with a fresh moons dataset)
from sklearn.datasets import make_moons
n_samples = 1000
# noise=0.15 keeps the two moons slightly overlapping so the task is non-trivial.
X, y = make_moons(n_samples=1000,
                  noise=0.15,
                  random_state=42,
                  )
# -
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdBu)
# +
# Convert to tensors and split into train and test sets
import torch
from sklearn.model_selection import train_test_split
X = torch.from_numpy(X).type(torch.float)
y = torch.from_numpy(y).type(torch.float)
# Split into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
                                                    random_state=42
                                                    )
X_train[:5], y_train[:5]
# +
# Build model
from torch import nn
class MoonModelV2(nn.Module):
    """Two hidden linear layers with ReLU activations; outputs one raw logit."""

    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(in_features=2, out_features=10)
        self.layer_2 = nn.Linear(in_features=10, out_features=10)
        self.layer_3 = nn.Linear(in_features=10, out_features=1)
        self.relu = nn.ReLU()
        # Can also put sigmoid in the model
        # This would mean you don't need to use it on the predictions
        # self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # ReLU after each hidden layer gives the model non-linear capacity.
        z = self.relu(self.layer_1(x))
        z = self.relu(self.layer_2(z))
        return self.layer_3(z)
model_3 = MoonModelV2()
print(model_3)
# -
# Setup loss and optimizer
loss_fn = nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model_3.parameters(), lr=0.1)
# Fit the model
torch.manual_seed(42)
epochs = 1000
for epoch in range(epochs):
    # 1. Forward pass
    y_pred = model_3(X_train).squeeze()
    # 2. Calculate loss
    loss = loss_fn(y_pred, y_train)
    acc = accuracy_fn(y_true=y_train,
                      y_pred=torch.round(torch.sigmoid(y_pred)))
    # 3. Optimizer zero grad
    optimizer.zero_grad()
    # 4. Loss backward
    loss.backward()
    # 5. Optimizer step
    optimizer.step()
    # Print out what's happening
    if epoch % 100 == 0:
        print(f"Epoch: {epoch} | Loss: {loss:.5f}, Accuracy: {acc:.2f}%")
# Make predictions: logits -> probabilities (sigmoid) -> hard 0/1 labels (round)
model_3.eval()
with torch.inference_mode():
    y_preds = torch.round(torch.sigmoid(model_3(X_test))).squeeze()
y_preds[:10], y[:10]  # want preds in same format as truth labels
# Plot decision boundaries for training and test sets
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_decision_boundary(model_1, X_train, y_train)  # model_1 = no non-linearity
plt.subplot(1, 2, 2)
plt.title("Test")
plot_decision_boundary(model_3, X_test, y_test)  # model_3 = has non-linearity
# ## Multi-class model
# +
import torch
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.model_selection import train_test_split
NUM_CLASSES = 4
NUM_FEATURES = 2
RANDOM_SEED = 42
# Create data: four Gaussian blobs in 2D.
X_blob, y_blob = make_blobs(n_samples=1000,
                            n_features=NUM_FEATURES,
                            centers=NUM_CLASSES,
                            cluster_std=1.5,  # larger std = more overlap between blobs
                            random_state=RANDOM_SEED
                            )
# Turn into tensors (labels as LongTensor, the dtype CrossEntropyLoss expects)
X_blob = torch.from_numpy(X_blob).type(torch.float)
y_blob = torch.from_numpy(y_blob).type(torch.LongTensor)
print(X_blob[:5], y_blob[:5])
# Split into train and test
X_blob_train, X_blob_test, y_blob_train, y_blob_test = train_test_split(X_blob,
                                                                        y_blob,
                                                                        test_size=0.2,
                                                                        random_state=RANDOM_SEED
                                                                        )
# Plot data
plt.figure(figsize=(10, 7))
plt.scatter(X_blob[:, 0], X_blob[:, 1], c=y_blob, cmap=plt.cm.RdYlBu);
# -
# TODO: **Question:** Does this dataset need non-linearity? Or could you draw a succession of straight lines to separate it?
# +
from torch import nn
# Build model
# Build model
class BlobModel(nn.Module):
    """Stack of three linear layers with configurable sizes.

    Args:
        input_features: number of input features per sample.
        output_features: number of classes (width of the output layer).
        hidden_units: width of the two hidden layers.
    """

    def __init__(self, input_features, output_features, hidden_units):
        super().__init__()
        # Assemble the layers as a list first, then wrap in nn.Sequential.
        layers = [
            nn.Linear(in_features=input_features, out_features=hidden_units),
            # nn.ReLU(),
            nn.Linear(in_features=hidden_units, out_features=hidden_units),
            # nn.ReLU(),
            nn.Linear(in_features=hidden_units, out_features=output_features),  # how many classes are there?
        ]
        self.linear_layer_stack = nn.Sequential(*layers)

    def forward(self, x):
        return self.linear_layer_stack(x)
model_4 = BlobModel(input_features=NUM_FEATURES,
                    output_features=NUM_CLASSES,
                    hidden_units=8)  # doesn't work with 1, try a different value
model_4
# -
# TODO: Explain different loss options for multi-class and binary classification
# Loss and optimizer (CrossEntropyLoss takes raw logits + long class labels)
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model_4.parameters(), lr=0.1)
# Fit the model
torch.manual_seed(42)
epochs = 100
for epoch in range(epochs):
    # 1. Forward pass
    y_pred = model_4(X_blob_train)
    # 2. Calculate loss and accuracy
    loss = loss_fn(y_pred, y_blob_train)
    acc = accuracy_fn(y_true=y_blob_train,
                      y_pred=y_pred.argmax(dim=1))  # highest logit = predicted class
    # 3. Optimizer zero grad
    optimizer.zero_grad()
    # 4. Loss backwards
    loss.backward()
    # 5. Optimizer step
    optimizer.step()
    # Print out what's happening
    if epoch % 10 == 0:
        print(f"Epoch: {epoch} | Loss: {loss:.5f}, Accuracy: {acc:.2f}%")
# Make predictions
model_4.eval()
with torch.inference_mode():
    y_preds = model_4(X_blob_test)
# TODO: Show the outputs of the model - what do these mean?
y_preds[:10]
# TODO: Softmax = obtain probabilities, no softmax = logits from model
# NOTE: softmax is monotonic per row, so argmax over probabilities equals argmax over logits.
print(torch.softmax(y_preds, dim=1).argmax(dim=1)[:10], y_preds.argmax(dim=1)[:10])
print(f"Test accuracy: {accuracy_fn(y_true=y_blob_test, y_pred=y_preds.argmax(dim=1))}%")
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.title("Train")
plot_decision_boundary(model_4, X_blob_train, y_blob_train)
plt.subplot(1, 2, 2)
plt.title("Test")
plot_decision_boundary(model_4, X_blob_test, y_blob_test)
# ## TODO: Exercises
#
# * Build a model to fit the spirals data from CS231n - https://cs231n.github.io/neural-networks-case-study/
#
# ```python
# N = 100 # number of points per class
# D = 2 # dimensionality
# K = 3 # number of classes
# X = np.zeros((N*K,D)) # data matrix (each row = single example)
# y = np.zeros(N*K, dtype='uint8') # class labels
# for j in range(K):
# ix = range(N*j,N*(j+1))
# r = np.linspace(0.0,1,N) # radius
# t = np.linspace(j*4,(j+1)*4,N) + np.random.randn(N)*0.2 # theta
# X[ix] = np.c_[r*np.sin(t), r*np.cos(t)]
# y[ix] = j
# # lets visualize the data:
# plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.Spectral)
# plt.show()
# ```
# ## TODO: Extra-curriculum
# ## TODO
# * ~~Multiclass model~~
# * ~~device agnostic code: GPU model~~ - not doing this due to complications with plotting code, will do it with more advanced modules
# * ~~Fixed data inputs (e.g. calc on tensors when need to calc on tensors)~~
# * ~~Merge the functions so `plot_decision_boundary` works on multi-class~~
# * ~~Introduce non-linearity (the missing piece!) - straight lines and non-straight lines~~
# * Make evaluation functions better - teach someone about different evaluation functions for a classification model
# * Make sure notebook runs from top to bottom
# * Make sure each section can be run individually - all imports should be where they need to be
# * Create helper_functions.py script for different helper functions throughout the course
| 02_pytorch_classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import seaborn as sns
import pandas as pd
from retrosheet_controller import RetoSheetDataController if False else RetroSheetDataController  # noqa: placeholder removed below
# +
# Votto vs. Ichiro: yearly walk counts (per month & total)
graph_data_list = {}
for df_walks in (
    {"name": "<NAME>", "data": votto_df_walks},
    {"name": "<NAME>", "data": ichiro_df_walks},
):
    graph_data = {}
    for year, walks in df_walks["data"].items():
        monthly_walks, walk_counts = analyzer.monthly_walks(walks)
        # Column label embeds the season and its total walk count, e.g. "2009(70)".
        graph_data["{year}({ball})".format(year=year, ball=walk_counts)] = monthly_walks
    df = pd.DataFrame(graph_data)
    graph_data_list[df_walks['name']] = df
# -
# Ichiro Suzuki
graph_data_list['Ichiro Suzuki'].plot(kind='bar', ylim=(0, 30), title='Osan-po({name})'.format(name='Ichiro Suzuki'))
# <NAME>
graph_data_list['<NAME>'].plot(kind='bar', ylim=(0, 30), title='Osan-po({name})'.format(name='<NAME>'))
# +
# Votto vs. Ichiro: multi-walk games (per month & total)
graph_data_list = {}
for df_walks in (
    {"name": "<NAME>", "data": votto_df_walks},
    {"name": "I<NAME>uki", "data": ichiro_df_walks},
):
    graph_data = {}
    for year, walks in df_walks["data"].items():
        monthly_walks, walk_counts = analyzer.monthly_walks_multi(walks)
        graph_data["{year}({ball})".format(year=year, ball=walk_counts)] = monthly_walks
    df = pd.DataFrame(graph_data)
    graph_data_list[df_walks['name']] = df
# -
# Ichiro Suzuki
graph_data_list['Ichiro Suzuki'].plot(kind='bar', ylim=(0, 10), title='Multi Osan-po({name})'.format(name='Ichiro Suzuki'))
# <NAME>
graph_data_list['<NAME>'].plot(kind='bar', ylim=(0, 10), title='Multi Osan-po({name})'.format(name='<NAME>'))
# +
# Count Votto's fly outs and (infield) pop flies per season.
fly, pop_fly = {}, {}
for year, atbat in votto_df_atbat.items():
    monthly_fly = analyzer._monthly_counts()
    monthly_pop = analyzer._monthly_counts()
    fly_cnt, pop_cnt = 0, 0
    for i, row in atbat.iterrows():
        # game_dt looks like YYYYMMDD; slice out the month digits.
        month = int(str(row['game_dt'])[4:6])
        # Count batted balls that look like fly outs:
        # event_cd == 2 is an out, and battedball_cd is a line drive ('L') or fly ('F').
        if row['event_cd'] == 2 and row['battedball_cd'] in ('L', 'F'):
            monthly_fly[month] += 1
            fly_cnt += 1
            # Flies caught in the infield (fielder position codes 1-6).
            if int(row['event_tx'][0:1]) < 7:
                monthly_pop[month] += 1
                pop_cnt += 1
    fly["{year}(Fly:{cnt})".format(year=year, cnt=fly_cnt)] = monthly_fly
    pop_fly["{year}(Pop Fly:{cnt})".format(year=year, cnt=pop_cnt)] = monthly_pop
df = pd.DataFrame(fly)
df.plot(kind='bar', ylim=(0, 40), title='Fly Count({name})'.format(name='<NAME>'))
df = pd.DataFrame(pop_fly)
df.plot(kind='bar', ylim=(0, 5), title='PopFly Count({name})'.format(name='<NAME>'))
# -
| retrosheet_app/joey_votto.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Title :
# Bayes - Exercise 2
#
# ## Description :
# Model $y$ as a least-squares regression as $y = \alpha + \beta \cdot x + \epsilon$
#
# After completing this exercise you should see following trace plots:
#
# <img src="../fig/fig2.png" style="width: 500px;">
#
#
# ## Hints:
#
# <a href="https://docs.pymc.io/api/distributions/continuous.html#pymc3.distributions.continuous.Normal" target="_blank">pymc3 Normal</a>
#
# Refer to lecture notebook.
#
# Do not change any other code except the blanks.
# !pip install xarray==0.16.0
# +
import pandas as pd
import numpy as np
import pymc3 as pm
from matplotlib import pyplot
# %matplotlib inline
# -
df = pd.read_csv('data3.csv')
### edTest(test_pm_model) ###
np.random.seed(109)
with pm.Model() as model:
    # prior
    alpha = pm.Normal('alpha', mu=0, tau=1000)
    beta = pm.Normal('beta', mu=0, tau=1000)
    # likelihood
    # Next statement creates the expected value of mu_vec of the
    # outcomes, specifying the linear relationship.
    # mu_vec is just the sum of the intercept alpha and the product of
    # the coefficient beta and the predictor variable.
    # NOTE(exercise): the blanks (`____`) below are intentionally left for the
    # student to fill in — see the lecture notebook referenced in the header.
    mu_vec = pm.Deterministic('mu_vec', ____)
    tau_obs = pm.Gamma('tau_obs', 0.001, 0.001)
    obs = pm.Normal(_______)  # Parameters to set: name, mu, tau, observed
    trace = pm.sample(2000, tune=2000, chains=2)
pm.traceplot(trace, var_names=['alpha','beta','tau_obs'], compact=False);
# posterior means
np.mean(trace['alpha']), np.mean(trace['beta']), np.mean(trace['tau_obs'])
| content/lectures/lecture12/notebook/L3_2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Introdução - movimento Browniano geométrico
#
# Um processo estocástico $\{S_t\}_{t \geq 0}$ pode ser modelado como um mBg (movimento Browniano geométrico) se satisfazer a SDE:
#
# $$dS_t = \mu S_t dt + \sigma S_t dB_t$$
#
# Neste caso, a solução da SDE acima é dada por $S_t = S_0 e^{X(t)}$, onde $X(t) = \left(\mu - \frac{\sigma^2}{2} \right)t + \sigma B_t$ e $B_t$ é um movimento Browniano, ou seja, $X(t)$ é um movimento Browniano com drift $\left(\mu - \frac{\sigma^2}{2}\right)$.
#
# ## Propriedade
#
# $\quad$ i. $\lim_{t \to +\infty} \mathbb{P}\left(S_t = +\infty \mid \mu - \frac{\sigma^2}{2} > 0\right) = 1$.
#
#
# # Estratégia
#
# O backtest abaixo tem a seguinte ideia, mensalmente iremos estimar os parâmetros do modelo movimento Browniano geométrico e iremos rankear os ativos do maior para o menor com base na seguinte fórmula:
#
# $$\frac{\mu}{\sigma^2}$$
#
# A ideia é que queremos os ativos com maior drift e menor volatilidade e aproveitar-se da propriedade i citada anteriormente na introdução. Observe que $\mu$ e $\sigma$ são os parâmetros do modelo. Selecionaremos o top n ativos com maior ranking e aplicaremos a fronteira eficiente nesse conjunto, a estratégia fará o rebalanceamento dos ativos mensalmente.
# +
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from collections import OrderedDict
#library for filtering signals
import statsmodels.api as sm
#Numerical operations
import numpy as np
from numpy import log as ln
from math import floor, isnan
#Symbolic operations
import sympy as sy
from sympy import symbols, lambdify
from sympy.functions.elementary.exponential import exp
#Datetime operations
import datetime
#Import backtrader platform
import backtrader as bt
import backtrader.analyzers as btanalyzers
#Module to analyze portfolio
import pyfolio as pf
#Ploting library
import matplotlib.pyplot as plt
#operating system library
import os
#Optimizations
from scipy.optimize import minimize, Bounds, LinearConstraint
from scipy.stats import norm as N
#portfolio analyzer
import quantstats as qs
#data analysis library
import pandas as pd
#debugging library
from ipdb import set_trace
class Portfolio():
    """
    Class portfolio, retrieve optimal portfolio weights based on Markowitz efficient frontier model.

    Inputs:
        μ - assets log return vector
        Σ - assets variance-covariance matrix
        premiumRisk - risk premium (stored for downstream use)
        minimize_method - method to optimize (std = standard deviation, neg_sharpe = negative sharpe)
        long_only(True/False) - allows short position if False (not implemented)
    """

    def __init__(self, μ, Σ, premiumRisk=0, minimize_method='std', long_only=True):
        n = len(μ)
        self.n = n
        self.μ = μ
        self.Σ = Σ
        self.n_days = 1
        self.minimize_method = minimize_method
        # Fix: the constructor previously assigned `self.premiumRisk = 0`
        # before the argument assignment, making the first write dead code;
        # keep only the assignment from the argument.
        self.premiumRisk = premiumRisk
        self.set_long_only = long_only

    def n_bound(self, n):
        """Long-only bounds: each weight in [0, +inf); the budget constraint
        (weights sum to 1) caps them at 1 in practice."""
        return [(0, None) for i in range(n)]

    def std(self, w, μ, Σ):
        """Portfolio standard deviation sqrt(wᵀ Σ w)."""
        return (np.dot(w.T, np.dot(Σ, w))) ** 0.5

    def neg_sharpe(self, w, μ, Σ):
        """Negative Sharpe ratio (minimising it maximises the Sharpe ratio)."""
        return -(np.dot(w.T, μ)) / ((np.dot(w.T, np.dot(Σ, w))) ** 0.5)

    def optimize(self):
        """Solve for the optimal weights.

        Returns:
            (optW, optRisk, optReturn): weight vector, its standard deviation
            and its expected return. Also caches risk/return on the instance
            for `plot()`.
        """
        μ = self.μ
        Σ = self.Σ
        n = self.n
        # Pick the objective according to the configured method.
        if self.minimize_method == 'std':
            f = self.std
        if self.minimize_method == 'neg_sharpe':
            f = self.neg_sharpe
        # Initial guess: random weights normalised to sum to one.
        w = np.random.rand(n)
        w /= np.sum(w)
        # Model constraints: long-only bounds plus full-investment budget.
        bounds = self.n_bound(n)
        constraints = LinearConstraint(np.ones(n), lb=1, ub=1)
        # Optimization via SLSQP (supports bounds + linear constraints).
        optimization = minimize(f, w, args=(μ, Σ),
                                method='SLSQP',
                                bounds=bounds, constraints=constraints)
        # Risk / return of the optimal portfolio.
        optW = optimization['x']
        optReturn = np.dot(optW.T, self.μ)
        optRisk = np.sqrt(np.dot(optW.T, np.dot(self.Σ, optW)))
        self.optRisk = optRisk
        self.optReturn = optReturn
        return optW, optRisk, optReturn

    def plot(self):
        """Scatter 10,000 random portfolios in risk/return space and mark
        the optimal point found by `optimize()` (call `optimize()` first)."""
        n = self.n
        PortfolioReturn = []
        PortfolioRisk = []
        for t in range(10000):
            ω = np.random.rand(n)
            ω /= (np.sum(ω) * 1)
            Return = np.dot(ω.T, self.μ)
            Risk = np.sqrt(np.dot(ω.T, np.dot(self.Σ, ω)))
            PortfolioReturn.append(Return)
            PortfolioRisk.append(Risk)
        fig, ax = plt.subplots(figsize=(16, 10), facecolor=(0.75, 0.75, 0.75))
        # plot
        ax.plot(PortfolioRisk, PortfolioReturn, 'o', label='Portfolios')
        ax.plot(self.optRisk, self.optReturn, 'bo', label='Optimal Point')
        # aesthetics
        ax.set_facecolor((0.1, 0.1, 0.1))
        ax.grid(True)
        ax.set_ylabel('Expected Return')
        ax.set_xlabel('Standard Deviation')
        ax.legend()
        plt.show()
class gBm(bt.Indicator):
    """
    Geometric Brownian Motion model.
    dS_t = μ S_t dt + σ S_t dB_t
    Exponential growth: the model fits growth stocks such as AAPL, TSLA,
    MGLU3, PRIO3, etc. better — i.e. assets that grow exponentially.
    """
    # Output lines: estimated mean/vol/drift plus the forward-looking
    # variance and stop-loss quantile.
    lines = ('m', 'σ', 'μ', 'var', 'sl')
    params = dict(size=2**9+1,
                  sl_quantil=0.1,
                  sl_look_forward=5)

    def __init__(self):
        # Require a full window of closes before producing any value.
        self.addminperiod(self.p.size)

    def next(self):
        # Latest `size` closing prices for the current bar.
        self.S = self.data.close.get(0, size=self.p.size)
        self.estimateParams()
        self.lines.m[0] = self.m
        self.lines.σ[0] = self.σ
        self.lines.μ[0] = self.μ
        self.lines.var[0] = self.Var(self.p.sl_look_forward)
        self.lines.sl[0] = self.q(self.p.sl_quantil, self.p.sl_look_forward)

    def estimateParams(self):
        """
        Estimate (m, σ, μ) from the window's log returns.
        Referência:
        "Estimation of Geometric Brownian Motion Parameters for Oil Price Analysis" C. Jakob et al.
        """
        S = self.S
        X = np.diff(np.log(S), n=1)  # one-step log returns
        m = X.mean()  # mean
        σ = X.std()  # standard deviation
        μ = m + ((σ**2)/2)  # drift
        n = len(S)
        self.m = m
        self.σ = σ
        self.μ = μ
        self.n = n

    def E(self, t):
        """
        Expected price t steps ahead: S0 * exp(μ t).
        Referência:
        Ross, <NAME>. (2014). "Variations on Brownian Motion".
        Introduction to Probability Models (11th ed.).
        """
        S = self.S
        S0 = S[-1]
        μ = self.μ
        return S0*np.exp(μ*t)

    def Var(self, t):
        """
        Variance of the price t steps ahead.
        Referência:
        Ross, <NAME>. (2014). "Variations on Brownian Motion".
        Introduction to Probability Models (11th ed.).
        """
        S = self.S
        S0 = S[-1]
        μ = self.μ
        σ = self.σ
        return (S0**2)*np.exp(2*μ*t)*(np.exp((σ**2)*t) - 1)

    def q(self, p, t):
        """
        Quantile of St/S0, defined as:
        q(p) = exp( (μ - σ**2/2)*t + σ*np.sqrt(t)*inv_Φ(p))
        p ∈ (0,1)
        """
        #assert p>0 and p<1
        #assert type(t)==int
        σ = self.σ
        μ = self.μ
        mean = (μ - (σ**2/2))*t
        var = σ**2*t
        return np.exp(mean + np.sqrt(var)*N.ppf(p, 0, 1))
class momentumStrat(bt.Strategy):
    """
    strategy class

    Monthly rebalancing: rank every asset by gBm drift/volatility (μ/σ),
    keep the top `stocks_holding` names and weight them with the Markowitz
    efficient frontier (negative-Sharpe minimisation).
    """
    params = dict(gBmSize=2**8+1,  # number of observations to use in backtest to estimate parameters
                  enableLog=True,  # enable log of buy and sell of assets
                  exposure=1,  # exposure to the market
                  rebal_monthday=1,  # which day in month to do the rebalancing
                  stocks_holding=10)  # Max number of stocks holding

    def __init__(self):
        self.order = None  # variable to track pending orders
        acoes = self.datas
        # construct line objects of gBm data (one indicator per asset)
        gBms = [gBm(d, size=self.p.gBmSize) for d in acoes]
        driftAcoes = [gBm.μ for gBm in gBms]
        volatAcoes = [gBm.σ for gBm in gBms]
        # construct a line object of ranks (drift-to-volatility ratio per asset)
        self.rank = {d: drift/volat for d, drift, volat in zip(acoes, driftAcoes, volatAcoes)}
        self.driftAcoes = {d: drift for d, drift in zip(acoes, driftAcoes)}
        self.volatAcoes = {d: volat for d, volat in zip(acoes, volatAcoes)}
        # create a timer to execute the strategy monthly
        self.add_timer(
            when=bt.Timer.SESSION_START,
            monthdays=[self.p.rebal_monthday],
            monthcarry=True  # if a day isn't there, execute on the next
        )

    def notify_order(self, order):
        '''Notify if an order was executed/submitted or not'''
        if order.status in [order.Submitted, order.Accepted]:
            # Buy/Sell order submitted/accepted to/by broker - Nothing to do
            return
        # Check if an order has been completed
        # Attention: broker could reject order if not enough cash
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log(
                    'BUY EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
                    (order.executed.price,
                     order.executed.value,
                     order.executed.comm))
                self.buyprice = order.executed.price
                self.buycomm = order.executed.comm
            else:  # Sell
                self.log('SELL EXECUTED, Price: %.2f, Cost: %.2f, Comm %.2f' %
                         (order.executed.price,
                          order.executed.value,
                          order.executed.comm))
            self.bar_executed = len(self)
        elif order.status==order.Canceled:
            self.log('Order Canceled. Price: {}'.format(order.price))
        elif order.status==order.Margin:
            self.log('Order Margin. Price: {}'.format(order.price))
        elif order.status==order.Rejected:
            self.log('Order Rejected. Price: {}'.format(order.price))
        # Clear the pending-order flag regardless of the outcome.
        self.order = None

    def log(self, txt, dt=None, doprint=False):
        '''Logging function for this strategy'''
        if self.params.enableLog or doprint:
            dt = dt or self.datas[0].datetime.date(0)
            print('%s, %s' % (dt.isoformat(), txt))

    def getPortfolioParameters(self, datas:list):
        '''Get portfolio assets log returns vector and assets variance-covariance matrix'''
        # Stack each asset's close-price window into the columns of S.
        S = np.column_stack(tuple(np.array(d).T for d in map(lambda x: x.close.get(0, self.p.gBmSize), datas)))
        S0 = S[0]
        # Log returns of prices normalised by the first observation.
        X = np.diff(np.log(np.divide(S,S0)), n=1, axis=0)
        μ = X.mean(axis=0)
        Σ = np.cov(X, rowvar=False)
        n = len(datas)
        return μ, Σ

    def notify_timer(self, timer, when, *args, **kwargs):
        '''this function is called monthly, it is our strategy logic'''
        l = len(self)
        # Skip until enough history exists, or while an order is pending.
        if l < self.p.gBmSize or self.order:
            return
        # Keep only assets that have a full price window available.
        rank = {d: rank for (d, rank) in self.rank.items() if d.close.get(0, self.p.gBmSize)}
        ranks = sorted(
            rank.items(),  # get the (d, rank), pair
            key=lambda x: x[1][0],  # use rank (elem 1) and current time "0"
            reverse=True,  # highest ranked 1st ... please
        )
        top_ranks = ranks[:self.p.stocks_holding]
        datas = [data for (data, rank) in top_ranks]
        # Efficient-frontier weights (max Sharpe) for the top-ranked set.
        μ, Σ = self.getPortfolioParameters(datas)
        portfolio = Portfolio(μ, Σ, minimize_method='neg_sharpe')
        w, Risk, Return = portfolio.optimize()
        #portfolio.plot()
        opt_portfolio = [(data,weight) for (data,weight) in zip(datas,w)]
        current_positions = [d for d, pos in self.getpositions().items() if pos]
        # if we're not in market
        if len(current_positions)==0:
            for (d,weight) in opt_portfolio:
                if weight!=0:
                    self.order = self.order_target_percent(d, target=weight)
                    self.log('Buying: {} /---/ Rank: {:.4f} /---/ Weight: {:.3f}'.format(d._name,
                                                                                         rank[d][0],
                                                                                         weight))
            return
        # if we're in market
        else:
            # remove those no longer top ranked
            # do this first to issue sell orders and free cash
            for d in (d for d in current_positions if d not in datas):
                self.log('Closing: {} /---/ Rank: {:.4f}'.format(d._name, rank[d][0]))
                self.order_target_percent(d, target=0.0)
            # rebalance those already top ranked and still there
            for d in (d for d in current_positions if d in datas):
                i = datas.index(d)
                self.log('Rebalancing: {} /---/ Rank: {:.4f} /---/ Weight: {:.3f}'.format(d._name,
                                                                                          rank[d][0],
                                                                                          w[i]))
                self.order_target_percent(d, target=w[i])
                del datas[i]  # remove it, to simplify next iteration
                w = np.delete(w, i)
            # issue a target order for the newly top ranked stocks
            # do this last, as this will generate buy orders consuming cash
            for i,d in enumerate(datas):
                if w[i]!=0:
                    self.log('Buying: {} /---/ Rank: {:.4f} /---/ Weight: {:.3f}'.format(d._name,
                                                                                         rank[d][0],
                                                                                         w[i]))
                    self.order_target_percent(d, target=w[i])
                else:
                    continue

    def stop(self):
        # Final portfolio value, always printed regardless of enableLog.
        self.log('Ending Value %.2f' %
                 (self.broker.getvalue()), doprint=True)
def runStrat(in_start, in_end, wf=False, best_set=None):
    '''create cerebro instance, set cash, add data, add strategy and run it'''
    cerebro = bt.Cerebro()
    # Set cash value
    cerebro.broker.set_cash(100000)
    # Set commission model (zero commission)
    cerebro.broker.setcommission(commission=0)
    cerebro.broker.set_checksubmit(False)
    data_font = 'data'
    # Adding data: one CSV feed per file found in the data directory.
    start=datetime.datetime.strptime(in_start, '%Y-%m-%d')
    end=datetime.datetime.strptime(in_end, '%Y-%m-%d')
    files = os.listdir(data_font)
    for file in files:
        #print(file)
        datapath='{}//'.format(data_font)+file
        data = bt.feeds.YahooFinanceCSVData(dataname=datapath,
                                            fromdate=start,
                                            todate=end,
                                            reverse=False)
        cerebro.adddata(data)
    # Add analyzers
    cerebro.addanalyzer(bt.analyzers.PyFolio, _name='pyfolio')
    cerebro.addanalyzer(btanalyzers.SharpeRatio, _name='sharpe')
    # Add Strategy
    if wf:
        # NOTE(review): momentumStrat declares no `wf` entry in `params`, so
        # passing wf=... here likely fails at instantiation — confirm before
        # calling with wf=True. `best_set` is currently unused.
        cerebro.addstrategy(momentumStrat, wf=wf)
    else:
        cerebro.addstrategy(momentumStrat)
    # Run Algo
    results = cerebro.run(maxcpus=1)
    # Plot results
    #cerebro.plot()
    return results
def analyzeStrat(results, live):
    '''analyze strategy results'''
    strat = results[0]
    pyfoliozer = strat.analyzers.getbyname('pyfolio')
    returns, positions, transactions, gross_lev = pyfoliozer.get_pf_items()
    # extend pandas functionality with metrics, etc.
    qs.extend_pandas()
    # Normalise the index to date-only timestamps before reporting.
    returns.index = pd.to_datetime(returns.index.astype(str).str[:10])
    # Full tear sheet benchmarked against the Bovespa index.
    qs.reports.full(returns, '^BVSP')


if __name__=='__main__':
    # NOTE(review): `live` is passed through but never used inside
    # analyzeStrat — confirm whether a live/backtest split was intended.
    live = '2021-01-01'
    r = runStrat('2005-01-03', '2021-06-02')
    analyzeStrat(r, live)
| Efficient_frontier/backtest/.ipynb_checkpoints/geometricBrownianMotion_Eff_frontier-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # MAT281 - Laboratorio N°04
#
# <a id='p1'></a>
#
# ## Problema 01
#
# En la carpeta data se encuentra el archivo `nba.db`, el cual muestra informacion básica de algunos jugadores de la NBA.
#
# <img src="https://encrypted-tbn0.gstatic.com/images?q=tbn:ANd9GcQHCVR_4gPOmYYGglSV0rT1WlJtgRGnIw6Z6G68PTY1r_Qunv-6C3BDIa3jnStoNvdgcEE&usqp=CAU" align="center"/>
#
#
# +
from sqlalchemy import create_engine
import pandas as pd
import os

# Create the engine/connector.
# BUG FIX: a SQLAlchemy URL is not a filesystem path -- os.path.join uses
# the OS separator, yielding 'sqlite:///data\\nba.db' on Windows, which is
# an invalid URL.  Build the URL with explicit forward slashes instead.
conn = create_engine('sqlite:///' + '/'.join(('data', 'nba.db')))
# NOTE(review): Engine.table_names() is deprecated in SQLAlchemy 1.4+;
# prefer sqlalchemy.inspect(conn).get_table_names() going forward.
conn.table_names()
# -
# funcion de consultas
def consulta(query,conn):
    """Run *query* against *conn* and return the result set as a DataFrame."""
    resultado = pd.read_sql_query(query, con=conn)
    return resultado
# ejemplo
query = """
SELECT * FROM player
"""
consulta(query,conn)
# El objetivo es tratar de obtener la mayor información posible de este conjunto de datos mediante código **SQL**. Para cumplir con este objetivo debe resolver las siguientes problemáticas:
# 1. Mostrar las primeras 5 filas
#muestra las primeras 5 filas
query = """
SELECT * FROM player LIMIT 5;
"""
consulta(query,conn)
# 2. Seleccionar los valores únicos de la columna `position`.
# Unique values of `position`.  SELECT DISTINCT is the idiomatic form;
# the original GROUP BY (with no aggregate) produced the same rows but
# obscured the intent.
query = """
SELECT DISTINCT position
FROM player
"""
consulta(query,conn)
# 3. Seleccionar y cambiar el nombre de la columna `name` por `nombre`
# se selecciona la columna name y se muestra con el nombre "nombre"
query = """
SELECT name AS nombre
From player
"""
consulta(query,conn)
# 4. Determinar el tiempo (en años) de cada jugador en su posición
#se determina el tiempo en años de cada jugador en su posicion
query = """
SELECT name, position, year_end - year_start AS tiempo
FROM player
"""
consulta(query,conn)
# 5. Encontrar el valor máximo de la columna `weight` por cada valor de la columna `position`
# se busca el valor maximo de la columna weight para cada valor de la columna position
query = """
SELECT position, MAX(weight)
FROM player
GROUP BY position
"""
consulta(query,conn)
# 6. Encontrar el total de jugadores por cada valor de la columna `year_start`
#se encuentra el total de jugadores por cada año de inicio
query = """
SELECT year_start, COUNT(name)
FROM player
GROUP BY year_start
"""
consulta(query,conn)
# 7. Encontrar el valor mínimo, máximo y promedio de la columna `weight` por cada valor de la columnas `college`
# minimo de peso por college
query = """
SELECT college, MIN(weight)
FROM player
GROUP BY college
"""
consulta(query,conn)
# maximo de peso por college
query = """
SELECT college, MAX(weight)
FROM player
GROUP BY college
"""
consulta(query,conn)
# valor promedio de peso por college
query = """
SELECT college, AVG(weight)
FROM player
GROUP BY college
"""
consulta(query,conn)
# 8. Filtrar por aquellos jugadores que cumplan con :
# * Para la columna `year_start` tienen un valor mayor 1990 y menor a 2000
# * Para la columna `position` tienen un valor de `C`,`C-F` o `F-C`
# * Para la columna `college` tienen un valor distinto de `Duke University`
#
#se filtra por los años de inicio desde 1990 a 2000
query = """
SELECT *
FROM player
WHERE year_start BETWEEN 1990 AND 2000
"""
consulta(query,conn)
# Filter by position C, C-F and F-C.
# BUG FIX: ``position = 'C' OR 'C-F' OR 'F-C'`` compares only against 'C'
# and then ORs the bare string literals -- SQLite evaluates a non-numeric
# string as false, so only position = 'C' was actually matched.
query = """
SELECT *
FROM player
WHERE position IN ('C', 'C-F', 'F-C')
"""
consulta(query,conn)
#se muestran todos los datos sin el college Duke University
query = """
SELECT *
FROM player
WHERE college <> 'Duke University'
"""
consulta(query,conn)
# 9. Crear dos conjuntos de datos y juntarlos en una misma *query*. Las condiciones de cada uno de los cojunto de datos son:
#
# * **df1**:
# * Para la columna `year_start` tienen un valor mayor 1990 y menor a 2000
# * Para la columna `position` tienen un valor de `C`,`C-F` o `F-C`
#
# * **df2**:
# * Para la columna `year_end` tienen un valor menor a 2000
# * Para la columna `position` tienen un valor de `G`o `F`
# df1 UNION df2.
# BUG FIX: both position predicates used ``position = 'X' OR 'Y'``, which
# ORs bare string literals (false in SQLite) instead of comparing against
# them -- use IN (...) to match any of the listed positions.
query ="""
SELECT *
FROM player
WHERE (year_start BETWEEN 1990 AND 2000) AND position IN ('C', 'C-F', 'F-C')
UNION
SELECT *
FROM player
WHERE (year_end < 2000) AND position IN ('G', 'F')
"""
consulta(query,conn)
| labs/lab_04.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **JSON2RTF**
# + [markdown] tags=[]
# # Úvod
# Cílem aplikace je vytvořit z přijatých vstupních souborů (data v JSON a formulář v rtf) vyplněný výstupní soubor rtf. Příkladem praktického využití je situace, kdy máme v JSON uloženou databázi zaměstanců a chceme z ní vytisknout určitá data - např. seznam zaměstanců, kde u každého bude uvedeno jeho bydliště. Vytvoříme si tedy pomocí MS Office Word (nebo podobného editoru) formulář, který opatříme flagy, podle kterých bude formulář vyplněn.
# <br>Aplikace se skládá z knihovny JSON2RTF_lib a serveru JSON2RTF_server který implementuje JSON2RTF_lib. Server zajišťuje příjem vstupních souborů a odeslání výstupního souboru. Zároveň implementuje funkce pro zabezpečení pomocí tokenu. Knihovna pak slouží pro samotné zpracování přijatých souborů do výsledného vyplněného formuláře.
# <br>Pro demonstaraci funkčnosti obsahuje aplikace testovací stránku na platformě Swagger, které umožňuje nahrát vybrané soubory a zobrazit výstup.
# <br>Pro správnou funkčnost aplikce je nutné, aby uživatel byl poučený a veděl, jak správně napsat vstupní formulář - aplikace neobsahuje funkce pro kontrolu kompatibility rtf a JSON.
# -
# # Nasazení aplikace
# Aplikace je z důvodu zajištění kompatibility provozována v prostředí Docker. Obsahuje jak soubor DOCKERFILE pro vytvoření image, tak i docker-compose.yaml který slouží ke spuštení kontejneru.
# <br>Pro spuštění aplikace je nutné v adresáři s aplikací použít příkaz docker-compose up
# <br>UI aplikace je pak defaultně na localhost:80\docs
# <br>Aplikace je publikována pod MIT licencí na __[GitHub](https://github.com/MrM2266/JSON2RTF_Lib)__ a __[hub.docker.com](https://hub.docker.com/repository/docker/marek2266/json2rtf_server)__
# ## Docker
# Pro zajištění kompatibilty je nutné mít naistalovaný Docker a Docker Compose (na Windows oba součástí Docker Hub). Adresu a port, na kterém bude aplikace dostupná v rámci kontejneru, je možné nastavit v DOCKERFILE - obsahuje parametr pro uvicorn a FastAPI.
# <br>Adresu a port, na kterém bude aplikace dostupná z hostitelského pc (nebo v rámci sítě) je možné nastavit v docker-compose.yaml
# <br>Pro stažení z __[hub.docker.com](https://hub.docker.com/repository/docker/marek2266/json2rtf_server)__ je možné použít příkaz: docker pull marek2266/json2rtf_server:latest
# ## UI
# Součástí aplikace je jednoduché UI, které je ve výchozím nastavení dostupné na localhost:80/docs. Umožňuje nahrát vstupní soubory a zobrazit výstup. Jedná se o testovací prostředí vytvořené pomocí FastAPI a Swagger. Je zde také možné otestovat zabezpečení pomocí tokenu.<br>
# <br> Defaultní údaje:<br>Username: jirka<br>Login: secret
# # JSON file
# Podle IESG Standarts Track 2070-1721 RFC8259 může JSON soubor obsahovat pouze value. Value může být: number, string, boolean, array, object a null. Každý json soubor může obsahovat jen jednu value. Jednotlivé value je možné do sebe libovolně vnořovat.
# <br><br>**Pro správné zobrazení diakritiky je nutné json uložit s formátováním Windows-1250 (cp1250)**
# <br>
# <br>**array**
# <br>- je to seřazený soubor hodnot, značí se [ ]
# <br>- k hodnotám je možné přistoupit pomocí jejich indexu (key = index)
# <br>- v poli mohou být pouze hodnoty oddělené čárkou - tzn. number, string, boolean, array, object a null - [10, "text",true, [ ], { }, null] - prvky pole mají jen index
# <br>
# <br>**object**
# <br>- je to neuspořádaný soubor hodnot, značí se { }
# <br>- k hodnotám je možné přistoupit pomocí jejich key
# <br>- prvky jsou ve tvaru "key":"value" kde value je number, string, boolean, array, object nebo null
# <br>- prvky se oddělují čárkou
# <br>{
# <br> "number":8,
# <br> "string":"text",
# <br> "boolean":true,
# <br> "object":{},
# <br> "array":[],
# <br> "null":null
# <br>}
# <br>
# <br>**number**
# <br>- je to jakékoliv číslo
# <br>- př. 26
# <br>
# <br>**string**
# <br>- je to jakýkoliv text
# <br>- př. "text"
# <br>
# <br>**boolean**
# <br>- true nebo false
# <br>
# <br>**null**
# <br>- pouze null
# # Flagy v RTF
# Pro označení míst, která mají být ve výtupním rtf nahrazena daty z JSON je nutné použít flagy. Flagy se píší do vstupního formuláře. Cílem flagu je označit místo, které má být nahrazeno daty z JSON a popsat, kde se požadovaná data v JSON nacházejí. JSON soubor má strukturu úrovní - je možné vnořovat libovolně prvky do sebe. Flagy definují úroveň (level), ve které se hledaná data nacházejí.
# <br>
# <br>
# <br>Sytax flagů se řídí následujícími pravidly, která částečně vycházejí z pravidel pro JSON:
# 1. Nejvyšší úroveň json i rtf se nazývá root
# 1. Flagy se vždy píší do hranatých závorek ve tvaru [[flag:key]]
# 1. v rootu musí být pouze jedno pole nebo jeden objekt
# 1. součástí flagu je key, který obsahuje název konkrétní struktury v JSON např. [[A:pole]] - zde key = pole
# 1. prvky pole mají index - jako key se používá slovo null - př. [[O:null]][[E:null]] označuje objekt v poli
# 1. key musí být unikátní v rámci levelu
# 1. jako key v json nelze použít null
# 1. každá struktura kromě item musí mít end flag
# <br><br><br>Rozlišujeme následující flagy:
# <br>
# <br>**end**
# <br>[[E:key]]
# <br>- označuje konec struktury
# <br>
# <br>**object**
# <br>[[O:key]] - označuje začátek objektu key - program vyhledá v aktuální úrovni json souboru objekt auto a vstoupí do něj
# <br>[[E:key]] - označuje konec objektu key - program ukončí hledání v objektu auto a přesune se v json souboru o úroveň nahoru
# <br>- odpovídá hodnotě object v json
# <br>
# <br>**array**
# <br>[[A:key]] - označuje začátek pole key - program přejde z aktuálního levelu v json souboru do pole key
# <br>[[E:key]] - označuje konec pole key - program se přesune v json souboru o level nahoru
# <br>- Flag array je specifický tím, že kód, který je mezi start flagem [[A:lide]] a end flagem [[E:lide]] se provede pro každý prvek pole.
# <br>- Program spočítá počet prvků v json (např. pole lide obsahuje v json 5 objektů) a provede zadaný kód 5x -> kód mezi [[A:key]] a [[E:key]] se provede 5x po sobě, pokaždé pro jeden prvek pole key
# <br>- odpovídá hodnotě array v json
# <br>
# <br>**item**
# <br>[[I:key]]
# <br>- označuje místo, které se má nahradit z json konkrétními daty - program v json objektu najde položku s názvem key a místo [[I:key]] doplní data z json
# <br>- odpovídá hodnotám number, string, boolean, null v json
# <br>
# <br>
# <br>
# <br>Možné kombinace a jejich zápis pomocí flagů:
# <br>
# <br>**Root může obsahovat:**
# <br>[[A:root]] - nepojmenované pole v rootu; end flag [[E:root]]
# <br>[[O:root]] - nepojmenovaný objekt v rootu;end flag [[E:root]]
# <br>
# <br>**Array může obsahovat:**
# <br>[[A:null]] - nepojmenované pole v poli; end flag [[E:null]]
# <br>[[O:null]] - nepojmenovaný objekt v poli; end flag [[E:null]]
# <br>[[I:index]] - index čísla, stringu nebo booleanu pro vypsání
# <br>
# <br>**Object může obsahovat:**
# <br>[[A:key]] - pro pole v objektu - musí být pojmenované (musí mít strukturu key:value); end flag [[E:key]]
# <br>[[O:key]] - pro objekt v objektu - musí bý pojmenovaný; end flag [[E:key]]
# <br>[[I:key]] - pro string, číslo, boolean v objektu - musí být pojmenovaný
# <br>
# <br>**Vypsání všech prvků pole:**
# <br>- v json máme pole, které obsahuje pouze stringy, nebo čísla - chceme celé pole vypsat
# <br>- pomocí flagů [[A:key]][[E:key]] spustíme na poli key fci ArrayAsString, která vypíše prvky pole jako stringy oddělené čárkami
# # Postup při vytváření šablony pro vyplnění
# 1. V MS Word vytvoříme šablonu - s formátováním, tabulkami atd. - uložíme jako rtf
# 1. Šablonu otevřeme v notepadu - dopíšeme flagy a uložíme jako rtf
# 1. odešleme spolu s příslušným JSON na server<br><br><br>
# # Popis funkcionality knihovny JSON2RTF_lib
# Tato kapitola se bude věnovat hlubšímu popisu knihovny. Bude ilustrovat jednotlivé funkce a popíše způsob, jakým jsou soubory zpracovány.
# ## Obecně
# Knihovna obsahuje dvě základní třídy CData a CjsonReader. Třída CData má dvě instance - input a output. Slouží pro uložení vstupního a výstupního rtf jako string. Třída má proměnnou m_data, která obsahuje rtf. Poskytuje základní metody - LoadData a Add. Díky nim umí input i output načíst data a přidat si do m_data část vygenerovaného rtf.
# <br>Třída CjsonReader zajišťuje "pohyb" v json souboru a čtení dat z něj. Poskytuje metody Down, Up a NextElement pro "pohyb" v levelech JSON a metody GetArraySize a GetRootSize pro zjištění velikosti polí. Metody GetArrayAsString a GetRootAsString vracejí prvky pole jako string. Metoda GetItem pak vrací data z JSON objektu.
# <br><br>Obecně knihovna funguje tak, že celý rtf soubor se předá do fce Process. Ta najde první start flag v řetezci input. Vše co je před start flagem přidá do output. K start flagu najde odpovídající end flag. Vše co je před end flagem odstarní z input (data jsou zpracována). Podle flagu (A, O, I) spustí odpovídající funkci - Array, Object, Item a předá jí vše, co je mezi start flagem a end flagem. Funkce spuštná podle flagu (Array, Object, Item) opět předává svou část do fce Process -> je možné jít do libovolné úrovně JSON - JSON se neustále "rozbaluje".
# <br>Funkce se takto volají neustále mezi sebou, až string m_data v input neobsahuje žádné znaky - výsledný rtf je zpracovaný.
# ## Hledání Flagů
# Chceme zpracovat rtf soubor uložený ve stringu inputRTF
inputRTF = "rtf kod se speciálními znaky ,.ů§/*86 [[O:auto]] kod objektu auto[[E:auto]] pokračování rtf kódu"
# <br>Nejprve si definujeme třídu CData, jejíž instance budou input a output - bude se starat o vstupní a výstupní rtf.
class CData:
    """Holds an RTF document as a plain string (used for input and output)."""
    def __init__(self):
        # Accumulated RTF text.
        self.m_data = ""
    def Add(self, add):
        """Append *add* (coerced to str) to the stored data.
        Args:
            add: value appended to m_data
        """
        self.m_data = self.m_data + str(add)
    def LoadData(self, data):
        """Replace the stored data with *data* (an RTF file as a string).
        Args:
            data: rtf as string - from fastAPI
        """
        self.m_data = data
# <br>Vyhledáme první start flag - fce FlagStart vrací array ve tvaru [flag, key, počátek start flagu, konec start flagu]. String inpurRTF získáme např. z FastAPI
# +
import re
def FlagStart(data):
    """Locate the first start flag ([[A:key]], [[O:key]] or [[I:key]]) in *data*.

    Args:
        data: string to scan for a start flag
    Returns:
        [flag, key, start, end]: flag letter, its key, and the character
            positions of the flag's first and one-past-last character
        None: when the string contains no start flag
    """
    # One grouped pattern covers all three flag letters; groups capture
    # the letter and the key directly instead of slicing by offsets.
    m = re.search(r"\[\[([AOI]):([a-zA-Z0-9]+)\]\]", data)
    if m is None:
        return None
    return [m.group(1), m.group(2), m.start(), m.end()]
# Demo: load the sample RTF and report the first start flag found.
input = CData()
output = CData()
input.LoadData(inputRTF)
outputStart = FlagStart(input.m_data)
print(f"Flag: {outputStart[0]}")
print(f"Key: {outputStart[1]}")
print(f"Pozice prvního znaku start flagu: {outputStart[2]}")
print(f"Pozice posledního znaku start flagu: {outputStart[3]}")
# -
# <br>Funkce vyhledala v řetězci první start flag a jako výstup předává všechny potřebné informace - key, pozice prvního a posledního znaku.
# <br>Pomocí funkce FlagEnd vyhledáme na zadaném řetězci end flag s konkrétním key
# +
def FlagEnd(data, key):
    """Find the end flag matching *key* in *data*.

    For key == "null" the flags may nest, so start/end null flags are
    walked in document order tracking nesting depth; the flag that brings
    the depth back to zero is the match.  For any other key the first
    literal ``[[E:key]]`` occurrence is used.

    Args:
        data: String of data in which to find the ending tag
        key: Value of the ending tag
    Returns:
        [start, end]: positions of the matching end flag's first and
            one-past-last character (for non-null keys, -1 based values
            when the tag is absent, mirroring str.find)
    """
    # BUG FIX (code hygiene): the original shadowed the builtins `list`
    # and `str` with locals; renamed and the magic constant 10 replaced
    # by len() of the actual tag.
    if (key == "null"):
        starts = []
        ends = []
        for match in re.finditer(r"\[{2}(A|O):null\]{2}", data):
            starts.append(match.start())
        for match in re.finditer(r"\[{2}E:null\]{2}", data):
            ends.append(match.start())
        # Walk every null flag position in order, tracking nesting depth.
        depth = 0
        for pos in sorted(starts + ends):
            if pos in starts:
                depth += 1
            if pos in ends:
                depth -= 1
            if (depth == 0):
                return [pos, pos + len("[[E:null]]")]
    else:
        end_tag = "[[E:" + key + "]]"
        pos = data.find(end_tag)  # computed once instead of twice
        return [pos, pos + len(end_tag)]
# Demo: locate the end flag matching the start flag found above.
outputEnd = FlagEnd(inputRTF, outputStart[1])
print(f"Pozice prvního znaku end flagu: {outputEnd[0]}")
print(f"Pozice posledního znaku end flagu: {outputEnd[1]}")
# -
# <br><br>Výstup z obou funkcí používá funkce Process, která předá část vstupního rtf do výstupu, odstraní flagy a kód mezi nimi předá do příslušné funkce. Funkce jsou uvedené v modifikované podobě - pouze zobrazují data, která přijímají jako parametry.
# +
def Process(data):
    """Takes string data and finds the first tag from the beginning (start tag) and corresponding end tag
    Text that is before start flag is added to output
    Text that is after end flag is left for another loop
    The code between the start flag and the end flag is passed into Process function again
    Args:
        data: string to process
    Returns:
        str: text that is after the end flag - code for another loop
    """
    start = FlagStart(data)
    if start != None:
        # Everything before the start flag is finished output.
        output.Add(data[0:start[2]])
        if start[0] != "I":
            # A and O flags have a matching end flag; the code between the
            # two flags is handed to the Array/Object handler.
            end = FlagEnd(data, start[1])
            if start[0] == "A":
                Array(data[start[3]:end[0]], start[1])
            if start[0] == "O":
                Object(data[start[3]:end[0]], start[1])
            return data[end[1]:]
        else:
            # Item flags have no end flag; only the key is consumed.
            Item(start[1])
            return data[start[3]:]
    else:
        # No flags left -- emit the rest of the input verbatim.
        output.Add(data)
        return ""
# NOTE: the three handlers below are demo stubs for this notebook -- the
# real library versions recurse back into Process(); here they only print
# what they received.
def Array(data, key):
    print("Funkce Array\n=============================================")
    print(f"Key k vyhledání v json: {key}")
    print(f"Kód pole: {data}")
def Object(data, key):
    print("Funkce Object\n=============================================")
    print(f"Key k vyhledání v json: {key}")
    print(f"Kód objektu: {data}")
def Item(key):
    print("Funkce Key\n=============================================")
    print(f"Key k vyhledání v json: {key}")
# Run one decoding pass over the loaded RTF.
input.m_data = Process(input.m_data)
# -
# <br>V tuto chvíli jsme schopni dekódovat první úroveň flagů. Funkce Array a Object ve skutečnosti nezobrazují výstup, ale znovu svůj kód předají do funkce Process - v kódu např. objektu dojde k vyhledání dalších struktur a spuštění příslušných funkcí - v objektu je např. pole -> funkce Process spustí funkci Array - ta opět provede analýzu svého kódu pomocí Process -> kód může jít do libovolné úrovně.
# <br>V tomto příkladu popíšeme pouze práci s první úrovní - inputRTF obsahuje pouze jednu dvojici start flag - end flag<br><br>
# ## Vyhledávání v JSON
# Budeme ilustrovat funkci třídy CjsonReader na stringu inputJson, který obsahuje vstupní JSON soubor (např. z FastAPI)
inputJson = """
{
"states": [
{
"name": "Alabama",
"abbreviation": "AL",
"areaCodes": [ "205", "251", "256", "334", "938" ]
},
{
"name": "Alaska",
"abbreviation": "AK",
"areaCodes": [ "907" ]
},
{
"name": "Arizona",
"abbreviation": "AZ",
"areaCodes": [ "480", "520", "602", "623", "928" ]
},
{
"name": "Arkansas",
"abbreviation": "AR",
"areaCodes": [ "479", "501", "870" ]
}]
}
"""
# Definujeme si třídu CjsonReader, který bude zajišťovat práci s JSON souborem a vyhledávání v něm.
# +
import json
class CjsonReader:
    """Cursor over a parsed JSON document.

    Keeps a stack of nested JSON values so callers can step Down() into
    objects/arrays and back Up(); m_indexes remembers which array element
    is currently being processed on each level.
    """
    def __init__(self):
        self.m_levels = [] #list of individual levels, current is always the last item of the array
        self.m_level = 0
        self.m_indexes = [0] #list of indexes (index 0 contains value 2 -> I'am on an index 2 on level 0
    def LoadData(self, jsonString, decode):
        """Parse a JSON document and store it as the root level.
        Args:
            jsonString: the JSON document; bytes when decode is 1,
                otherwise anything json.loads accepts (str or bytes)
            decode: if 1 -> bytes are decoded as cp1250 (Windows-1250)
                before parsing; if 0 -> value is parsed as-is
        """
        if (decode == 1):
            self.m_levels.append(json.loads(str(jsonString, 'cp1250')))
        else:
            self.m_levels.append(json.loads(jsonString))
    def Down(self, key):
        """Gets you down a level to entered key or index
        Args:
            key: Index or key you want to step down to
        """
        self.m_levels.append(self.m_levels[self.m_level][key])
        self.m_indexes.append(0)
        self.m_level += 1
    def Up(self):
        """Gets you up a level (no-op when already at the root)
        """
        if self.m_level > 0:
            del self.m_levels[-1]
            del self.m_indexes[-1]
            self.m_level -= 1
    def GetItem(self, key):
        """Returns a value from the current JSON object/array
        Args:
            key: JSON dictionary key (or array index) of the desired value
        Returns:
            The JSON value stored under *key* (number, string, bool, ...)
        """
        return self.m_levels[self.m_level][key]
    def GetArraySize(self, key):
        """Returns the size of an array
        Args:
            key: Key of an array you want to get the size of
        Returns:
            int: Size of an array
        """
        return len(self.m_levels[self.m_level][key])
    def GetRootSize(self):
        """Returns the size of root array
        Returns:
            int: Size of root
        """
        return len(self.m_levels[0])
    def GetArrayAsStr(self, key):
        """Returns array as one string - elements are separated with ", "
        Args:
            key: Key of the array to render as a string
        Returns:
            str: the array elements joined by ", " ("" for an empty array)
        """
        result = ""
        for i in self.m_levels[self.m_level][key]:
            result = result + str(i) + ", "
        return result[:-2]
    def GetRootAsStr(self):
        """Returns the root array as one string - elements are separated with ", "
        Returns:
            str: the root elements joined by ", " ("" for an empty root)
        """
        result = ""
        for i in self.m_levels[0]:
            result = result + str(i) + ", "
        return result[:-2]
    def NextElement(self):
        """Moves you one item forward in current level (only in array)
        """
        self.m_indexes[self.m_level] += 1
    def GetIndex(self):
        """Returns index of processing element on current level
        Returns:
            int: index of an element
        """
        return self.m_indexes[self.m_level]
# -
# V následujícím kódu si vytvoříme instanci třídy CjsonReader a pomocí jejích metod budeme zobrazovat data z inputJson.
# +
jsonData = CjsonReader()
jsonData.LoadData(inputJson, 0)
jsonData.Down("states") ##v souboru je objekt, který obsahuje pole states; z root objektu vstoupíme do states
jsonData.Down(0) ##v poli states vstoupíme do prvku 0
print("Data z prvku 0\n==================================")
print(jsonData.GetItem("name")) ##vypíšeme položku name státu s indexem 0
print(jsonData.GetItem("abbreviation"))
print(jsonData.GetArrayAsStr("areaCodes")) ##vrací pole jako string
jsonData.Up() ##vrátíme se o úroveň nahoru - z prvku 0 do pole states
jsonData.Down(1) ##vstoupíme do prvku 1
print("\n\nData z prvku 1\n==================================")
print(jsonData.GetItem("name")) ##vypíšeme položku name státu s indexem 0
print(jsonData.GetItem("abbreviation"))
print(jsonData.GetArrayAsStr("areaCodes")) ##vrací pole jako string
jsonData.Up() ##vrátíme se o úroveň nahoru - z prvku 1 do pole states
jsonData.Down(2) ##vstoupíme do prvku 2
print("\n\nData z prvku 2\n==================================")
print(jsonData.GetItem("name")) ##vypíšeme položku name státu s indexem 0
print(jsonData.GetItem("abbreviation"))
print(jsonData.GetArrayAsStr("areaCodes")) ##vrací pole jako string
jsonData.Up() ##jsme v poli states
print("\n\nSeznam států v json\n==================================")
jsonData.Up()
pocet = jsonData.GetArraySize("states") ##vrací velikost pole states
jsonData.Down("states")
for i in range(0,pocet):
jsonData.Down(i)
print(jsonData.GetItem("name"))
jsonData.Up()
# -
# <br><br>
# ## Generování výstupu
# Knihovna JSON2RTF_lib propojuje obě třídy dohromady a podle toho, jak čte rtf flagy, tak provádí čtení z JSON souboru. V následujícím kódu bude ukázána funkce celé knihovny. Na vstupu budeme mít dva stringy - jeden obsahuje data ze vstupního rtf formuláře a druhý obsahuje data z JSON.
# <br>V kódu bude použita hotová knihovna JSON2RTF_lib.py tak, jak jí používá JSON2RTF_server.
# +
# %reset -f
inputJson = """
{
"states": [
{
"name": "Alabama",
"abbreviation": "AL",
"areaCodes": [ "205", "251", "256", "334", "938" ]
},
{
"name": "Alaska",
"abbreviation": "AK",
"areaCodes": [ "907" ]
},
{
"name": "Arizona",
"abbreviation": "AZ",
"areaCodes": [ "480", "520", "602", "623", "928" ]
},
{
"name": "Arkansas",
"abbreviation": "AR",
"areaCodes": [ "479", "501", "870" ]
}]
}
"""
# -
inputRTF = "[[O:root]]Seznam statu \n================\n[[A:states]][[O:null]]Stat: [[I:name]]\n[[E:null]][[E:states]][[E:root]]"
#inputRTF = "[[O:root]]Seznam statu \n================\n\n[[A:states]][[O:null]]Stat: [[I:name]]\nKod: [[I:abbreviation]]\nOblasti: [[A:areaCodes]][[E:areaCodes]]\n\n[[E:null]][[E:states]][[E:root]]"
# +
import JSON2RTF_lib as RTF
RTF.Init()
RTF.LoadJson(inputJson, 0)
RTF.LoadRTF(inputRTF)
RTF.ProcessRTF()
outputRTF = RTF.GetOutput()
print(outputRTF)
| JSON2RTF.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="RmU3VFldxvKl" colab_type="code" colab={}
import pandas as pd
import numpy as np
from sklearn.tree import DecisionTreeRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.model_selection import cross_val_score
# + id="X7rO4QY7yIzi" colab_type="code" outputId="1849d289-31a8-4385-b1a9-dbda115770d2" colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd "/content/drive/My Drive/Colab Notebooks/dataworkshop_matrix"
# + id="aibSNh0jym8f" colab_type="code" outputId="751af51a-efab-48f9-8018-49417b03088e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# cd ../
# + id="7kvSipZfy4LR" colab_type="code" outputId="bcbd7eaa-76e2-4d04-8def-de9d080723c5" colab={"base_uri": "https://localhost:8080/", "height": 34}
df = pd.read_csv('data/men_shoes.csv', low_memory=False)
df.shape
# + id="MgH_XdhHzDW7" colab_type="code" outputId="152eef94-a127-4b5f-8be2-958132a2e56d" colab={"base_uri": "https://localhost:8080/", "height": 221}
df.columns
# + id="SiWTbKBwzPmM" colab_type="code" outputId="a9af17c8-3f89-44dc-edc6-e633f6de2f2b" colab={"base_uri": "https://localhost:8080/", "height": 34}
# average price of a single shoe
mean_price = np.mean( df['prices_amountmin'] )
mean_price
# + id="0mMNMc9bzfK7" colab_type="code" outputId="52cb6603-2ebd-4217-ecc3-66cd65e8a77b" colab={"base_uri": "https://localhost:8080/", "height": 34}
[4] * 10
# + id="_rnC6J_fz1yb" colab_type="code" outputId="8201f4e0-bf90-4166-ca5d-2278c67a3dbc" colab={"base_uri": "https://localhost:8080/", "height": 34}
y_true = df['prices_amountmin']
y_true.shape[0]
# + id="XHai6C6pz8qF" colab_type="code" colab={}
y_pred = [mean_price] * y_true.shape[0]
# + id="n2NkdBhg0Ejt" colab_type="code" outputId="240fe488-dedf-4010-eec8-c5ab3b307fa4" colab={"base_uri": "https://localhost:8080/", "height": 34}
mean_absolute_error(y_true, y_pred)
# + id="fpk3X5Wh0O1l" colab_type="code" outputId="55b8ee66-8e5b-4a5e-dffa-89b75aa06a05" colab={"base_uri": "https://localhost:8080/", "height": 282}
df['prices_amountmin'].hist(bins=100)
# + id="5mRA37qQ0fT2" colab_type="code" outputId="15bcc678-79a5-44dd-9b82-8521296016b4" colab={"base_uri": "https://localhost:8080/", "height": 282}
np.log(df['prices_amountmin'] + 1).hist(bins=100)
# + id="L3Ihe2bV01Sl" colab_type="code" colab={}
y_true = df['prices_amountmin']
y_pred = [np.median(y_true)] * y_true.shape[0]
# + id="zgazQWHa1Ad3" colab_type="code" outputId="194c6875-e571-46f7-8863-9ba42964fae1" colab={"base_uri": "https://localhost:8080/", "height": 34}
mean_absolute_error(y_true, y_pred)
# + id="DqiNdlm61E69" colab_type="code" outputId="942eea82-fd27-4dca-bb1b-a74e4c7e5d03" colab={"base_uri": "https://localhost:8080/", "height": 34}
# 50% of shoes cost median value (64.95) or less
np.median(y_true)
# + id="HNQgwAQS1M3N" colab_type="code" outputId="de82b564-cbf9-4ef8-a9e1-7e65ab7d0132" colab={"base_uri": "https://localhost:8080/", "height": 34}
# log-transformation
price_log_mean = np.mean( np.log1p(y_true) )
price_log_mean
# + id="aY09jmMN1zN_" colab_type="code" outputId="ac01b67d-688a-4e4f-a519-0f2e2e35e5c0" colab={"base_uri": "https://localhost:8080/", "height": 34}
np.exp( np.mean( np.log1p(y_true) ) ) - 1
# + id="Rj4R8vkG2D6k" colab_type="code" outputId="1dcfe2e2-4dd0-46e3-b61f-b38f3c615473" colab={"base_uri": "https://localhost:8080/", "height": 34}
price_log_mean = np.exp( np.mean( np.log1p(y_true) ) ) - 1
y_pred = [price_log_mean] * y_true.shape[0]
mean_absolute_error(y_true, y_pred)
# + id="wK3kHiIH2KvZ" colab_type="code" outputId="1202e88d-1b45-4b80-c814-2dbd34baaf7b" colab={"base_uri": "https://localhost:8080/", "height": 221}
df.columns
# + id="VOJd_oBw2jK-" colab_type="code" outputId="183dc429-d312-459c-e091-d7c787738024" colab={"base_uri": "https://localhost:8080/", "height": 221}
df.brand.value_counts()
# + id="hxK4A49H2pGG" colab_type="code" colab={}
# Associating data with unique IDs
df['brand_cat'] = df['brand'].factorize()[0]
# + id="YfshTgWN3OCH" colab_type="code" outputId="a24c96ec-a5c1-41e7-ba15-b1f863f0e131" colab={"base_uri": "https://localhost:8080/", "height": 34}
features = ['brand_cat']
X = df[ features ].values
y = df['prices_amountmin'].values
model = DecisionTreeRegressor(max_depth=5)
scores = cross_val_score(model, X, y, scoring='neg_mean_absolute_error')
np.mean(scores), np.std(scores)
# + id="kpyKwKeC4HN1" colab_type="code" colab={}
def run_model(features):
    """Cross-validate a depth-5 decision tree on *features* against the
    min-price target; returns (mean, std) of the neg-MAE CV scores."""
    X = df[features].values
    y = df['prices_amountmin'].values
    regressor = DecisionTreeRegressor(max_depth=5)
    cv_scores = cross_val_score(regressor, X, y, scoring='neg_mean_absolute_error')
    return np.mean(cv_scores), np.std(cv_scores)
# + id="TFJ_kRjK4jQ9" colab_type="code" outputId="52ee27e3-1908-487f-b427-e28906079e91" colab={"base_uri": "https://localhost:8080/", "height": 34}
run_model(['brand_cat'])
# + id="zT7ZzHYv4lIS" colab_type="code" colab={}
df['manufac_cat'] = df['manufacturer'].factorize()[0]
# + id="UO6M-tUv4zQH" colab_type="code" outputId="519b3666-175f-45c5-816e-b5ec453cf3ba" colab={"base_uri": "https://localhost:8080/", "height": 34}
run_model(['manufac_cat'])
# + id="p4x_9DyV42oD" colab_type="code" outputId="c1d3ec2c-7480-4c60-c3be-488baf955d8f" colab={"base_uri": "https://localhost:8080/", "height": 34}
run_model(['manufac_cat', 'brand_cat'])
| Matrix/ML_model_day4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Day 2
# --- Day 2: Dive! ---
# Now, you need to figure out how to pilot this thing.
#
# It seems like the submarine can take a series of commands like forward 1, down 2, or up 3:
#
# - forward X increases the horizontal position by X units.
# - down X increases the depth by X units.
# - up X decreases the depth by X units.
# Note that since you're on a submarine, down and up affect your depth, and so they have the opposite result of what you might expect.
#
# The submarine seems to already have a planned course (your puzzle input). You should probably figure out where it's going. For example:
# ```
# forward 5
# down 5
# forward 8
# up 3
# down 8
# forward 2
# ```
# Your horizontal position and depth both start at 0. The steps above would then modify them as follows:
#
# - forward 5 adds 5 to your horizontal position, a total of 5.
# - down 5 adds 5 to your depth, resulting in a value of 5.
# - forward 8 adds 8 to your horizontal position, a total of 13.
# - up 3 decreases your depth by 3, resulting in a value of 2.
# - down 8 adds 8 to your depth, resulting in a value of 10.
# - forward 2 adds 2 to your horizontal position, a total of 15.
# After following these instructions, you would have a horizontal position of 15 and a depth of 10. (Multiplying these together produces 150.)
#
# Calculate the horizontal position and depth you would have after following the planned course. What do you get if you multiply your final horizontal position by your final depth?
# Read the planned course; split('\n') leaves a trailing '' element when
# the file ends with a newline, which the loops below stop at.
with open ('input_day2.txt') as f:
    commands = f.read().split('\n')
print(len(commands))
commands[-1]
# Part 1: accumulate horizontal position and depth directly.
value_hor = 0
value_depth = 0
for command in commands:
    if command == '':
        break  # trailing empty entry left by split('\n') marks the end
    direction, amount = command.split(' ')
    if direction == 'forward':
        value_hor += int(amount)
    elif direction == 'down':
        value_depth += int(amount)
    elif direction == 'up':
        value_depth -= int(amount)
print(str(value_hor), str(value_depth))
result = value_hor * value_depth
result
# --- Part Two ---
# Based on your calculations, the planned course doesn't seem to make any sense. You find the submarine manual and discover that the process is actually slightly more complicated.
#
# In addition to horizontal position and depth, you'll also need to track a third value, aim, which also starts at 0. The commands also mean something entirely different than you first thought:
#
# - down X increases your aim by X units.
# - up X decreases your aim by X units.
# - forward X does two things:
# - It increases your horizontal position by X units.
# - It increases your depth by your aim multiplied by X.
#
# Again note that since you're on a submarine, down and up do the opposite of what you might expect: "down" means aiming in the positive direction.
#
# Now, the above example does something different:
#
# forward 5 adds 5 to your horizontal position, a total of 5. Because your aim is 0, your depth does not change.
# down 5 adds 5 to your aim, resulting in a value of 5.
# forward 8 adds 8 to your horizontal position, a total of 13. Because your aim is 5, your depth increases by `8*5=40`.
# up 3 decreases your aim by 3, resulting in a value of 2.
# down 8 adds 8 to your aim, resulting in a value of 10.
# forward 2 adds 2 to your horizontal position, a total of 15. Because your aim is 10, your depth increases by `2*10=20` to a total of 60.
# After following these new instructions, you would have a horizontal position of 15 and a depth of 60. (Multiplying these produces 900.)
#
# Using this new interpretation of the commands, calculate the horizontal position and depth you would have after following the planned course. What do you get if you multiply your final horizontal position by your final depth?
#
#
# Part 2: 'down'/'up' now adjust aim; 'forward X' moves horizontally by X
# and dives by aim * X.
value_hor = 0
value_depth = 0
value_aim = 0
for c in commands:
    if c == '':
        break  # trailing empty entry left by split('\n') marks the end
    else:
        direction, value = c.split(' ')
        if direction == 'forward':
            value_hor = value_hor + int(value)
            value_depth = value_depth + (value_aim * int(value))
        elif direction == 'down':
            value_aim = value_aim + int(value)
        elif direction == 'up':
            value_aim = value_aim - int(value)
print(str(value_hor), str(value_depth))
result = value_hor * value_depth
result
| 2021/monica/Day2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: complex-systems
# language: python
# name: complex-systems
# ---
# # Logistic map
#
# We define the logistic map as,
# $$
# x_{n+1}=rx_n(1-x_n).
# $$
# +
from plotly import offline as py
from plotly import graph_objs as go
from dynamics import logistic_map
from dynamics import correlation_sum_1d
py.init_notebook_mode(connected=True)
# -
# ## Correlation dimension
#
# In the present notebook we are going to estimate the correlation dimension of the logistic map at the onset of chaos at $r\approx3.5699456$.
import numpy as np  # BUG FIX: np is used throughout this notebook but was never imported

# r at the onset of chaos (Feigenbaum point) for the logistic map.
r = 3.5699456
x = logistic_map(0.1, r, 5000)
eps = np.linspace(1e-4, 1e-1, 1000)
cor = np.array([correlation_sum_1d(x, e) for e in eps])
# +
# Fit a straight line in log-log space: the slope estimates the correlation dimension.
logeps = np.log(eps)
logcor = np.log(cor)
p = np.polyfit(logeps, logcor, 1)
logfit = p[1] + p[0]*logeps
# +
data = [
    go.Scatter(x=logeps, y=logcor, name='data'),
    go.Scatter(x=logeps, y=logfit, name='fit'),
]
figure = go.Figure(data=data, layout=go.Layout(
    showlegend=True,
    xaxis=dict(title='log(ε)'),
    yaxis=dict(title='log(C(ε))')
))
py.iplot(figure)
# -
# Slope of the log-log fit = estimated correlation dimension (≈0.5 here).
p[0]
# The slope of the fit gives us our correlation dimension $d\approx 0.5$.
| notebooks/logistic/Correlation Dimension.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Error Analysis
#
# Find localisation error using control lines
#
# The strategy is the following:
#
# - Register points across channels before and after chromatic aberration correction
# - Do the step above using multiple distance cutoff on registration
# - Error will be given by the standard deviation of the distances after correction
# +
import glob
import sys
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import scipy
import scipy.optimize
sys.path.append("../source/")
from dual_channel_analysis.utils import (
chromatic_aberration_correction,
compute_affine_transform,
)
from IPython.display import Markdown as md
matplotlib.use("Agg")
# %matplotlib inline
# +
def register_points_using_euclidean_distance(
    reference: np.ndarray, moving: np.ndarray, distance_cutoff: float = 0.1
):
    """Match each point in *moving* to at most one point in *reference*.

    Solves the linear-sum-assignment (Hungarian) problem on the pairwise
    Euclidean distance matrix, then drops matched pairs that are farther
    apart than *distance_cutoff* (same units as the coordinates).

    Parameters
    ----------
    reference, moving :
        (N, D) and (M, D) coordinate arrays. NOTE: annotations previously
        said ``pd.DataFrame``, but the integer row indexing below requires
        plain arrays — all callers pass ``df[[X, Y, Z]].values``.
    distance_cutoff :
        Maximum distance for a pair to count as registered.

    Returns
    -------
    Two equally sized arrays with the matched reference and moving points
    in corresponding row order.
    """
    cdist = scipy.spatial.distance.cdist(reference, moving, metric="euclidean")
    rows, cols = scipy.optimize.linear_sum_assignment(cdist)
    # Vectorized filter; replaces the original per-pair array rebuilding loop.
    keep = cdist[rows, cols] <= distance_cutoff
    rows, cols = rows[keep], cols[keep]
    reference = np.asarray(reference)[rows]
    moving = np.asarray(moving)[cols]
    return reference, moving
def calculate_deviation_registred_points(channel1_files, channel2_files, path_beads):
    """Measure channel-to-channel localisation error for a range of
    registration distance cutoffs.

    For each cutoff, points from paired channel-1/channel-2 CSV files are
    registered before and after chromatic-aberration correction (computed
    from the bead images in *path_beads*); per-pair residual distances after
    an additional affine refit are collected into the returned DataFrame.

    Parameters
    ----------
    channel1_files, channel2_files : parallel lists of CSV paths; file i of
        each list is assumed to image the same field of view.
    path_beads : directory with the bead localisations used by
        ``chromatic_aberration_correction``.

    Returns
    -------
    pd.DataFrame with columns ``distance_after``, ``dist_cutoff`` and
    ``x_after``/``y_after``/``z_after`` residuals, one row per matched pair.

    NOTE(review): relies on module-level globals X, Y, Z (column names) and
    ``outdir`` (for the QC PDF path) — confirm they are set before calling.
    """
    res = pd.DataFrame()
    # Sweep the registration cutoff to see how it affects the error estimate.
    for dist_cutoff in [0.08, 0.09, 0.1, 0.2, 0.3, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]:
        channel1_coords = []
        channel2_coords = []
        channel1_coords_after_correction = []
        channel2_coords_after_correction = []
        for idx in range(len(channel1_files)):
            channel1 = pd.read_csv(channel1_files[idx])
            channel2 = pd.read_csv(channel2_files[idx])
            # Registration BEFORE chromatic-aberration correction.
            (
                channel1_coords_curr,
                channel2_coords_curr,
            ) = register_points_using_euclidean_distance(
                reference=channel1[[X, Y, Z]].values,
                moving=channel2[[X, Y, Z]].values,
                distance_cutoff=dist_cutoff,
            )
            # Correct channel 2 using the bead calibration (fixed 0.1 cutoff
            # for the bead matching, independent of the sweep above).
            (
                channel2_coords_after_correction_curr,
                sx,
                sy,
                sz,
            ) = chromatic_aberration_correction(
                directory=path_beads,
                coords=channel2[[X, Y, Z]].values,
                channel_to_correct=2,
                distance_cutoff=0.1,
                quality=f"{outdir}/chromatic_aberration_correction_quality.pdf",
            )
            # Registration AFTER correction (overwrites the variable above
            # with the matched subset).
            (
                channel1_coords_after_correction_curr,
                channel2_coords_after_correction_curr,
            ) = register_points_using_euclidean_distance(
                reference=channel1[[X, Y, Z]].values,
                moving=channel2_coords_after_correction_curr,
                distance_cutoff=dist_cutoff,
            )
            # Only keep non-empty match sets so np.concatenate below works.
            if len(channel1_coords_curr):
                channel1_coords.append(channel1_coords_curr)
            if len(channel2_coords_curr):
                channel2_coords.append(channel2_coords_curr)
            if len(channel1_coords_after_correction_curr):
                channel1_coords_after_correction.append(
                    channel1_coords_after_correction_curr
                )
            if len(channel2_coords_after_correction_curr):
                channel2_coords_after_correction.append(
                    channel2_coords_after_correction_curr
                )
        # NOTE(review): this guard skips only when BOTH lists are empty; if
        # exactly one of them is empty, np.concatenate below would raise.
        if len(channel1_coords) == 0 and len(channel1_coords_after_correction) == 0:
            continue
        channel1_coords = np.concatenate(channel1_coords)
        channel2_coords = np.concatenate(channel2_coords)
        channel1_coords_after_correction = np.concatenate(
            channel1_coords_after_correction
        )
        channel2_coords_after_correction = np.concatenate(
            channel2_coords_after_correction
        )
        # Refit a global affine transform on the corrected, matched points and
        # apply it to channel 2 before measuring residuals.
        t, A = compute_affine_transform(channel1_coords_after_correction, channel2_coords_after_correction)
        channel2_coords_after_correction = np.transpose(np.dot(A, channel2_coords_after_correction.T)) + t
        # Per-pair Euclidean residuals after correction + affine refit.
        dist1 = np.sqrt(
            np.sum(
                np.square(
                    channel1_coords_after_correction - channel2_coords_after_correction
                ),
                axis=1,
            )
        )
        # Residuals before correction; currently computed but unused (the
        # "_before" columns are commented out below).
        dist2 = np.sqrt(
            np.sum(
                np.square(channel1_coords - channel2_coords),
                axis=1,
            )
        )
        tmp = pd.DataFrame(dist1, columns=["distance_after"])
        tmp["dist_cutoff"] = dist_cutoff
        axis = ["x", "y", "z"]
        for i in range(len(axis)):
            # Signed per-axis residuals after correction.
            diff1 = (
                channel1_coords_after_correction[..., i]
                - channel2_coords_after_correction[..., i]
            )
            diff2 = channel1_coords[..., i] - channel2_coords[..., i]
            tmp[f"{axis[i]}_after"] = diff1
            # tmp[f"{axis[i]}_before"] = diff2
        res = pd.concat([res, tmp])
    return res
def plotres(res):
    """Plot registration-error summaries versus the registration cutoff.

    Expects *res* to carry pre-aggregated columns (``axis``,
    ``number_points_before``, ``mean_before``/``mean_after``,
    ``sd_before``/``sd_after`` and their ``*_distance`` variants).

    NOTE(review): ``calculate_deviation_registred_points`` in this notebook
    does not produce these columns, and the only call to this function is
    commented out below — this plotting helper looks stale; confirm before
    reuse. Also reads the module-level globals ``outdir`` and ``outname``.
    """
    fig, ax = plt.subplots(3, 4, figsize=(15, 15))
    axis = ["x", "y", "z"]
    # Row 0: number of registered points (plotted once, for the x axis only).
    i = 0
    selection = res.axis == axis[i]
    ax[0, i].plot(res.dist_cutoff[selection], res.number_points_before[selection], "-o")
    ax[0, i].set_ylabel("Number of registred points")
    # Row 1: mean per-axis offset before/after correction.
    for i in range(len(axis)):
        selection = res.axis == axis[i]
        ax[1, i].axhline(y=0, color="r", linestyle="-")
        ax[1, i].plot(res.dist_cutoff[selection], res.mean_before[selection], "-o")
        ax[1, i].plot(res.dist_cutoff[selection], res.mean_after[selection], "-o")
        ax[1, i].legend(["Before correction", "After correction"])
        if i == 0:
            ax[1, i].set_ylabel("Average distance between registred spots")
        ax[1, i].set_title(f"Along {axis[i]}")
    # Fourth column of row 1: radial (3-D) distance, using the last selection.
    ax[1, 3].plot(res.dist_cutoff[selection], res.mean_before_distance[selection], "-o")
    ax[1, 3].plot(res.dist_cutoff[selection], res.mean_after_distance[selection], "-o")
    ax[1, 3].legend(["Before correction", "After correction"])
    ax[1, 3].set_title(f"radial distance")
    # Row 2: standard deviation of the per-axis offsets.
    for i in range(len(axis)):
        selection = res.axis == axis[i]
        ax[2, i].plot(res.dist_cutoff[selection], res.sd_before[selection], "-o")
        ax[2, i].plot(res.dist_cutoff[selection], res.sd_after[selection], "-o")
        ax[2, i].legend(["Before correction", "After correction"])
        ax[2, i].set_xlabel("Distance cutoff for points registration")
        if i == 0:
            ax[2, i].set_ylabel("STD of distance between registred spots")
    plt.suptitle(f"{outdir}", fontsize=20)
    # Fourth column of row 2: STD of the radial distance.
    ax[2, 3].plot(res.dist_cutoff[selection], res.sd_before_distance[selection], "-o")
    ax[2, 3].plot(res.dist_cutoff[selection], res.sd_after_distance[selection], "-o")
    ax[2, 3].legend(["Before correction", "After correction"])
    ax[2, 3].set_xlabel("Distance cutoff for points registration")
    plt.savefig(f"{outdir}/{outname}.error_analysis.pdf")
    plt.show()
# -
# Column names of the localisation CSV files; X/Y/Z are used to select the
# spot coordinates below. TRACKID/FRAME/CELLID are not referenced in this
# notebook's visible cells (kept for consistency with the analysis package).
TRACKID = "track"
X = "x"
Y = "y"
Z = "z"
FRAME = "frame"
CELLID = "cell"
# Root directory containing one sub-directory per control-line sample.
basedir = "/tungstenfs/scratch/ggiorget/zhan/2021/1105_pia_image_analysis/3d_prediction/two_colours_lines/control_lines_new/"
# +
# Pool the registration-error statistics of every sample directory under basedir.
res = pd.DataFrame()
outdir = f"{basedir}/"
outname = "alldatapooled"
for sample in glob.glob(f"{basedir}/*/"):
    path_beads = f"{sample}/beads/um_based/"
    path_images_to_correct = f"{sample}/um_based/"
    # Channel-1 CSVs define the file set; each is paired with its channel-2
    # counterpart via the filename stem preceding the "w1" tag.
    channel1_files = sorted(glob.glob(f"{path_images_to_correct}/*w1*csv"))
    stems = []
    for csv_file in channel1_files:
        stems.append(re.search(r"(^.*)w1", os.path.basename(csv_file))[1])
    channel2_files = []
    for name in stems:
        channel2_files.append(glob.glob(f"{path_images_to_correct}/{name}*w2*csv")[0])
    sample_res = calculate_deviation_registred_points(channel1_files, channel2_files, path_beads)
    sample_res["exp"] = sample
    res = pd.concat([sample_res, res])
# plotres(res0)
# md(
# f"Ideal cutoff point registration cutoff seems to be 0.3um. This leads to the following offset {res0[round(res0.dist_cutoff, 1)==0.3]['mean_after_distance'].values[0]} +/- {res0[round(res0.dist_cutoff, 1)==0.3]['sd_after_distance'].values[0]}"
# )
# +
fig, ax = plt.subplots(1,3, figsize=(20,5))
# Mean residual distance after correction, per registration cutoff.
ax[0].plot(res.groupby("dist_cutoff").mean()["distance_after"], "--o")
ax[0].set_xlabel("dist_cutoff for point registration")
ax[0].set_ylabel("Average distance resolution limit")
# Spread (STD) of the residual distances, per cutoff.
ax[1].plot(res.groupby("dist_cutoff").std()["distance_after"], "--o")
ax[1].set_xlabel("dist_cutoff for point registration")
ax[1].set_ylabel("STD distance resolution limit")
# Number of matched spot pairs contributing at each cutoff.
ax[2].plot(res.groupby("dist_cutoff")["distance_after"].apply(lambda x: len(x)), "--o")
ax[2].set_xlabel("dist_cutoff for point registration")
ax[2].set_ylabel("Number of registred spots")
# -
# Final error estimate at the chosen 0.3 um registration cutoff.
print(np.mean(res[res['dist_cutoff'] == 0.3]['distance_after']), "+/-", np.std(res[res['dist_cutoff'] == 0.3]['distance_after']))
| notebooks/211215_Error_analysis_two_colors_imaging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 正确重载运算符
# > 有些事情让我不安,比如运算符重载。我决定不支持运算符重载,这完全是个个人选择,因为我见过太多 C++ 程序员滥用它。
# > ——<NAME>, Java 之父
#
# 本章讨论的内容是:
# * Python 如何处理中缀运算符(如 `+` 和 `|`)中不同类型的操作数
# * 使用鸭子类型或显式类型检查处理不同类型的操作数
# * 众多比较运算符(如 `==`、`>`、`<=` 等等)的特殊行为
# * 增量赋值运算符(如 `+=`)的默认处理方式和重载方式
# 重载运算符,如果使用得当,可以让代码更易于阅读和编写。
# Python 出于灵活性、可用性和安全性方面的平衡考虑,对运算符重载做了一些限制:
# * 不能重载内置类型的运算符
# * 不能新建运算符
# * 运算符 `is`、`and`、`or`、`not` 不能重载
#
# Python 算数运算符对应的魔术方法可以见[这里](https://docs.python.org/3/reference/datamodel.html#emulating-numeric-types)。
#
# 一个小知识:二元运算符 `+` 和 `-` 对应的魔术方法是 `__add__` 和 `__sub__`,而一元运算符 `+` 和 `-` 对应的魔术方法是 `__pos__` 和 `__neg__`.
# ## 反向运算符
# > 为了支持涉及不同类型的运算,Python 为中缀运算符特殊方法提供了特殊的分派机制。对表达式 `a + b` 来说,解释器会执行以下几步操作。
# > 1. 如果 a 有 `__add__` 方法,而且返回值不是 `NotImplemented`,调用 `a.__add__`,然后返回结果。
# > 2. 如果 a 没有 `__add__` 方法,或者调用 `__add__` 方法返回 `NotImplemented`,检查 b 有没有 `__radd__` 方法,如果有,而且没有返回 `NotImplemented`,调用 `b.__radd__`,然后返回结果。
# > 3. 如果 b 没有 `__radd__` 方法,或者调用 `__radd__` 方法返回 `NotImplemented`,抛出 `TypeError`, 并在错误消息中指明操作数类型不支持。
#
# 这样一来,只要运算符两边的任何一个对象正确实现了运算方法,就可以正常实现二元运算操作。
#
# 小知识:
# * `NotImplemented` 是一个特殊的单例值,要 `return`;而 `NotImplementedError` 是一个异常,要 `raise`.
# * Python 3.5 新引入了 `@` 运算符,用于点积乘法,对应的魔术方法是 [`__matmul__`](https://docs.python.org/3/reference/datamodel.html#object.__matmul__).
# * 进行 `!=` 运算时,如果双方对象都没有实现 `__ne__`,解释器会尝试 `__eq__` 操作,并将得到的结果**取反**。
#
# 放在这里有点吓人的小知识:
# * Python 在进行 `==` 运算时,如果运算符两边的 `__eq__` 都失效了,解释器会用两个对象的 id 做比较\_(:з」∠)\_。_书中用了“最后一搏”这个词…真的有点吓人。_
# ## 运算符分派
# 有的时候,运算符另一边的对象可能会出现多种类型:比如对向量做乘法时,另外一个操作数可能是向量,也可能是一个标量。此时,需要在方法实现中,根据操作数的类型进行分派。
# 此时有两种选择:
# 1. 尝试直接运算,如果有问题,捕获 `TypeError` 异常;
# 2. 在运算前使用 `isinstance` 进行类型判断,在收到可接受类型时在进行运算。
# 判断类型时,应进行鸭子类型的判断。应该使用 `isinstance(other, numbers.Integral)`,而不是用 `isinstance(other, int)`,这是之前的知识点。
#
# 不过,在类上定义方法时,是不能用 `functools.singledispatch` 进行单分派的,因为第一个参数是 `self`,而不是 `o`.
# +
# A deliberately broken in-place operator (bad example, kept on purpose).
class T:
    """Demonstrates why __iadd__ must return self: here it returns None,
    so `t += x` rebinds t to None."""

    def __init__(self, s):
        self.s = s

    def __str__(self):
        return self.s

    def __add__(self, other):
        return self.s + other

    def __iadd__(self, other):
        self.s += other
        # A reference must be returned here — it gets bound to the variable
        # on the left of `+=`. Intentionally omitted for the demonstration:
        # return self
t = T('1')
t1 = t  # keep a second reference so we can still see the object afterwards
w = t + '2'
print(w, type(w))
t += '2' # t = t.__iadd__('2')
print(t, type(t)) # we have accidentally turned t into None
print(t1, type(t1))
| 13-op-overloading.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/chadmh/Short-Hands-on-Tutorial-for-Deep-Learning-in-Tensorflow/blob/master/3_Convolutional_Neural_Networks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="6kPrirL6nx0d"
# # 3.1 Convolutional Neural Networks
#
# Basic deep neural network layers can be represented by a linear transformation and a non-linear activation function:
#
# > *y = f(w * x + b)*
#
# where w is a matrix of trainable parameters, and b is a vector of trainable parameters. This type of neural network layer is often called a dense, or fully-connected, layer.
#
# An issue arises when applying these networks to data with important spatial relationships, such as images or volumetric data. As we saw in the [Introduction to Tensorflow with MNIST](https://colab.research.google.com/github/chadmh/Short-Hands-on-Tutorial-for-Deep-Learning-in-Tensorflow/blob/master/1_Introduction.ipynb), these spatial relationships are lost when the input image is flattened.
#
# Convolutional neural networks extend the basic deep network idea by implementing what is known as a convolutional layer. A convolutional layer is implemented as a set of MxM kernel matrices which are then convolved with the input data. The training of the convolutional layer then consists of choosing the best values for each kernel matrix.
#
# Some extremely simple examples of kernel matrices include [-1, 0, 1] and its transpose which are used for vertical and horizontal edge detection, respectively. In CNNs the training process designs the kernels that best allow the model to minimize its loss function.
#
# After convolving the image with the kernels in one or more layers, a CNN typically applies a pooling layer. A pooling layer's job is to collect the most relevant information from the output of each kernel. It is computed by collecting the most relevant cell (e.g. max value) of an NxN window, moving (striding) K pixels and repeating.
#
# ### 3.1.1 CNN Basic Architecture
#
# The basic architecture of a CNN is to apply multiple layers of the convolution/pooling operation and to then pass the output to a set of dense layers for interpretation.
#
# CNNs are a popular tool for analyzing image data. In this notebook, a simple CNN will be demonstrated on the MNIST dataset. The code to load the dataset and setup the processing pipeline is equivalent to [notebook 1](https://colab.research.google.com/github/chadmh/Short-Hands-on-Tutorial-for-Deep-Learning-in-Tensorflow/blob/master/1_Introduction.ipynb).
# + colab={"base_uri": "https://localhost:8080/"} id="vBbDCAzgJWJn" outputId="39fd803c-11bc-498b-fcf3-1fb59558ca10"
# Import the needed Tensorflow components
import tensorflow as tf
import tensorflow_datasets as tfds
# Load the MNIST dataset. Load checks whether the dataset is locally available and downloads it from
# its official repository at http://yann.lecun.com/exdb/mnist if it cannot be found.
# `train`/`test` are tf.data.Dataset objects yielding (image, label) tuples.
(train, test), info = tfds.load('mnist', # Pick the MNIST dataset
split=['train', 'test'], # Load both the training and testing parts of the dataset
with_info=True, # Generate summary information about the dataset
as_supervised=True) # return both the inputs and labels as a tuple
print(info.description)
print(info.splits)
# Define the data preprocessing pipeline. For MNIST, the only needed preprocessing is to convert from uint8 to
# float. Other data sets are likely more extensive.
def preprocess_data(image, label):
    """Scale a uint8 MNIST image to float32 in [0, 1]; pass the label through.

    Called positionally by tf.data's ``Dataset.map`` below, so renaming the
    first parameter (previously ``input``, which shadows the builtin) is safe.
    """
    # Convert uint8 [0, 255] to real on [0, 1]
    image = tf.cast(image, tf.float32) / 255.0
    return image, label
# Assign the preprocessing pipeline to each dataset: train and test
train = train.map(preprocess_data)
test = test.map(preprocess_data)
# Tell each dataset how many images it will load at once for processing
# (one gradient update is computed per batch of 128 images).
BATCH_SIZE=128
train = train.batch(BATCH_SIZE)
test = test.batch(BATCH_SIZE)
# + [markdown] id="kLV4Fz3tJ9nn"
# Now that the data pipeline is set up, it is time to design the CNN. In this simple network, we have two convolution / pooling layers. Each convolution layer has 8 3x3 kernels. Setting the padding parameter to 'same' zero pads the output so that the convolution result is still 28 x 28 pixels.
#
# The max pooling layer divides the 28 x 28 output from each of the 8 kernels into 2 x 2 blocks and keeps just the maximum pixel value from each block. The data size is now 14 x 14 x 8.
#
# The next convolution layer reads in the 14 x 14 x 8 array. The 8 kernel outputs are simply treated as different channels of the data. The next layer's convolution kernels are therefore 8 layers deep (3 x 3 x 8). 2 x 2 Max pooling is applied just as before resulting in a 7 x 7 x 8 block of data for each image.
#
# The idea behind using multiple iterations of convolution / pooling is that the first iteration will capture basic features such as edges and lines, while later layers capture higher level features such as curves or digit fragments. This concept follows the hierarchical neural system of the human visual cortex.
#
# After the final convolution / pooling iteration, the data block is flattened and passed to a dense network with ReLU activation and then output.
# + colab={"base_uri": "https://localhost:8080/"} id="Yye5fPbjKKLf" outputId="74220b4a-d746-41ca-fd5a-098d617e1b23"
# Specify a basic sequential neural network:
# two conv/pool stages for feature extraction, then a small dense classifier.
model = tf.keras.models.Sequential([
    tf.keras.layers.Conv2D(8, 3, input_shape=(28, 28, 1), padding='same'), # Convolution with 8 3x3 kernals
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)), # Keep max value of each 2x2 block
    tf.keras.layers.Conv2D(8, 3, padding='same'), # Another convolution with 8 3x3 kernals
    tf.keras.layers.MaxPool2D(pool_size=(2, 2)), # Keep max value of each 2x2 block (data is 7x7x8)
    tf.keras.layers.Flatten(), # Flatten 7x7x8 array into a 392 x 1 vector
    tf.keras.layers.Dense(20, activation='relu'), # Hidden Layer: Define a layer with 20 neurons
    tf.keras.layers.Dense(10) # Output Layer: Define a layer with a slot for each digit.
])
# Define the optimizing algorithm and optimizing matrix for the model
model.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001), # Use the Adam optimizer to train the model
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), # Minimize the categorical crossentropy function in training
    metrics=[tf.keras.metrics.SparseCategoricalAccuracy()] # Measure our overall accuracy to see how well we've trained
)
model.summary()
# + [markdown] id="yTCspt8DWfS-"
# Although the final model is more complicated than that of [Notebook 1](https://colab.research.google.com/github/chadmh/Short-Hands-on-Tutorial-for-Deep-Learning-in-Tensorflow/blob/master/1_Introduction.ipynb) the total number of trainable parameters is reduced from nearly 16,000 to under 9,000. At the same time, by capturing the spatial correlation of pixels, the model should perform better than a purely dense model like notebook 1. Just as in Notebook 1, we train using the fit function.
# + colab={"base_uri": "https://localhost:8080/"} id="gl5kfAzfRWFw" outputId="7e2aceb0-10e3-42ba-b8df-2bdb158a5f03"
model.fit(train, epochs=2, validation_data=test)
# + [markdown] id="fGOiCBbTXFE_"
# After 2 epochs, the model accuracy is almost 96% compared with 92% for the dense network in Notebook 1. This demonstrates that the CNN architecture is more efficient for image processing.
| 3_Convolutional_Neural_Networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # STA 141b Week 1
#
# _TA: <NAME> (<EMAIL>)_
#
# ## Links
#
# * [How to Set Up a Virtual Machine](http://nick-ulle.github.io/virtual-machine/) -- if you want to try Linux
#
# * [How to Set Up Bash on Win10](https://msdn.microsoft.com/en-us/commandline/wsl/install_guide) -- another way to try Linux
# ## Discussion Notes
#
# ### Getting Help
#
# Python is [well-documented](https://docs.python.org/3.6/)!
#
# You can also access documentation with the `help()` function.
help(range)
# ### Modules & Packages
#
# Python has _modules_ and also _packages_. What's the difference?
#
# A module is a single Python script (a `.py` file). You can load a module with the `import` command.
#
# A package is a collection of modules prepared for distribution. You can install a package with `conda` or `pip`. Some packages only have one module.
#
# Which of the built-in modules are important?
#
# Module | Description
# ----------- | -----------
# sys | info about Python (version, etc)
# pdb | Python debugger
# os.path | tools for file paths
# collections | additional data structures
# string | string processing
# re | regular expressions
# urlparse | parse URLs
# math | simple math (but we'll mostly use NumPy instead)
# itertools | tools for iterators
# functools | tools for functions
# +
import sys
# Report the interpreter version string (useful for reproducibility).
sys.version
# -
# Python has built in `math` and `statistics` modules, but they are missing features needed for serious scientific computing.
#
# Instead, we'll use the "SciPy Stack". The SciPy Stack is a collection of packages for scientific computing (marked with a `*` below). Most scientists working in Python use the SciPy Stack. The 3 most important packages in the stack are:
#
# Package | Description
# ------------ | -----------
# numpy\* | arrays, matrices, math/stat functions
# scipy\* | additional math/stat functions
# pandas\* | data frames
#
# There are also several packages available for creating plots. A future lecture or discussion will explain what makes each unique:
#
# Package | Description
# ------------ | -----------
# matplotlib\* | visualizations
# seaborn | "statistical" visualizations
# plotly | in-browser visualizations
# bokeh | in-browser visualizations
#
# Finally, there are many other packages we may use for specific tasks (that is, only on a few assignments). Some of these are:
#
# Package | Description
# ------------ | -----------
# statsmodels | classical statistical models
# scikit-learn | machine learning models
# requests | web (HTTP) requests
# lxml | web page parsing (XML & HTML)
# beautifulsoup | web page parsing (HTML)
# nltk | natural language processing
# spacy | natural language processing
# textblob | natural language processing
# pillow | image processing
# scikit-image | image processing
# opencv | image processing
| sta141b/2018/discussion01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Switch case
# <img src="Images/switch_case.jpg" alt="Drawing" style="width: 400px;"/>
def switch_month( key ):
    """Print the English name of the month with number *key* (1-12).

    Emulates a switch/case statement via a dict lookup; prints
    "Invalid month" for any key outside 1-12 and returns None.
    """
    switcher = {
        # key: value
        1: "January",
        2: "February",  # FIX: was "Februar" — inconsistent with the other English names
        3: "March",
        4: "April",
        5: "May",
        6: "June",
        7: "July",
        8: "August",
        9: "September",
        10: "October",
        11: "November",
        12: "December"
    }
    print(switcher.get(key, "Invalid month"))
switch_month( 4 ) # prints "April"
switch_month( 13 ) # prints "Invalid month"
| UE1/08_switch_case.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 模型组网
#
#
# 模型组网是深度学习任务中的重要一环,该环节定义了神经网络的层次结构、数据从输入到输出的计算过程(即前向计算)等。
#
# 飞桨框架提供了多种模型组网方式,本文介绍如下几种常见用法:
# * **直接使用内置模型**
# * **使用 [paddle.nn.Sequential](../../api/paddle/nn/Sequential_cn.html#sequential) 组网**
# * **使用 [paddle.nn.Layer](../../api/paddle/nn/Layer_cn.html#layer) 组网**
#
# 另外飞桨框架提供了 [paddle.summary](../../api/paddle/summary_cn.html#summary) 函数方便查看网络结构、每层的输入输出 shape 和参数信息。
# ## 一、直接使用内置模型
#
# 飞桨框架目前在 [paddle.vision.models](../../api/paddle/vision/Overview_cn.html#about-models) 下内置了计算机视觉领域的一些经典模型,只需一行代码即可完成网络构建和初始化,适合完成一些简单的深度学习任务,满足深度学习初阶用户感受模型的输入和输出形式、了解模型的性能。
# +
import paddle
# List the model architectures that ship with paddle.vision.models.
print('飞桨框架内置模型:', paddle.vision.models.__all__)
# -
# 以 LeNet 模型为例,可通过如下代码组网:
# +
# Build and initialize the built-in LeNet network (10 output classes).
lenet = paddle.vision.models.LeNet(num_classes=10)
# Visualize the layer structure and parameter counts for a 1x1x28x28 input.
paddle.summary(lenet,(1, 1, 28, 28))
# -
# 通过 [paddle.summary](../../api/paddle/summary_cn.html#summary) 可清晰地查看神经网络层次结构、每一层的输入数据和输出数据的形状(Shape)、模型的参数量(Params)等信息,方便可视化地了解模型结构、分析数据计算和传递过程。从以上结果可以看出,LeNet 模型包含 2个`Conv2D` 卷积层、2个`ReLU` 激活层、2个`MaxPool2D` 池化层以及3个`Linear` 全连接层,这些层通过堆叠形成了 LeNet 模型,对应网络结构如下图所示。
#
# 
#
# 图 1:LeNet 网络结构示意图
#
#
#
# ## 二、Paddle.nn 介绍
#
# 经典模型可以满足一些简单深度学习任务的需求,然而更多情况下,需要使用深度学习框架构建一个自己的神经网络,这时可以使用飞桨框架 [paddle.nn](../../api/paddle/nn/Overview_cn.html) 下的 API 构建网络,该目录下定义了丰富的神经网络层和相关函数 API,如卷积网络相关的 Conv1D、Conv2D、Conv3D,循环神经网络相关的 RNN、LSTM、GRU 等,方便组网调用,详细清单可在 [API 文档](../../api/paddle/nn/Overview_cn.html) 中查看。
#
# 飞桨提供继承类(class)的方式构建网络,并提供了几个基类,如:[paddle.nn.Sequential](../../api/paddle/nn/Sequential_cn.html#sequential)、
# [paddle.nn.Layer](../../api/paddle/nn/Layer_cn.html#layer) 等,构建一个继承基类的子类,并在子类中添加层(layer,如卷积层、全连接层等)可实现网络的构建,不同基类对应不同的组网方式,本节介绍如下两种常用方法:
#
# * **使用 [paddle.nn.Sequential](../../api/paddle/nn/Sequential_cn.html#sequential) 组网**:构建顺序的线性网络结构(如 LeNet、AlexNet 和 VGG)时,可以选择该方式。相比于 Layer 方式 ,Sequential 方式可以用更少的代码完成线性网络的构建。
# * **使用 [paddle.nn.Layer](../../api/paddle/nn/Layer_cn.html#layer) 组网(推荐)**:构建一些比较复杂的网络结构时,可以选择该方式。相比于 Sequential 方式,Layer 方式可以更灵活地组建各种网络结构。Sequential 方式搭建的网络也可以作为子网加入 Layer 方式的组网中。
#
#
#
# ## 三、使用 paddle.nn.Sequential 组网
#
#
# 构建顺序的线性网络结构时,可以选择该方式,只需要按模型的结构顺序,一层一层加到 [paddle.nn.Sequential](../../api/paddle/nn/Sequential_cn.html#sequential) 子类中即可。
#
# 参照前面图 1 所示的 LeNet 模型结构,构建该网络结构的代码如下:
# +
from paddle import nn
# Build the LeNet model with paddle.nn.Sequential: layers are applied in
# the listed order, so no explicit forward() is needed.
lenet_Sequential = nn.Sequential(
    nn.Conv2D(1, 6, 3, stride=1, padding=1),
    nn.ReLU(),
    nn.MaxPool2D(2, 2),
    nn.Conv2D(6, 16, 5, stride=1, padding=0),
    nn.ReLU(),
    nn.MaxPool2D(2, 2),
    nn.Flatten(),
    nn.Linear(400, 120),
    nn.Linear(120, 84),
    nn.Linear(84, 10)
)
# Visualize the layer structure and parameter counts for a 1x1x28x28 input.
paddle.summary(lenet_Sequential,(1, 1, 28, 28))
# -
# 使用 Sequential 组网时,会自动按照层次堆叠顺序完成网络的前向计算过程,简略了定义前向计算函数的代码。由于 Sequential 组网只能完成简单的线性结构模型,所以对于需要进行分支判断的模型需要使用 paddle.nn.Layer 组网方式实现。
# ## 四、使用 paddle.nn.Layer 组网
#
# 构建一些比较复杂的网络结构时,可以选择该方式,组网包括三个步骤:
# 1. 创建一个继承自 [paddle.nn.Layer](../../api/paddle/nn/Layer_cn.html#layer) 的类;
# 1. 在类的构造函数 `__init__` 中定义组网用到的神经网络层(layer);
# 1. 在类的前向计算函数 `forward` 中使用定义好的 layer 执行前向计算。
#
# 仍然以 LeNet 模型为例,使用 paddle.nn.Layer 组网的代码如下:
#
# +
# Build the LeNet model by subclassing paddle.nn.Layer.
class LeNet(nn.Layer):
    """LeNet implemented as a paddle.nn.Layer subclass.

    Split into a `features` sub-network for feature extraction and a
    `linear` sub-network for classification (omitted if num_classes <= 0).
    """
    def __init__(self, num_classes=10):
        super(LeNet, self).__init__()
        self.num_classes = num_classes
        # Build the `features` sub-network: extracts features from the input image.
        self.features = nn.Sequential(
            nn.Conv2D(
                1, 6, 3, stride=1, padding=1),
            nn.ReLU(),
            nn.MaxPool2D(2, 2),
            nn.Conv2D(
                6, 16, 5, stride=1, padding=0),
            nn.ReLU(),
            nn.MaxPool2D(2, 2))
        # Build the `linear` sub-network: classifies the extracted features.
        if num_classes > 0:
            self.linear = nn.Sequential(
                nn.Linear(400, 120),
                nn.Linear(120, 84),
                nn.Linear(84, num_classes)
            )
    # Forward computation.
    def forward(self, inputs):
        x = self.features(inputs)
        if self.num_classes > 0:
            x = paddle.flatten(x, 1)
            x = self.linear(x)
        return x
lenet_SubClass = LeNet()
# Visualize the layer structure and parameter counts for a 1x1x28x28 input.
params_info = paddle.summary(lenet_SubClass,(1, 1, 28, 28))
print(params_info)
# -
# 在上面的代码中,将 LeNet 分为了 `features` 和 `linear` 两个子网,`features` 用于对输入图像进行特征提取,`linear` 用于输出十个数字的分类。
# ## 五、总结
#
# 本节介绍了飞桨框架中模型组网的几种方式,并且以 LeNet 为例介绍了如何使用这几种组网方式实现,总结模型组网的方法和用到的关键 API 如下图所示。
#
# 
#
# 图 2:模型组网方法
# ## 扩展:模型的层(Layer)
#
# 模型组网中一个关键组成就是神经网络层,不同的神经网络层组合在一起,从输入的数据样本中习得数据内在规律,最终输出预测结果。每个层从前一层获得输入数据,然后输出结果作为下一层的输入,并且大多数层包含可调的参数,在反向传播梯度时更新参数。
#
# 在飞桨框架中内置了丰富的神经网络层,用类(class)的方式表示,构建模型时可直接作为实例添加到子类中,只需设置一些必要的参数,并定义前向计算函数即可,反向传播和参数保存由框架自动完成。
#
# 下面展开介绍几个常用的神经网络层。
#
#
#
# ### Conv2D
# [Conv2D](../../api/paddle/nn/Conv2D_cn.html#conv2d) (二维卷积层)主要用于对输入的特征图进行卷积操作,广泛用于深度学习网络中。Conv2D 根据输入、卷积核、步长(stride)、填充(padding)、空洞大小(dilations)等参数计算输出特征层大小。输入和输出是 NCHW 或 NHWC 格式,其中 N 是 batchsize 大小,C 是通道数,H 是特征高度,W 是特征宽度。
# +
x = paddle.uniform((2, 3, 8, 8), dtype='float32', min=-1., max=1.)
conv = nn.Conv2D(3, 6, (3, 3), stride=2) # conv layer: 3 input channels, 6 output channels, 3x3 kernel, stride 2
y = conv(x) # apply the convolution to input x
y = y.numpy()
print(y.shape)
# -
# ### MaxPool2D
#
# [MaxPool2D](../../api/paddle/nn/MaxPool2D_cn.html#maxpool2d) (二维最大池化层)主要用于缩小特征图大小,根据 `kernel_size` 参数指定的窗口大小,对窗口内特征图进行取最大值的操作。
# +
x = paddle.uniform((2, 3, 8, 8), dtype='float32', min=-1., max=1.)
pool = nn.MaxPool2D(3, stride=2) # pooling window 3x3, stride 2
y = pool(x) # apply the pooling to input x
y = y.numpy()
print(y.shape)
# -
# ### Linear
#
# [Linear](../../api/paddle/nn/Linear_cn.html#linear) (全连接层)中每个神经元与上一层的所有神经元相连,实现对前一层的线性组合和线性变换。在卷积神经网络分类任务中,输出分类结果之前,通常采用全连接层对特征进行处理。
x = paddle.uniform((2, 6), dtype='float32', min=-1., max=1.)
linear = paddle.nn.Linear(6, 4)  # fully connected: 6 input features -> 4 outputs
y = linear(x)
print(y.shape)
# ### ReLU
#
# [ReLU](../../api/paddle/nn/ReLU_cn.html#relu) 是深度学习任务中常用的激活层,主要用于对输入进行非线性变换。ReLU 将输入中小于 0 的部分变为 0,大于 0 的部分保持不变。
x = paddle.to_tensor([-2., 0., 1.])
relu = paddle.nn.ReLU()  # clamps negative inputs to 0, keeps non-negative values
y = relu(x)
print(y)
# ## 扩展:模型的参数(Parameter)
#
# 在飞桨框架中,可通过网络的 [parameters()](../../api/paddle/nn/Layer_cn.html#parameters) 和 [named_parameters()](../../api/paddle/nn/Layer_cn.html#named_parameters) 方法获取网络在训练期间优化的所有参数(权重 weight 和偏置 bias),通过这些方法可以实现对网络更加精细化的控制,如设置某些层的参数不更新。
#
# 下面这段示例代码,通过 `named_parameters()` 获取了 LeNet 网络所有参数的名字和值,打印出了参数的名字(name)和形状(shape)。
# Enumerate every trainable parameter (weights and biases) of the built-in
# `lenet` instance created earlier, printing its name and shape.
for name, param in lenet.named_parameters():
    print(f"Layer: {name} | Size: {param.shape}")
| docs/guides/02_paddle2.0_develop/04_model_cn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from keras.models import Sequential
from keras.layers import Dense, Input, Reshape
from keras.models import Model
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.advanced_activations import LeakyReLU
from keras.layers.core import Flatten, Dropout
from keras.optimizers import Adam
from keras.datasets import mnist
import numpy as np
from PIL import Image
import argparse
import math
EPOCH = 30  # presumably the number of training epochs — confirm in the (off-screen) training loop
PNG_CREATE = 20  # presumably how many preview PNGs to generate — confirm where it is consumed
N_CLASS = 10  # number of digit classes; sets the discriminator's output width
# Generator that produces fake images (a convolutional autoencoder).
def auto_encoder_generator_model():
    """Build a 28x28x1 -> 28x28x1 convolutional autoencoder used as the GAN generator."""
    input_img = Input(shape=(28, 28, 1)) # adapt this if using `channels_first` image data format
    # Encoder: three conv + 2x2 max-pool stages shrink 28x28 down to 4x4.
    x = Conv2D(16, (3, 3), activation='relu', padding='same')(input_img)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), padding='same')(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    encoded = MaxPooling2D((2, 2), padding='same')(x)
    # at this point the representation is (4, 4, 8) i.e. 128-dimensional
    # Decoder: conv + 2x2 upsampling stages grow back to 28x28; the 'valid'
    # (no-padding) conv in the last stage trims 8x8 to 6x6 so the final
    # upsample lands exactly on 28x28.
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(encoded)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(8, (3, 3), activation='relu', padding='same')(x)
    x = UpSampling2D((2, 2))(x)
    x = Conv2D(16, (3, 3), activation='relu')(x)
    x = UpSampling2D((2, 2))(x)
    decoded = Conv2D(1, (3, 3), activation='sigmoid', padding='same')(x)
    autoencoder = Model(input_img, decoded)
    print('*** Auto encoder generator model ***')
    autoencoder.summary()
    return autoencoder
# Discriminator that detects fake (generated) images.
def discriminator_model():
    """Build the discriminator CNN.

    Outputs N_CLASS sigmoid units (one per digit class) rather than a single
    real/fake score, matching the per-class label vectors built in train().
    """
    model = Sequential()
    model.add(Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)))
    model.add(LeakyReLU(0.2))
    # FIX: `subsample` is the deprecated Keras 1 kwarg; the Keras 2 spelling
    # is `strides`. The stride-2 convolution downsamples 28x28 -> 12x12.
    model.add(Conv2D(128, (5, 5), strides=(2, 2)))
    model.add(LeakyReLU(0.2))
    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU(0.2))
    model.add(Dropout(0.5))
    model.add(Dense(N_CLASS))
    model.add(Activation('sigmoid'))
    print('*** discriminator model ***')
    model.summary()
    return model
def generator_containing_discriminator(g, d):
    """Stack generator *g* and a frozen discriminator *d* for adversarial training.

    Freezing *d* here means that training this combined model updates only
    the generator's weights; *d* is trained separately on its own batches.
    """
    model = Sequential()
    model.add(g)
    d.trainable = False  # freeze the discriminator inside the combined model
    model.add(d)
    model.summary()
    return model
def combine_images(generated_images):
    """Tile a batch of single-channel images into one near-square 2-D mosaic.

    Parameters
    ----------
    generated_images : array of shape (num, H, W, 1).

    Returns
    -------
    2-D array of shape (rows*H, cols*W) with the images laid out row-major;
    unused grid cells (when num is not a perfect square) remain zero.
    """
    num = generated_images.shape[0]
    width = int(math.sqrt(num))                   # grid columns
    height = int(math.ceil(float(num) / width))   # grid rows
    shape = generated_images.shape[1:3]
    image = np.zeros((height * shape[0], width * shape[1]),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        # FIX: use integer divmod instead of float division + int() for the
        # grid position — equivalent here but robust for large indices.
        i, j = divmod(index, width)
        image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = img[:, :, 0]
    return image
def train(BATCH_SIZE):
    """Adversarially train the auto-encoder generator against the classifier.

    The discriminator is trained to give real images their true labels and
    generated images the shifted target label ((digit + 1) % 10); the
    generator is trained through the frozen discriminator to counteract it.

    Args:
        BATCH_SIZE: number of images per training batch.
    """
    ####
    # Load the data
    ####
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    print ("データのサイズ")
    print ("X_train: %s, y_train:%s" % (X_train.shape, y_train.shape))
    print ("X_test: %s, y_test: %s" % (X_test.shape, y_test.shape))
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    X_train = X_train[:, :, :, None]  # add trailing channel axis
    X_test = X_test [:, :, :, None]  # add trailing channel axis
    def create_label_data(in_y_data):
        # One-hot encode digit labels 0..9 (row i of eye(10) is label i).
        return np.eye(10, dtype=int)[np.asarray(in_y_data)]
    # One-hot encode the label vectors.
    ylabel_train = create_label_data(y_train)
    ylabel_test = create_label_data(y_test)
    # Labels we want the discriminator to assign to generated images
    # (each class c maps to the arbitrary adversarial target (c + 1) % 10).
    def create_target_data(in_ylabel_data):
        classes = np.argmax(in_ylabel_data, axis=1)
        return np.eye(10, dtype=int)[(classes + 1) % 10]
    # Build the adversarial target labels.
    target_ylabel_train = create_target_data(ylabel_train)
    target_ylabel_test = create_target_data(ylabel_test)
    ####
    # Build the models
    ####
    # Create the discriminator and the generator.
    d = discriminator_model()
    g = auto_encoder_generator_model()
    # Stack generator and discriminator for adversarial training.
    d_on_g = generator_containing_discriminator(g, d)  # d.trainable=False inside, so d is not updated here
    # Adam optimizers for discriminator, generator and the stacked model.
    d_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    d_on_g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g.compile(loss='binary_crossentropy', optimizer=g_optim)
    d_on_g.compile(loss='binary_crossentropy', optimizer=d_on_g_optim)
    d.trainable = True  # keep the standalone discriminator trainable
    d.compile(loss='binary_crossentropy', optimizer=d_optim)
    ####
    # Epoch loop
    ####
    for epoch in range(EPOCH):
        print("Epoch is", epoch)
        print("Number of batches", int( X_train.shape[0] / BATCH_SIZE ))
        ####
        # Batch loop
        ####
        for index in range(int( X_train.shape[0] / BATCH_SIZE )):
            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]  # current slice of training images
            ylabel_train_batch = ylabel_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            target_ylabel_train_batch = target_ylabel_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            generated_images = g.predict(image_batch, verbose=0)  # adversarial images from the auto encoder
            ####
            # Periodically dump a PNG to monitor progress
            ####
            if index % PNG_CREATE == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5  # de-normalize from [-1, 1] to [0, 255]
                print('*** Generate Image by Auto encoder ***')
                Image.fromarray(image.astype(np.uint8)).save(str(epoch)+"_"+str(index)+".png")
            ####
            # Training steps
            ####
            print ("image_batch: %s, generated_images:%s" % (image_batch.shape, generated_images.shape))
            print ("ylabel_train_batch: %s, target_ylabel_train_batch:%s" % (ylabel_train_batch.shape, target_ylabel_train_batch.shape))
            X = np.concatenate((image_batch, generated_images))  # real + generated images
            y = np.concatenate((ylabel_train_batch, target_ylabel_train_batch))  # true labels + adversarial target labels
            # Train the discriminator to tell real labels from adversarial targets.
            d_loss = d.train_on_batch(X, y)
            print("batch %d D_loss : %f" % (index, d_loss))
            # Train the generator through the frozen discriminator.
            d.trainable = False  # freeze the discriminator
            # BUG FIX: the original passed the full-dataset `ylabel_train`
            # (60000 rows), which cannot match `image_batch` (BATCH_SIZE
            # rows). Following the GAN pattern (generator trained toward
            # the labels the discriminator is taught NOT to give generated
            # images), the batch-sized true labels are intended here.
            g_loss = d_on_g.train_on_batch(image_batch, ylabel_train_batch)
            d.trainable = True  # unfreeze the discriminator
            print("batch %d G_loss : %f" % (index, g_loss))
            # Periodically checkpoint the model weights.
            if index % 10 == 9:
                g.save_weights('generator', True)
                d.save_weights('discriminator', True)
train(BATCH_SIZE=100)
def generate(BATCH_SIZE, nice=False):
    """Generate images with the trained generator and save them as one PNG.

    Args:
        BATCH_SIZE: number of images to emit.
        nice: if True, over-generate (20x) and keep only the BATCH_SIZE
            images that the discriminator scores highest.
    """
    g = auto_encoder_generator_model()
    g_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    g.compile(loss='binary_crossentropy', optimizer=g_optim)
    g.load_weights('generator')
    if nice:
        d = discriminator_model()
        d_optim = Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
        # BUG FIX: the discriminator was compiled with the generator's
        # optimizer (g_optim) while its own d_optim was created but unused.
        d.compile(loss='binary_crossentropy', optimizer=d_optim)
        d.load_weights('discriminator')
        # NOTE(review): the generator is an auto-encoder that consumes
        # (28, 28, 1) images, so feeding (BATCH_SIZE*20, 100) noise vectors
        # looks incompatible with its input layer — confirm against
        # auto_encoder_generator_model.
        noise = np.random.uniform(-1, 1, (BATCH_SIZE*20, 100))
        generated_images = g.predict(noise, verbose=1)
        d_pret = d.predict(generated_images, verbose=1)
        index = np.arange(0, BATCH_SIZE*20)
        index.resize((BATCH_SIZE*20, 1))
        # Rank generated images by the discriminator's first output score.
        pre_with_index = list(np.append(d_pret, index, axis=1))
        pre_with_index.sort(key=lambda x: x[0], reverse=True)
        nice_images = np.zeros((BATCH_SIZE,) + generated_images.shape[1:3], dtype=np.float32)
        nice_images = nice_images[:, :, :, None]
        for i in range(BATCH_SIZE):
            idx = int(pre_with_index[i][1])
            nice_images[i, :, :, 0] = generated_images[idx, :, :, 0]
        image = combine_images(nice_images)
    else:
        noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
        generated_images = g.predict(noise, verbose=1)
        image = combine_images(generated_images)
    # De-normalize from [-1, 1] back to [0, 255] and save.
    image = image*127.5+127.5
    Image.fromarray(image.astype(np.uint8)).save("generated_image.png")
generate(BATCH_SIZE=100, nice=False)
| DCGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _cell_guid="10d5cd49-c524-4dd9-8b4d-67497e413466" _uuid="55946cb1-4116-49db-9d6f-d1a448b615a6"
#
# # Logistic Regression with Python
#
# We'll be trying to predict a classification: survival or deceased. Let's begin our understanding of implementing Logistic Regression in Python for classification.
#
# ## Import Libraries
# Let's import some libraries to get started!
# + _cell_guid="f8b120b2-aecd-4c89-acf8-309de0623000" _uuid="1f38d2a8-b056-4950-9f4c-8b0c5e6c574d"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt;
import seaborn as sns
# %matplotlib inline
# + [markdown] _cell_guid="ba3d03b5-e508-47c0-8232-ad59809bbaf2" _uuid="057e509e-bb25-40b2-9900-3ed890470bac"
# ## The Data
#
# + _cell_guid="a83e0dca-00a8-4f0c-91ee-8ff4827f18a8" _uuid="2fccc693-f638-4f54-9604-9768991566a2"
# Load the Titanic training data set.
train = pd.read_csv('../input/train.csv')
# + _cell_guid="2edc27e2-00b2-4f24-b9f4-eee0df2dccd3" _uuid="7269d290-49db-4c9b-b524-152864564846"
# Peek at the last few rows to sanity-check the load.
train.tail()
# + [markdown] _cell_guid="957da5d3-e53f-4f43-9e54-9af345fc2fa8" _uuid="e665e580-1279-45e6-bad2-d64c69527a98"
# # Exploratory Data Analysis
#
# Let's begin some exploratory data analysis! We'll start by checking out missing data!
#
# ## Missing Data
#
# We can use seaborn to create a simple heatmap to see where we are missing data!
# + _cell_guid="2796034a-ff70-4cdd-99ad-3d8c066c25db" _uuid="27c9deb6-52bf-493f-adbe-a53963b1fc34"
# Light cells mark NaNs: Age is partly missing, Cabin mostly missing.
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# + [markdown] _cell_guid="dc1c6798-386c-47e4-9b95-bf613c5871ff" _uuid="3e6a966e-f0bd-439e-8781-ef5f1b68ed6b"
# Roughly 20 percent of the Age data is missing. The proportion of Age missing is likely small enough for reasonable replacement with some form of imputation. Looking at the Cabin column, it looks like we are just missing too much of that data to do something useful with at a basic level. We'll probably drop this later, or change it to another feature like "Cabin Known: 1 or 0"
#
# #### Let's continue on by visualizing some more of the data! Check out the video for full explanations over these plots, this code is just to serve as reference.
# + _cell_guid="65e26278-c3d2-4e26-9a5b-d1cce06b0ff6" _uuid="16085fa2-70c5-462d-946d-9ed54aa8cd1d"
sns.set_style('whitegrid')
# Overall survival counts.
sns.countplot(x='Survived',data=train,palette='RdBu_r')
# + _cell_guid="1dde7b2d-a2c8-4cd2-a1da-716776742b43" _uuid="b5916cd2-f3b4-4baf-b4fc-d0ef9bea7c7d"
sns.set_style('whitegrid')
# Survival counts split by sex.
sns.countplot(x='Survived',hue='Sex',data=train,palette='RdBu_r')
# + _cell_guid="2029bd7d-2415-4055-bdb6-d5e0c6cc480f" _uuid="3c63bdd5-09b9-447d-adbd-f1efe69180b4"
sns.set_style('whitegrid')
# Survival counts split by passenger class.
sns.countplot(x='Survived',hue='Pclass',data=train,palette='rainbow')
# + _cell_guid="b43544da-010c-4a8b-b0c8-3c55cd623636" _uuid="6c52fb05-07f2-4b45-806d-dd50e74b20a4"
# Age distribution (rows with missing Age dropped for the plot).
sns.distplot(train['Age'].dropna(),kde=False,color='darkred',bins=30)
# + _cell_guid="5684aa3c-2e6d-4668-ac3a-af2cb4890bf7" _uuid="d6c62306-3ac5-4cef-b97b-bf42581b1da0"
train['Age'].hist(bins=30,color='darkred',alpha=0.7)
# + _cell_guid="e577a7b2-9d02-4b89-b3e1-86ec2992fb79" _uuid="186519f5-c924-4e05-9fb3-6be7d7b7ca59"
# Sibling/spouse counts per passenger.
sns.countplot(x='SibSp',data=train)
# + _cell_guid="9cfc90ba-5398-4139-864a-bc31a98e243a" _uuid="298860b1-dff8-44ca-b211-2237e3cedb55"
# Fare distribution.
train['Fare'].hist(color='green',bins=40,figsize=(8,4))
# + [markdown] _cell_guid="9153792b-c727-4380-8d34-939686991bfb" _uuid="75563284-73ef-47c7-9542-f81075ed0c61"
# ___
# ## Data Cleaning
# We want to fill in missing age data instead of just dropping the missing age data rows. One way to do this is by filling in the mean age of all the passengers (imputation).
# However we can be smarter about this and check the average age by passenger class. For example:
#
# + _cell_guid="12d1841d-d955-47f4-a003-79fe4dbbc831" _uuid="d23506c6-ff8b-4d03-ad3b-be2911571905"
plt.figure(figsize=(12, 7))
# Age by passenger class: higher classes skew older, motivating the
# class-conditional imputation below.
sns.boxplot(x='Pclass',y='Age',data=train,palette='winter')
# + [markdown] _cell_guid="fc5cff0d-3637-457a-8d31-573ff28af0c5" _uuid="50235695-e58d-4e8d-8c16-5629d2a62944"
# We can see the wealthier passengers in the higher classes tend to be older, which makes sense. We'll use these average age values to impute based on Pclass for Age.
# + _cell_guid="ca213e42-43e3-4f69-bb6c-d3358d9a8934" _uuid="500a0190-6ce3-471a-9033-669e8996ef21"
def impute_age(cols):
    """Fill a missing Age with the class-wise typical age (37/29/24).

    `cols` is a two-element (Age, Pclass) row, as passed by
    DataFrame.apply(axis=1) over train[['Age', 'Pclass']].
    """
    age, pclass = cols[0], cols[1]
    if not pd.isnull(age):
        return age
    # Age is missing: substitute the typical age for the passenger class.
    class_typical_age = {1: 37, 2: 29}
    return class_typical_age.get(pclass, 24)
# + [markdown] _cell_guid="0b76ab42-3f75-408b-93dc-e5bd9d846e7f" _uuid="275055d1-38b2-4e06-afe1-5a1dd5e7a510"
# Now apply that function!
# + _cell_guid="8f711705-a3d8-46ad-bc16-7999bf470e2b" _uuid="f0971c93-45d0-4838-9bdd-d01918c4c69e"
# Impute missing ages using the class-based rule defined above.
train['Age'] = train[['Age','Pclass']].apply(impute_age,axis=1)
# + [markdown] _cell_guid="82e88dce-4751-4912-9cbe-b69e8c1b248f" _uuid="07c10e31-aa36-450f-8acf-f9367fca85e1"
# Now let's check that heat map again!
# + _cell_guid="74b72a12-7335-4780-ae3c-b3ec3cdd6e2a" _uuid="4d541a02-d5f1-48f2-b5b6-2e0505f2ae17"
sns.heatmap(train.isnull(),yticklabels=False,cbar=False,cmap='viridis')
# + [markdown] _cell_guid="0e9729d5-e3e9-4c37-87ed-c6bab92a80ae" _uuid="8163a025-02d5-4df6-83d1-b232613c866c"
# Great! Let's go ahead and drop the Cabin column and the row in Embarked that is NaN.
# + _cell_guid="40bf2f1b-76d3-4e33-8e96-aea6b13f7ba6" _uuid="2a5ae069-b203-4152-90d5-aa3b214f5935"
# Cabin is mostly missing, so drop the whole column.
train.drop('Cabin',axis=1,inplace=True)
# + _cell_guid="ed680301-6512-4631-b9fe-8533402920b1" _uuid="3eb5a7f2-7a68-42a7-b210-73347fde0118"
train.head()
# + _cell_guid="304e4df6-e578-4010-833f-0f1f93c0ca7b" _uuid="205d9e89-5135-40a9-baab-d13c1036f196"
# Drop any remaining rows with NaNs (the missing Embarked values).
train.dropna(inplace=True)
# + [markdown] _cell_guid="c4488457-0050-41c9-85a0-8b44d37561bb" _uuid="5a776be5-763f-49b8-9cf8-337ec4d18ec6"
# ## Converting Categorical Features
# We'll need to convert categorical features to dummy variables using pandas! Otherwise our machine learning algorithm won't be able to directly take in those features as inputs.
# + _cell_guid="41462f98-6728-48d6-b6d8-b9d6e1da827b" _uuid="746d9c20-dbe6-4f3d-9dec-8d789e4e6c1b"
train.info()
# + _cell_guid="21c4dce6-8fa3-4c28-be1e-3178031629ad" _uuid="abc79025-8ad2-4fe2-804e-38517df0ac8a"
# One-hot encode Sex and Embarked; drop_first avoids perfect collinearity.
sex = pd.get_dummies(train['Sex'],drop_first=True)
embark = pd.get_dummies(train['Embarked'],drop_first=True)
# + _cell_guid="0f873b59-8484-446e-b5ee-4ef369deb427" _uuid="e8a00bfd-89e6-4ae2-b6f1-66d9776e4c5c"
# Remove the raw categorical/text columns the model cannot consume.
train.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
# + _cell_guid="986df581-b6f7-4074-ac5f-c6fdb51f13e1" _uuid="701d86a7-2256-48e3-a865-80ce9af6420f"
# Append the dummy columns to the numeric frame.
train = pd.concat([train,sex,embark],axis=1)
# + _cell_guid="4f8d18d6-8170-4bb0-a98b-ee2fd2164e7a" _uuid="0930afba-ea1b-432f-ac55-38bf9454fbc6"
train.head()
# + [markdown] _cell_guid="e197124c-028e-45c7-841d-d9fb1427b0c4" _uuid="8e390930-df41-4441-98ec-43cc1004b300"
# ### Great! Our data is ready for our model!
#
# ## Building a Logistic Regression model
# Let's start by splitting our data into a training set and test set (there is another test.csv file that you can play around with in case you want to use all this data for training).
#
# ## Train Test Split
# + _cell_guid="8caf44d6-833f-4c29-a499-b8af9bbd9eb3" _uuid="f684ebdf-6983-4b97-8891-ea2aaf201f31"
from sklearn.model_selection import train_test_split
# + _cell_guid="9daa890e-9afe-472c-8550-4aa553ee8fd9" _uuid="b1167f0a-8737-4ea7-885f-58f075a04ab4"
# Hold out 20% of the rows for evaluation; fixed seed for reproducibility.
# NOTE(review): PassengerId is kept as a feature here — presumably
# unintended; verify it does not dominate the tree models below.
X_train, X_test, y_train, y_test = train_test_split(train.drop('Survived',axis=1),
                                                    train['Survived'], test_size=0.20,
                                                    random_state=101)
# + [markdown] _cell_guid="b8c201a7-b13d-4b33-8028-0fbc43137461" _uuid="f8187397-d855-4ce3-a3a2-cf4e6faa0216"
# ## Training and Predicting
# + _cell_guid="b2c136ec-23ef-4c3e-a5f1-49d3af1bdd4f" _uuid="0d719fd4-cd9e-4f57-b208-099bad9b3fd7"
from sklearn.linear_model import LogisticRegression
# + _cell_guid="7a3f0260-c0c8-4ee0-8ddb-a7e3f040952b" _uuid="f1f2716c-a9be-4524-a1b7-719ab5df66b1"
# Fit a logistic regression baseline.
logmodel = LogisticRegression()
logmodel.fit(X_train,y_train)
# + _cell_guid="edb32c5a-e564-44cc-b8c1-655a36bfba25" _uuid="55bdba18-3456-48fd-a9d1-a5584dade351"
predictions = logmodel.predict(X_test)
X_test.head()
# + _cell_guid="cace2897-8911-4476-9b65-76515944dd32" _uuid="53fb0999-8ec5-40dd-8289-0eb2029b0a8f"
predictions
# + [markdown] _cell_guid="e833a0ff-3e86-4144-aa06-7daf1addf340" _uuid="28d5ddd0-41ca-4bfe-818e-8d75837699c8"
# ## Evaluation
# + [markdown] _cell_guid="db6de7c9-5fe7-41bf-b2e8-aa8778e744df" _uuid="09b31e40-12c9-4d48-b165-e80e85453c7c"
# We can check precision,recall,f1-score using classification report!
# + _cell_guid="700b7898-3d56-4ee6-acc8-a9a8f78a8370" _uuid="795929d2-f5ae-492a-8739-65998cccdfbd"
from sklearn.metrics import classification_report,confusion_matrix
# + _cell_guid="113ddb4a-924d-459a-a5e0-97008adf7061" _uuid="1a1d84c0-778a-49d3-9c96-1b621be125b0"
print(confusion_matrix(y_test,predictions))
# + _cell_guid="aa125221-9354-49f9-9638-1c2849f9eaa7" _uuid="94743fe6-9829-4650-a6b2-aeeef91e0e8e"
print(classification_report(y_test,predictions))
# + [markdown] _cell_guid="bb6f3640-3016-44f7-8831-d4d4724559be" _uuid="260c4caf-9210-46d6-8ef6-62c8db08ee1e"
# # Decision Tree Classifiction
# + _cell_guid="1868ccd7-1079-45c4-8696-28f727323aae" _uuid="5cf3a6be-bc00-4ea1-a806-16cd9f1a2f06"
from sklearn.tree import DecisionTreeClassifier
# + _cell_guid="cce0b768-a291-460b-bf91-06e592ad43f8" _uuid="7b8c88d0-a90d-4e3e-9557-9ae34766fcda"
# Fit a single decision tree for comparison with logistic regression.
dt_model=DecisionTreeClassifier()
dt_model.fit(X_train,y_train)
# + _cell_guid="21ba5358-8cb6-4cc8-9d5a-3c06f2106330" _uuid="ed6a0db7-2f86-4e86-b2ff-17274f77396e"
dt_pred = dt_model.predict(X_test)
# + _cell_guid="be726d2d-b797-4cbc-a533-ff44b79a06f3" _uuid="7ae9ec8c-bfd6-4e9a-8007-efe92c90bec8"
print(confusion_matrix(y_test,dt_pred))
# + _cell_guid="8b46e811-5a6f-4464-82cb-f57fdb78faf9" _uuid="11634053-504f-4d75-853b-b5c97de6fabd"
print(classification_report(y_test,dt_pred))
# + [markdown] _cell_guid="0a591b53-7199-47ef-8182-9084a6285bb6" _uuid="0db9a6f5-935f-45e0-8ff2-3245e9594005"
# # Random Forest Classification
# + _cell_guid="70d6a3f5-894a-4f24-87b9-e2e1f1463fc9" _uuid="28b193b2-3cbf-413a-a432-4809f9ecbe40"
from sklearn.ensemble import RandomForestClassifier
# + _cell_guid="a12c4b2a-7d3c-41b0-8564-c1197e231dc2" _uuid="06091e72-a27b-40d7-971b-fa7b8ee6a8cd"
# Small forest (5 trees); this model is reused on the test set below.
rf= RandomForestClassifier(n_estimators=5)
rf.fit(X_train,y_train)
# + _cell_guid="08bd74ef-be89-4dbd-bd25-c956262061ae" _uuid="1d2ed9ec-749e-4891-ac68-cdae97b398f7"
rf_pre=rf.predict(X_test)
# + _cell_guid="72031aef-f4fc-4a0d-a1d5-dc5d38ae52e4" _uuid="4f8f3848-eb38-42bf-a4db-632a243870e7"
print(confusion_matrix(y_test,rf_pre))
# + _cell_guid="3b00ce16-7d0d-49a9-9bfc-1d9d96257d2f" _uuid="d8b3e7b5-6aae-4a41-9634-c918206a3b60"
print(classification_report(y_test,rf_pre))
# + [markdown] _cell_guid="cfb3f0c1-067a-4c35-9971-1706d93ae827" _uuid="2ab3d7fc-78fe-489a-aabd-9445b49e8f66"
# Now we will use test dataset
# + _cell_guid="81c8bb37-0a9f-4bdb-94f3-7061f96d631c" _uuid="ce8b26eb-537f-4868-85f2-fe2cec2cc843"
# Load the Kaggle test set and apply the same preprocessing as training.
test = pd.read_csv('../input/test.csv')
# + _cell_guid="8150466d-b4c5-461c-a415-8a25e2ffbe06" _uuid="661d71df-0275-4f60-afb6-9fbec3471b6d"
sns.heatmap(test.isnull())
# + _cell_guid="a202d402-7714-40e8-9954-09140cdc5a79" _uuid="37e82c95-fdee-4930-8cec-574f961a07de"
test.drop('Cabin',axis=1,inplace=True)
# + _cell_guid="e26f9907-ee39-401b-9c25-d87d7461fa3e" _uuid="d0b1546f-fdb2-49e6-a196-1e10578a748f"
# The test set has one missing Fare; fill it with the column mean.
test['Fare'].fillna(test['Fare'].mean(), inplace=True)
# + _cell_guid="a2e85572-34d5-45f3-a9ca-38f62fb584fc" _uuid="9d247a45-4259-4f60-90a5-b501dffbe082"
test.info()
# + _cell_guid="cefe16c8-42da-4046-9242-3b377af0fefe" _uuid="e2415ee2-effe-40d9-a7a9-6ce4f4e8ad15"
test.head()
# + _cell_guid="ee5211c5-79f9-48e3-9b2c-87975cd5c8c9" _uuid="3dbad7a2-65ca-43b9-bc76-2b444df0a1e2"
# Same class-conditional age imputation as the training set.
test['Age'] = test[['Age','Pclass']].apply(impute_age,axis=1)
# + _cell_guid="f1269896-08fd-4a32-a264-963c5538420f" _uuid="4681e0f4-d46f-4715-ab4c-849422f49b0b"
sex_test = pd.get_dummies(test['Sex'],drop_first=True)
embark_test= pd.get_dummies(test['Embarked'],drop_first=True)
# + _cell_guid="17efc58b-2c69-4b3b-b572-218c08aad4db" _uuid="65e55f4f-c493-4fe0-8227-481c6c43c3b6"
test.drop(['Sex','Embarked','Name','Ticket'],axis=1,inplace=True)
# + _cell_guid="374a300a-3bca-4eab-8833-05c3624f63f6" _uuid="27993b64-0e45-4e9e-b151-0de672a52606"
test = pd.concat([test,sex_test,embark_test],axis=1)
# + _cell_guid="6db8da1f-25d9-4b1b-b520-afb26c817f54" _uuid="c8eea306-ef91-4350-ae57-11b11d23d0ad"
test.head()
# + _cell_guid="31445cff-8885-4240-b4b0-722330644a96" _uuid="c58b717a-8f15-4ccd-ba61-116e6e7ebff7"
# Predict with the random forest; `test` has the same feature columns
# (incl. PassengerId) as the training features, minus Survived.
test_prediction = rf.predict(test)
# + _cell_guid="40cd24fd-6eda-4274-a1e2-15cfafb56d3d" _uuid="64ab6761-fe67-4384-b220-05a31cee6ac6"
test_prediction.shape
# + _cell_guid="4260a9b3-dbfa-4ea1-9aa6-665ddbc88bb0" _uuid="e0383c89-f9ae-47f2-9be3-974584c79fb5"
test_pred = pd.DataFrame(test_prediction, columns= ['Survived'])
# + _cell_guid="b7692e23-6c53-4b91-8cb1-4c9a37da7096" _uuid="41e0a5bc-840f-4525-b102-021e9142cd52"
# Attach predictions by index; join='inner' keeps only aligned rows.
new_test = pd.concat([test, test_pred], axis=1, join='inner')
# + _cell_guid="e40453af-78f6-4e85-88e7-aba4a78db05d" _uuid="f365aade-1212-43dc-8adf-01546d044cea"
new_test.head()
# + _cell_guid="e9137165-6f75-4cf1-9ace-2250defbe72a" _uuid="072fac73-faf1-46ce-b9c2-7aba41f661b2"
# Submission frame: id + predicted label.
df= new_test[['PassengerId' ,'Survived']]
# + _cell_guid="3614e615-fa42-49fd-a632-028d954c7fad" _kg_hide-output=true _uuid="85f93489-01de-4ae2-b764-09108b4fdfe8"
df.to_csv('predictions.csv' , index=False)
# + [markdown] _cell_guid="01cd93a4-d8d1-4671-944e-0a9d08acfb60" _uuid="1f25d3d5-fb8b-4144-a193-33c27cff0a3f"
# ## If you like it, please vote.
# # Thank you :)
| titanic/titanic-classification-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # H2O.ai XGBoost GPU Benchmarks
# + [markdown] deletable=true editable=true
# In this notebook, we benchmark the latest version of [XGBoost](https://github.com/h2oai/xgboost), the well-known Kaggle-winning gradient boosting algorithm, and in particular, the [XGBoost GPU plugin](https://github.com/h2oai/xgboost/blob/master/plugin/updater_gpu/README.md). We also showcase the integration of XGBoost (incl. the GPU version) into H2O.
# + deletable=true editable=true
## For comparison between 1 GPU and 1 CPU, we use only 1 CPU:
#numactl -C 0 -N 0 -m 0 jupyter notebook
## This will ensure that we only use the first CPU on multi-CPU systems
# + [markdown] deletable=true editable=true
# 
# + deletable=true editable=true
## First time only: install xgboost and H2O, and restart the kernel afterwards
# Disabled by default (`if False`); flip to True for a one-time setup.
if False:
    ## Build XGBoost from source and install its Python module
    import os
    os.system("mkdir -p tmp && cd tmp && git clone https://github.com/h2oai/xgboost --recursive && cd xgboost && mkdir build && cd build && cmake .. -DPLUGIN_UPDATER_GPU=ON -DCUB_DIRECTORY=../cub -DCUDA_NVCC_FLAGS=\"--expt-extended-lambda -arch=sm_30\" && make -j; make; cd ../python-package && python3.6 setup.py install")
    ## Download and install H2O and its Python module
    os.system("cd tmp && wget http://h2o-release.s3.amazonaws.com/h2o/rel-vajda/1/h2o-3.10.5.1.zip && unzip h2o-3.10.5.1.zip")
    os.system("python3.6 -m pip install h2o-3.10.5.1/python/h2o-3.10.5.1-py2.py3-none-any.whl --upgrade")
    ## restart the kernel!
# + deletable=true editable=true
# %matplotlib inline
import xgboost as xgb
import pandas as pd
import numpy as np
import scipy as sp
import os
import time
from sklearn import metrics
# + deletable=true editable=true
# Download the 2M-row Higgs sample once, then load it with pandas.
path = "/opt/higgs_head_2M.csv"
if not os.path.exists(path):
    os.system("cd /opt/ && wget https://s3.amazonaws.com/h2o-public-test-data/bigdata/laptop/higgs_head_2M.csv")
# Shared benchmark hyper-parameters.
num_class = 2
num_round = 100
learn_rate = 0.02
max_depth = 10
## Parse data into a Pandas Frame
df = pd.read_csv(path, header=None)
# + deletable=true editable=true
# Column 0 is the binary target; the remaining columns are features.
df_target = df.iloc[:,0]
# BUG FIX: the original called df.drop(df.iloc[:,0], axis=1, inplace=True),
# which drops columns by the target column's *values* (labels 0.0 and 1.0),
# removing feature column 1 as well as the target. Drop only the first
# column by its label instead.
df.drop(df.columns[0], axis=1, inplace=True)
cols = df.columns.values
df.shape
# + deletable=true editable=true
train = df
# + deletable=true editable=true
train_target = df_target
# + deletable=true editable=true
print(train.shape)
# + deletable=true editable=true
# !lscpu
# + deletable=true editable=true
# !cat /proc/meminfo | grep MemTotal
# + deletable=true editable=true
# !nvidia-smi -L
# + deletable=true editable=true
def runXGBoost(param):
    """Train and score native XGBoost with `param` on the global Higgs
    frame, appending timings, training AUC and a plot label to the
    global result lists (dmatrix_times, train_times, score_times,
    valid_aucs, plot_labels)."""
    updater = param.get("updater")
    if updater == "grow_gpu_hist":
        device = "GPU hist"
    elif updater == "grow_gpu":
        device = "GPU exact"
    else:
        device = "CPU"
    # When no GPU updater is set, tag the label with the tree method.
    suffix = "" if "updater" in param else param["tree_method"]
    label = "XGBoost " + device + " " + suffix
    print(label)
    print("=====================")
    for key, value in param.items():
        print(key, value)
    print("=====================")
    start = time.time()
    dtrain = xgb.DMatrix(train.values, label=train_target.values,
                         feature_names=[str(c) for c in cols])
    elapsed = time.time() - start
    print("Time to create DMatrix (sec): ", elapsed)
    dmatrix_times.append(elapsed)
    start = time.time()
    bst = xgb.train(param, dtrain, num_round)
    elapsed = time.time() - start
    print("Time to train (sec): ", elapsed)
    train_times.append(elapsed)
    start = time.time()
    preds = bst.predict(dtrain)
    elapsed = time.time() - start
    print("Time to predict (sec): ", elapsed)
    score_times.append(elapsed)
    labels = dtrain.get_label()
    auc = metrics.roc_auc_score(labels, preds)
    print("Training AUC:", auc)
    valid_aucs.append(auc)
    plot_labels.append(label)
    fs = bst.get_fscore()
    # Optional: Uncomment to show variable importance
    #varimp = pd.DataFrame({'Importance': list(fs.values()), 'Feature': list(fs.keys())})
    #varimp.sort_values(by = 'Importance', inplace = True, ascending = False)
    #varimp.head(10).plot(label='importance',kind="barh",x="Feature",y="Importance").invert_yaxis()
# + deletable=true editable=true
# Accumulators filled by runXGBoost() across the benchmark runs below.
valid_aucs = []
dmatrix_times = []
train_times = []
score_times = []
plot_labels = []
# + deletable=true editable=true
# Native XGBoost, CPU, exact greedy algorithm.
param = {
    "objective":('reg:logistic' if num_class>1 else 'reg:linear')
    , "max_depth":max_depth
    , "eta":learn_rate
    , "tree_method":"exact"
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
}
runXGBoost(param)
# + deletable=true editable=true
# Native XGBoost, CPU, approximate (quantile sketch) algorithm.
param = {
    "objective":('reg:logistic' if num_class>1 else 'reg:linear')
    , "max_depth":max_depth
    , "eta":learn_rate
    , "tree_method":"approx"
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
}
runXGBoost(param)
# + deletable=true editable=true
# Native XGBoost, CPU, histogram algorithm.
param = {
    "objective":('reg:logistic' if num_class>1 else 'reg:linear')
    , "max_depth":max_depth
    , "eta":learn_rate
    , "tree_method":"hist"
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
}
runXGBoost(param)
# + deletable=true editable=true
# Native XGBoost, GPU, exact updater plugin.
param = {
    "objective":('reg:logistic' if num_class>1 else 'reg:linear')
    , "max_depth":max_depth
    , "eta":learn_rate
    , "tree_method":"exact"
    , "updater":"grow_gpu"
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
}
runXGBoost(param)
# + deletable=true editable=true
# Native XGBoost, single GPU, histogram updater plugin.
param = {
    "objective":('reg:logistic' if num_class>1 else 'reg:linear')
    , "max_depth":max_depth
    , "eta":learn_rate
    , "tree_method":"exact"
    , "updater":"grow_gpu_hist"
    , "n_gpus":1
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
}
runXGBoost(param)
# + deletable=true editable=true
# Collect the native-XGBoost results into one table, fastest first.
data = pd.DataFrame({'algorithm'    :plot_labels,
                     'dmatrix time' :dmatrix_times,
                     'training time':train_times,
                     'scoring time' :score_times,
                     'training AUC' :valid_aucs}).sort_values(by="training time")
data
# + deletable=true editable=true
# Bar charts of training time and training AUC per algorithm.
data.plot(label="training time",kind='barh',x='algorithm',y='training time')
data.plot(title="training AUC",kind='barh',x='algorithm',y='training AUC',legend=False)
# + [markdown] deletable=true editable=true
# ## Now call XGBoost from H2O
# + deletable=true editable=true
import h2o
from h2o.estimators import H2OXGBoostEstimator
# Start (or connect to) a local H2O cluster and time the data parse.
h2o.init()
t_start = time.time()
df_hex = h2o.import_file(path)
print("Time to parse by H2O (sec): ", time.time() - t_start)
trainhex = df_hex
# Column 0 is the target; make it categorical so H2O trains a classifier.
trainhex[0] = (trainhex[0]).asfactor()
# + deletable=true editable=true
def runH2OXGBoost(param):
    """Train and score H2O's XGBoost wrapper with `param` on the global
    `trainhex` frame, appending timings, training AUC and a plot label
    to the global h2o_* result lists."""
    backend = "GPU" if param.get("backend") == "gpu" else "CPU"
    if "tree_method" in param:
        tag = " " + param["tree_method"]
    else:
        tag = ""
    label = "H2O XGBoost " + backend + tag
    print(label)
    print("=====================")
    for key, value in param.items():
        print(key, value)
    print("=====================")
    start = time.time()
    model = H2OXGBoostEstimator(**param)
    model.train(x=list(range(1, trainhex.shape[1])), y=0, training_frame=trainhex)
    elapsed = time.time() - start
    print("Time to train (sec): ", elapsed)
    h2o_train_times.append(elapsed)
    start = time.time()
    preds = model.predict(trainhex)[:, 2]  # third column = P(class 1)
    elapsed = time.time() - start
    print("Time to predict (sec): ", elapsed)
    h2o_score_times.append(elapsed)
    preds = h2o.as_list(preds)
    labels = train_target.values
    auc = metrics.roc_auc_score(labels, preds)
    print("Training AUC:", auc)
    h2o_valid_aucs.append(auc)
    h2o_plot_labels.append(label)
    #pd.DataFrame(model.varimp(),columns=["Feature","","Importance",""]).head(10).plot(label='importance',kind="barh",x="Feature",y="Importance").invert_yaxis()
# + deletable=true editable=true
# Accumulators filled by runH2OXGBoost() and the H2O GBM cell below.
h2o_valid_aucs = []
h2o_train_times = []
h2o_score_times = []
h2o_plot_labels = []
# + deletable=true editable=true
# H2O XGBoost, CPU backend, exact tree method.
# NOTE(review): the CPU cells pass XGBoost-native names (eta, subsample,
# colsample_bytree, min_child_weight) while the GPU cells below pass H2O
# names (learn_rate, sample_rate, ...) — presumably both are accepted
# aliases; confirm against the H2OXGBoostEstimator parameter list.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "eta":learn_rate
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
    , "score_tree_interval":num_round
    , "backend":"cpu"
    , "tree_method":"exact"
}
runH2OXGBoost(param)
# + deletable=true editable=true
# H2O XGBoost, CPU backend, approximate tree method.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "eta":learn_rate
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
    , "score_tree_interval":num_round
    , "backend":"cpu"
    , "tree_method":"approx"
}
runH2OXGBoost(param)
# + deletable=true editable=true
# H2O XGBoost, CPU backend, histogram tree method.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "eta":learn_rate
    , "subsample":0.7
    , "colsample_bytree":0.9
    , "min_child_weight":5
    , "seed":12345
    , "score_tree_interval":num_round
    , "backend":"cpu"
    , "tree_method":"hist"
}
runH2OXGBoost(param)
# + deletable=true editable=true
# H2O XGBoost, GPU backend, exact tree method.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "learn_rate":learn_rate
    , "sample_rate":0.7
    , "col_sample_rate_per_tree":0.9
    , "min_rows":5
    , "seed":12345
    , "score_tree_interval":num_round
    , "backend":"gpu"
    , "tree_method":"exact"
}
runH2OXGBoost(param)
# + deletable=true editable=true
# H2O XGBoost, GPU backend, histogram tree method.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "learn_rate":learn_rate
    , "sample_rate":0.7
    , "col_sample_rate_per_tree":0.9
    , "min_rows":5
    , "seed":12345
    , "score_tree_interval":num_round
    , "backend":"gpu"
    , "tree_method":"hist"
}
runH2OXGBoost(param)
# + [markdown] deletable=true editable=true
# ## H2O GBM (CPU)
# + deletable=true editable=true
from h2o.estimators.gbm import H2OGradientBoostingEstimator
# Baseline: H2O's own (CPU) GBM with matching hyper-parameters,
# timed and scored inline the same way runH2OXGBoost does.
param = {
    "ntrees":num_round
    , "max_depth":max_depth
    , "learn_rate":learn_rate
    , "sample_rate":0.7
    , "col_sample_rate_per_tree":0.9
    , "min_rows":5
    , "seed":12345
    , "score_tree_interval":num_round
}
t_start = time.time()
model = H2OGradientBoostingEstimator(**param)
model.train(x = list(range(1,trainhex.shape[1])), y = 0, training_frame = trainhex)
tt = time.time() - t_start
print("Time to train (sec): ", tt)
h2o_train_times.append(tt)
t_start = time.time()
# Third prediction column holds P(class 1).
preds = model.predict(trainhex)[:,2]
tt = time.time() - t_start
print("Time to predict (sec): ", tt)
h2o_score_times.append(tt)
preds = h2o.as_list(preds)
labels = train_target.values
auc = metrics.roc_auc_score(labels, preds)
print("AUC:", auc)
h2o_valid_aucs.append(auc)
h2o_plot_labels.append("H2O GBM CPU")
# + deletable=true editable=true
# Collect the H2O results into one table, fastest first.
data = pd.DataFrame({'algorithm'    :h2o_plot_labels,
                     'training time':h2o_train_times,
                     'scoring time' :h2o_score_times,
                     'training AUC' :h2o_valid_aucs}).sort_values(by="training time")
data
# + deletable=true editable=true
# Bar charts of training time and training AUC per algorithm.
data.plot(label="DMatrix + training time",kind='barh',x='algorithm',y='training time')
data.plot(title="training AUC",kind='barh',x='algorithm',y='training AUC',legend=False)
# + [markdown] deletable=true editable=true
# ### Summary: Fastest GPU algorithm (XGBoost histogram) takes 5s, fastest CPU algorithm (H2O) takes 50s
#
# ##### Note: H2O's XGBoost integration has some internal overhead still (DMatrix creation is single-threaded, and some parameters have different default values, hence the slightly slower training speed and slightly higher training accuracy) - this doesn't affect the summary conclusion
| xgboost/GPUXGBoost.ipynb |
# ##### Copyright 2021 Google LLC.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# # coins3
# <table align="left">
# <td>
# <a href="https://colab.research.google.com/github/google/or-tools/blob/master/examples/notebook/contrib/coins3.ipynb"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/colab_32px.png"/>Run in Google Colab</a>
# </td>
# <td>
# <a href="https://github.com/google/or-tools/blob/master/examples/contrib/coins3.py"><img src="https://raw.githubusercontent.com/google/or-tools/master/tools/github_32px.png"/>View source on GitHub</a>
# </td>
# </table>
# First, you must install [ortools](https://pypi.org/project/ortools/) package in this colab.
# !pip install ortools
# +
# Copyright 2010 <NAME> <EMAIL>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Coin application in Google CP Solver.
From 'Constraint Logic Programming using ECLiPSe'
pages 99f and 234 ff.
The solution in ECLiPSe is at page 236.
'''
What is the minimum number of coins that allows one to pay _exactly_
any amount smaller than one Euro? Recall that there are six different
euro cents, of denomination 1, 2, 5, 10, 20, 50
'''
Compare with the following models:
* MiniZinc: http://hakank.org/minizinc/coins3.mzn
* Comet : http://www.hakank.org/comet/coins3.co
* Gecode : http://hakank.org/gecode/coins3.cpp
* SICStus : http://hakank.org/sicstus/coins3.pl
This model was created by <NAME> (<EMAIL>)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp

# Create the solver.
solver = pywrapcp.Solver("Coins")

#
# data
#
n = 6  # number of different coin denominations

# Euro cent denominations, exactly as given in the problem statement above.
# (Fixed: the list previously contained 25, which is not a euro coin
# denomination; the docstring specifies 1, 2, 5, 10, 20, 50.)
variables = [1, 2, 5, 10, 20, 50]

# declare variables
# x[i] = how many coins of denomination variables[i] we carry (0..99).
x = [solver.IntVar(0, 99, "x%i" % i) for i in range(n)]
num_coins = solver.IntVar(0, 99, "num_coins")

#
# constraints
#
# number of used coins, to be minimized
solver.Add(num_coins == solver.Sum(x))

# Check that every amount from 1 to 99 cents can be paid exactly using a
# subset of the carried coins: tmp[i] coins of each denomination, never
# more than the x[i] coins we actually carry.
for j in range(1, 100):
    tmp = [solver.IntVar(0, 99, "b%i" % i) for i in range(n)]
    solver.Add(solver.ScalProd(tmp, variables) == j)
    for i in range(n):
        solver.Add(tmp[i] <= x[i])

# objective: minimize the total number of coins carried
objective = solver.Minimize(num_coins, 1)

#
# solution and search
#
solution = solver.Assignment()
solution.Add(x)
solution.Add(num_coins)
solution.AddObjective(num_coins)

db = solver.Phase(x, solver.CHOOSE_MIN_SIZE_LOWEST_MAX,
                  solver.ASSIGN_MIN_VALUE)

solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
    print("x: ", [x[i].Value() for i in range(n)])
    print("num_coins:", num_coins.Value())
    print()
    num_solutions += 1
solver.EndSearch()

print()
print("num_solutions:", num_solutions)
print("failures:", solver.Failures())
print("branches:", solver.Branches())
print("WallTime:", solver.WallTime())
# examples/notebook/contrib/coins3.ipynb
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="UYZ9oJ9b1xZ7"
# ## Prática Guiada Requests e Beautiful Soup
# -
# # Exemplo IMDB
#
# **Raspando dados para mais de 2000 filmes**
#
#
# Queremos analisar as distribuições das classificações de filmes do IMDB e do Metacritic para ver se encontramos algo interessante. Para fazer isso, primeiro vamos coletar dados para mais de 2000 filmes.
#
# É essencial identificar o objetivo da nossa raspagem desde o início. Escrever um script de raspagem pode levar muito tempo, especialmente se quisermos raspar mais de uma página da web. Queremos evitar passar horas escrevendo um script que raspe dados que realmente não precisaremos.
#
# **Trabalhando em quais páginas raspar**
#
#
# Uma vez que estabelecemos nosso objetivo, precisamos identificar um conjunto eficiente de páginas para raspar.
#
# Queremos encontrar uma combinação de páginas que requer um número relativamente pequeno de solicitações. Uma solicitação é o que acontece sempre que acessamos uma página da web. Nós "solicitamos" o conteúdo de uma página do servidor. Quanto mais pedidos fizermos, mais tempo o script precisará executar e maior será a sobrecarga do servidor.
#
# Uma forma de obter todos os dados de que precisamos é compilar uma lista de nomes de filmes e usá-los para acessar a página da web de cada filme nos sites do IMDB e do Metacritic.
# <br>
# <img src='https://www.dataquest.io/blog/content/images/2017/12/option1.gif'>
# <br>
# Como queremos obter mais de 2.000 classificações do IMDB e do Metacritic, teremos que fazer pelo menos 4.000 solicitações. Se fizermos uma solicitação por segundo, nosso script precisará de um pouco mais de uma hora para fazer 4000 solicitações. Por isso, vale a pena tentar identificar maneiras mais eficientes de obter nossos dados.
#
# Se explorarmos o site do IMDB, podemos descobrir uma maneira de reduzir o número de solicitações pela metade. As pontuações do Metacritic são mostradas na página do filme do IMDB, por isso podemos recapitular ambas as classificações com um único pedido:
#
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/option2.jpg'>
# <br>
# <br>
# Se investigarmos o site do IMDB, poderemos descobrir a página mostrada abaixo. Ele contém todos os dados que precisamos para 50 filmes. Dado o nosso objetivo, isso significa que só teremos que fazer 40 pedidos, o que é 100 vezes menor que a nossa primeira opção. Vamos explorar mais esta última opção.
#
# <img src='https://www.dataquest.io/blog/content/images/option3.jpg'>
# <br>
# <br>
# **Identificando a estrutura do URL**
# <br>
# <br>
# Nosso desafio agora é ter certeza de que entendemos a lógica do URL como as páginas que queremos para alterar a mudança. Se não conseguirmos entender essa lógica o suficiente para podermos implementá-la em código, chegaremos a um beco sem saída.
# <br>
# Se você for na página de pesquisa avançada do IMDB , você pode procurar filmes por ano:
# <br>
#
# <img src='https://www.dataquest.io/blog/content/images/advanced_search.png'>
#
# Vamos navegar pelo ano de 2017, ordenar os filmes na primeira página por número de votos, depois mudar para a próxima página. Chegaremos a esta página da web , que tem este URL:
#
#
# <br>
#
# <img src='https://www.dataquest.io/blog/content/images/url.png'>
# <br>
# <br>
#
# Na imagem acima, você pode ver que o URL tem vários parâmetros após o ponto de interrogação:
#
# * `release_date` - Mostra apenas os filmes lançados em um ano específico.
# * `sort` - Classifica os filmes na página. sort=num_votes,desctraduz para classificar por número de votos em ordem decrescente .
# * `page` - Especifica o número da página.
# * `ref_` - Leva-nos para a próxima página ou a anterior. A referência é a página em que estamos atualmente. adv_nxte adv_prvsão dois valores possíveis. Eles traduzem para avançar para a próxima página e avançar para a página anterior , respectivamente.
#
# <br>
# <br>
# <br>
#
# Se você navegar por essas páginas e observar o URL, perceberá que apenas os valores dos parâmetros mudam. Isso significa que podemos escrever um script para corresponder à lógica das alterações e fazer muito menos solicitações para coletar nossos dados.
# <br>
# <br>
#
# Vamos começar a escrever o script, solicitando o conteúdo desta página web único: http://www.imdb.com/search/title?release_date=2017&sort=num_votes,desc&page=1. Na seguinte célula de código nós iremos:
#
# Importe a get()função do requestsmódulo.
# Atribua o endereço da página da web a uma variável nomeada url.
# Solicite ao servidor o conteúdo da página da Web usando get()e armazene a resposta do servidor na variável response.
# Imprima uma pequena parte do responseconteúdo acessando seu .textatributo ( responseagora é um Responseobjeto).
# +
from requests import get

# First results page for 2017 releases, sorted by vote count (descending).
url = 'http://www.imdb.com/search/title?release_date=2017&sort=num_votes,desc&page=1'
response = get(url)

# Peek at the beginning of the returned HTML document.
preview = response.text[:500]
print(preview)
# -
# # Entendendo a estrutura HTML de uma única página
#
# <br>
# <img src='https://cdn-images-1.medium.com/max/1000/0*ETFzXPCNHkPpqNv_.png'>
# <br>
# Como você pode ver na primeira linha response.text, o servidor nos enviou um documento HTML. Este documento descreve a estrutura geral dessa página da Web, juntamente com seu conteúdo específico (que é o que torna essa página específica única).
#
# Todas as páginas que queremos copiar têm a mesma estrutura geral. Isso implica que eles também têm a mesma estrutura HTML geral. Então, para escrever nosso script, será suficiente entender a estrutura HTML de apenas uma página. Para fazer isso, usaremos as ferramentas de desenvolvedor do navegador .
#
# Se você usa o Chrome , clique com o botão direito do mouse em um elemento da página da web que lhe interessa e clique em Inspecionar . Isso levará você diretamente para a linha HTML que corresponde a esse elemento:
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/inspecthtml.png'>
# <br>
#
# Clique com o botão direito do mouse no nome do filme e, em seguida, clique com o botão esquerdo do mouse em Inspecionar . A linha HTML destacada em cinza corresponde ao que o usuário vê na página da Web como o nome do filme.
#
# Você também pode fazer isso usando o Firefox e o Safari DevTools.
#
# Observe que todas as informações de cada filme, incluindo o pôster, estão contidas em uma divtag.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/one_container.jpg'>
# <br>
#
# Existem muitas linhas HTML aninhadas em cada divtag. Você pode explorá-las clicando nas pequenas setas cinzas à esquerda das linhas HTML correspondentes a cada uma delas div. Dentro dessas tags aninhadas, encontraremos as informações de que precisamos, como a classificação de um filme.
#
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/movie_rating.jpg'>
# <br>
#
# Há 50 filmes exibidos por página, portanto, deve haver um divcontêiner para cada um. Vamos extrair todos esses 50 contêineres analisando o documento HTML de nossa solicitação anterior.
#
# # Usando BeautifulSoup para analisar o conteúdo HTML
# Para analisar nosso documento HTML e extrair os 50 divcontêineres, usaremos um módulo Python chamado BeautifulSoup , o módulo de raspagem da Web mais comum para Python.
#
# Na seguinte célula de código nós iremos:
#
# - Importe o `BeautifulSoupcriador` da classe do pacote bs4.
# - Analise `response.text` criando um `BeautifulSoup` objeto e atribua esse objeto a `html_soup`. O `html.parser` argumento indica que queremos fazer a análise usando o [analisador HTML do Python](https://www.crummy.com/software/BeautifulSoup/bs4/doc/#specifying-the-parser-to-use).
#
#
# Display the Response object fetched earlier.
response
# +
from bs4 import BeautifulSoup
# Parse the downloaded HTML into a navigable BeautifulSoup tree using
# Python's built-in 'html.parser'.
html_soup = BeautifulSoup(response.text, 'html.parser')
html_soup
# -
# Antes de extrair os 50 contêineres, precisamos descobrir o que os diferencia de outros divelementos nessa página. Muitas vezes, a marca distintiva reside no class atributo . Se você inspecionar as linhas HTML dos contêineres de interesse, notará que o classatributo tem dois valores: lister-iteme mode-advanced. Essa combinação é exclusiva desses divcontêineres. Podemos ver que isso é verdade fazendo uma pesquisa rápida ( Ctrl + F). Temos 50 desses contêineres, então esperamos ver apenas 50 correspondências:
#
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/search.jpg'>
# <br>
#
# Agora vamos usar o find_all() método para extrair todos os divcontainers que possuem um classatributo lister-item mode-advanced:
#
#
# Grab every <div class="lister-item mode-advanced"> — one container per movie.
movie_containers = html_soup.find_all('div', class_ = 'lister-item mode-advanced')
print(type(movie_containers))  # bs4 ResultSet
print(len(movie_containers))  # expected 50 per the page layout described above
# +
# for movie in movie_containers:
# print(movie)
# break
# -
# O `find_all()` retornou um `ResultSet` objeto que é uma lista contendo todos os 50 divs que nos interessam.
#
# Agora vamos selecionar apenas o primeiro container e extrair, por sua vez, cada item de interesse:
#
# - O nome do filme
# - O ano do lançamento.
# - A classificação do IMDB.
# - O Metascore.
# - O número de votos.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/datapoints.jpg'>
# <br>
#
#
# # Extraindo os dados para um único filme
# Podemos acessar o primeiro contêiner, que contém informações sobre um único filme, usando notação de lista em movie_containers.
#
#
# Take the first of the movie containers for closer inspection.
first_movie = movie_containers[0]
first_movie
# Como você pode ver, o conteúdo HTML de um contêiner é muito longo. Para descobrir a linha HTML específica para cada ponto de dados, usaremos o DevTools novamente.
#
#
# # O nome do filme
#
# Começamos com o nome do filme e localizamos sua linha HTML correspondente usando o DevTools. Você pode ver que o nome está contido em uma tag de âncora ( a). Esta tag é aninhada dentro de uma tag de cabeçalho (h3). A h3 tag está aninhada em uma div tag. Este div é o terceiro do divsaninhado no container do primeiro filme. Nós armazenamos o conteúdo desse contêiner na first_movie variável.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/movie_name.jpg'>
# <br>
#
# first_movieé um Tag objeto , e as várias tags HTML dentro dele são armazenadas como seus atributos. Podemos acessá-los como se tivéssemos acesso a qualquer atributo de um objeto Python. No entanto, usar um nome de tag como um atributo selecionará apenas a primeira tag com esse nome. Se corrermos first_movie.div, só obtemos o conteúdo da primeira divtag:
#
first_movie.div
# Acessar a primeira tag âncora (**a**) não nos leva ao nome do filme. O primeiro **a** está em algum lugar dentro do segundo div:
first_movie.a
# No entanto, acessar a primeira **h3** tag nos aproxima muito:
first_movie.h3
# A partir daqui, podemos usar a notação de atributo para acessar o primeiro **a** dentro da **h3** tag:
first_movie.h3.a
# Agora tudo é apenas uma questão de acessar o texto de dentro dessa **a** :
first_name = first_movie.h3.a.text
first_name
# # O ano do lançamento do filme
# Continuamos com a extração do ano. Esses dados são armazenados na **span** abaixo do **a** que contém o nome.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/year_name.png'>
# <br>
#
# A notação de pontos só acessará o primeiro spanelemento. Vamos procurar pela marca distintiva do segundo **span**. Nós vamos usar o `find()` método que é quase o mesmo que `find_all()`, exceto que só retorna a primeira partida. Na verdade, `find()` é equivalente a `find_all(limit = 1)`. O limit argumento limita a saída para a primeira correspondência.
#
# A marca distintiva consiste nos valores lister-item-year text-muted unboldatribuídos ao classatributo. Então procuramos o primeiro **span** com esses valores dentro da **h3**:
# Find the release-year <span>; this exact class triple distinguishes it
# from the other spans inside the movie's <h3> header.
first_year = first_movie.h3.find('span', class_ = 'lister-item-year text-muted unbold')
first_year
# From here, just access the text using attribute notation:
first_year = first_year.text
first_year
# Poderíamos facilmente limpar essa saída e convertê-la em um inteiro. Mas, se você explorar mais páginas, perceberá que, para alguns filmes, o ano aceita valores imprevisíveis como (2017) (I) ou (2015) (V). É mais eficiente fazer a limpeza depois da raspagem, quando saberemos todos os valores do ano.
#
# # A classificação do IMDB
# Agora nos concentramos em extrair a classificação do IMDB do primeiro filme.
#
# Existem algumas maneiras de fazer isso, mas primeiro tentaremos a mais fácil. Se você inspecionar a classificação do IMDB usando DevTools, perceberá que a classificação está contida em uma **strong**.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/imdb_rating.png'>
# <br>
#
#
# Vamos usar a notação de atributo e esperar que o primeiro **strong** também seja o que contenha a classificação.
#
#
first_movie.strong
# Ótimo! Vamos acessar o texto, convertê-lo para o `float`tipo e atribuí-lo à variável `first_imdb`:
first_imdb = float(first_movie.strong.text)
first_imdb
# # O Metascore
# Se nós inspecionarmos o Metascore usando DevTools, notamos que podemos encontrá-lo dentro de uma **span**.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/metascore.png'>
# <br>
#
# A notação de atributos claramente não é uma solução. Existem muitas **span** antes disso. Você pode ver um logo acima da **strong**. É melhor usarmos os valores distintivos do classatributo ( metascore favorable).
#
# Observe que, se você copiar e colar esses valores da guia DevTools, haverá dois caracteres de espaço em branco entre metascoree favorable. Certifique-se de que haverá apenas um caractere de espaço em branco quando você passar os valores como argumentos. Caso contrário, `find()` não encontrará nada.
#
#
# +
# Locate the Metascore <span>; 'metascore favorable' must be written with a
# single space between the two class values, as explained above.
first_mscore = first_movie.find('span', class_ = 'metascore favorable')
first_mscore = int(first_mscore.text)
print(first_mscore)
# -
# O favorablevalor indica um alto Metascore e define a cor de fundo da classificação como verde. Os outros dois valores possíveis são unfavorablee mixed. O que é específico para todas as classificações do Metascore é apenas o metascorevalor. Este é o que vamos usar quando vamos escrever o script para a página inteira.
#
# # O número de votos
# O número de votos está contido em uma <span>tag. Sua marca distintiva é um nameatributo com o valor nv.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/nr_votes.png'>
# <br>
#
#
# O name atributo é diferente do class atributo. Usando BeautifulSoup podemos acessar elementos por qualquer atributo. As funções `find()` e `find_all()` têm um parâmetro chamado `attrs`. Para isso, podemos passar os atributos e valores que procuramos como dicionário:
first_votes = first_movie.find('span', attrs = {'name':'nv'})
first_votes.text
# Poderíamos usar a .text notação para acessar o <span>conteúdo da tag. Seria melhor se nós acessássemos o valor do data-valueatributo. Desta forma, podemos converter o ponto de dados extraído para um intsem ter que retirar uma vírgula.
#
# Você pode tratar um Tagobjeto como um dicionário. Os atributos HTML são as chaves do dicionário. Os valores dos atributos HTML são os valores das chaves do dicionário. É assim que podemos acessar o valor do data-valueatributo:
first_votes['data-value']
# Vamos converter esse valor para um inteiro e atribuí-lo a first_votes:
first_votes = int(first_votes['data-value'])
# É isso aí! Estamos agora em posição de escrever facilmente um script para criar uma única página.
#
# O script para uma única página
# Antes de juntar o que fizemos até agora, temos que ter certeza de que extrairemos os dados apenas dos contêineres que possuem um Metascore.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/no_mscores.jpg'>
# <br>
#
# Precisamos adicionar uma condição para ignorar filmes sem um Metascore.
#
# Usando DevTools novamente, vemos que a seção Metascore está contida em uma **div**. O class atributo tem dois valores: inline-blocke ratings-metascore. O distintivo é claro ratings-metascore.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/metascore_yes.png'>
# <br>
#
#
# Podemos usar find() para pesquisar cada contêiner de filme por divter essa marca distinta. Quando find() não encontra nada, retorna um Noneobjeto. Podemos usar esse resultado em uma ifinstrução para controlar se um filme é copiado.
#
# Vamos procurar na [página da web](https://www.imdb.com/search/title?release_date=2017&sort=num_votes,desc&page=1) para procurar por um contêiner de filme que não tenha um Metascore e ver o que find() retorna.
#
# Importante: quando eu corri o seguinte código, o oitavo contêiner não tinha um Metascore. No entanto, este é um alvo em movimento, porque o número de votos muda constantemente para cada filme. Para obter as mesmas saídas que na próxima célula de código demonstrativo, você deve pesquisar um contêiner que não tenha um Metascore no momento em que estiver executando o código.
movie22_mscore = movie_containers[22].find('div', class_ = 'ratings-metascore')
type(movie22_mscore)
movie22_mscore
# Agora vamos montar o código acima e comprimir o máximo possível, mas apenas na medida em que ele ainda é facilmente legível. No próximo bloco de código nós:
#
# - Declare algumas listas de variáveis para ter algo para armazenar os dados extraídos.
# - Faça um loop por cada contêiner movie_containers(a variável que contém todos os 50 contêineres de filme).
# - Extraia os pontos de dados de interesse somente se o contêiner tiver um Metascore.
# +
# Parallel lists that will hold one entry per scraped movie.
names = []
years = []
imdb_ratings = []
metascores = []
votes = []

# Walk every movie card on the page and pull out the fields of interest.
for card in movie_containers:
    # Skip entries without a Metascore so the five lists stay aligned.
    if card.find('div', class_ = 'ratings-metascore') is None:
        continue
    # Name, year, IMDB rating, Metascore and vote count, in that order.
    names.append(card.h3.a.text)
    years.append(card.h3.find('span', class_ = 'lister-item-year').text)
    imdb_ratings.append(float(card.strong.text))
    metascores.append(int(card.find('span', class_ = 'metascore').text))
    votes.append(int(card.find('span', attrs = {'name':'nv'})['data-value']))
# -
len(votes)
# Vamos verificar os dados coletados até o momento. Os pandas facilitam para nós ver se coletamos nossos dados com sucesso.
# +
import pandas as pd
# Sanity check: assemble the five parallel lists scraped from the single
# page into a DataFrame and display it.
test_df = pd.DataFrame({'movie': names,
                        'year': years,
                        'imdb': imdb_ratings,
                        'metascore': metascores,
                        'votes': votes})
test_df
# -
# Tudo correu como esperado!
#
# Como observação, se você executar o código de um país onde o inglês não é o idioma principal, é muito provável que você obtenha alguns dos nomes dos filmes traduzidos para o idioma principal desse país.
#
# Provavelmente, isso acontece porque o servidor infere sua localização do seu endereço IP. Mesmo se você estiver em um país onde o inglês é o idioma principal, talvez você ainda receba conteúdo traduzido. Isso pode acontecer se você estiver usando uma VPN enquanto faz as GETsolicitações.
#
# Se você encontrar esse problema, passe os seguintes valores para o headers parâmetro da função get():
#
#
# # O script para várias páginas
# Raspagem de várias páginas é um pouco mais desafiador. Nós vamos construir sobre o nosso script de uma página, fazendo mais três coisas:
#
# Fazendo todos os pedidos que queremos dentro do loop.
# Controlando a taxa do loop para evitar bombardear o servidor com solicitações.
# Monitorando o loop enquanto ele é executado.
# Nós vamos raspar as primeiras 4 páginas de cada ano no intervalo de 2000-2017. 4 páginas para cada um dos 18 anos perfazem um total de 72 páginas. Cada página tem 50 filmes, por isso vamos buscar dados para 3600 filmes no máximo. Mas nem todos os filmes têm um Metascore, então o número será menor do que isso. Mesmo assim, ainda estamos muito propensos a obter dados para mais de 2000 filmes.
#
# # Alterando os parâmetros da URL
# Como mostrado anteriormente, as URLs seguem uma certa lógica conforme as páginas da web mudam.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/url.png'>
# <br>
#
# Como estamos fazendo as solicitações, precisaremos apenas variar os valores de apenas dois parâmetros da URL: o release_dateparâmetro e page. Vamos preparar os valores que precisaremos para o próximo ciclo. Na próxima célula de código nós iremos:
#
# - Crie uma lista chamada pages e preencha-a com as sequências correspondentes às primeiras 4 páginas.
# - Crie uma lista chamada years_url e preencha-a com as strings correspondentes aos anos 2000-2017.
# Page numbers 1..4 rendered as strings for the `page` URL parameter.
pages = list(map(str, range(1, 5)))
pages
# Years 2015..2017 rendered as strings for the `release_date` URL parameter.
years_url = list(map(str, range(2015, 2018)))
years_url
# # Controlando a crawl-rate
#
# Controlar a taxa de rastreamento é benéfico para nós e para o site que estamos raspando. Se evitarmos martelar o servidor com dezenas de solicitações por segundo, é muito menos provável que nosso endereço IP seja banido. Também evitamos interromper a atividade do site que criamos, permitindo que o servidor responda também às solicitações de outros usuários.
#
# Controlaremos a taxa do loop usando a sleep() função do time módulo do Python. sleep() fará uma pausa na execução do loop por um período especificado de segundos.
#
# Para imitar o comportamento humano, vamos variar a quantidade de tempo de espera entre as solicitações usando a randint() função do random módulo do Python . randint()aleatoriamente gera inteiros dentro de um intervalo especificado.
#
# <br>
# <img src='https://www.dataquest.io/blog/content/images/2017/12/sleep_new.gif'>
# <br>
#
# Por enquanto, vamos apenas importar essas duas funções para evitar superlotação na célula de código que contém nosso loop principal.
from time import sleep
from random import randint
# # Monitorando o loop como ele ainda está indo
# Dado que estamos raspando 72 páginas, seria bom se pudéssemos encontrar uma maneira de monitorar o processo de raspagem como ele ainda está indo. Esse recurso é definitivamente opcional, mas pode ser muito útil no processo de teste e depuração. Além disso, quanto maior o número de páginas, mais útil será o monitoramento. Se você for raspar centenas ou milhares de páginas da web em uma única execução de código, eu diria que esse recurso se torna uma obrigação.
#
# Para nosso script, usaremos esse recurso e monitoraremos os seguintes parâmetros:
#
# - A frequência (velocidade) das solicitações , portanto, garantimos que nosso programa não sobrecarregue o servidor.
# - O número de pedidos , para que possamos interromper o loop caso o número de solicitações esperadas seja excedido.
# - O código de status de nossas solicitações, portanto, garantimos que o servidor esteja enviando as respostas adequadas.
#
# Para obter um valor de frequência, dividimos o número de solicitações pelo tempo decorrido desde a primeira solicitação. Isso é semelhante ao cálculo da velocidade de um carro - dividimos a distância pelo tempo gasto para cobrir essa distância. Vamos experimentar primeiro essa técnica de monitoramento em pequena escala. Na seguinte célula de código nós iremos:
#
# 1. Defina uma hora de início usando a time() função do time módulo e atribua o valor a start_time.
# 2. Atribua 0 à variável requestsque usaremos para contar o número de solicitações.
# 3. Inicie um loop e, em seguida, a cada iteração:
# 4. Simule um pedido.
# 5. Incrementar o número de solicitações por 1.
# 6. Pause o loop por um intervalo de tempo entre 8 e 15 segundos.
# 7. Calcule o tempo decorrido desde a primeira solicitação e atribua o valor a elapsed_time.
# 8. Imprima o número de pedidos e a frequência.
# +
from time import time
start_time = time()
# NOTE(review): `requests` here is a plain integer counter; it shadows the
# popular `requests` library name, but only `get` was imported from that
# package above, so there is no actual clash — just a confusing name.
requests = 0
# Simulate 5 requests and report the running request frequency (requests/s).
for _ in range(5):
    # A request would go here
    requests += 1
    sleep(randint(1,3))
    elapsed_time = time() - start_time
    print('Request: {}; Frequency: {} requests/s'.format(requests, requests/elapsed_time))
# -
# Como vamos fazer 72 requests, nosso trabalho parecerá um pouco desordenado à medida que a saída se acumular. Para evitar isso, limparemos a saída após cada iteração e a substituiremos por informações sobre a solicitação mais recente. Para fazer isso, usaremos a clear_output()função do core.display módulo do IPython . Vamos definir o parâmetro wait de clear_output() para True esperar com a substituição da saída atual até que apareça alguma nova saída.
# +
from IPython.core.display import clear_output
start_time = time()
requests = 0
# Same monitoring demo as before, but clear the cell output after each
# iteration so only the latest request's statistics remain visible.
for _ in range(5):
    # A request would go here
    requests += 1
    sleep(randint(1,3))
    current_time = time()
    elapsed_time = current_time - start_time
    print('Request: {}; Frequency: {} requests/s'.format(requests, requests/elapsed_time))
    # wait=True defers the clearing until new output arrives.
    clear_output(wait = True)
# -
# Para monitorar o código de status, definiremos o programa para nos avisar se algo estiver desligado. Uma solicitação bem-sucedida é indicada por um código de status de 200. Usaremos a warn() função do warnings módulo para lançar um aviso se o código de status não for 200.
#
#
# +
from warnings import warn
# Demo: warn() emits a non-fatal warning; used later for non-200 statuses.
warn("Warning Simulation")
# -
# Escolhemos um aviso sobre a quebra do loop, porque há uma boa possibilidade de obter dados suficientes, mesmo que algumas das solicitações falhem. Vamos apenas quebrar o loop se o número de solicitações for maior que o esperado.
#
# # Juntando tudo
# Agora vamos juntar tudo o que fizemos até agora! Na célula de código a seguir, começamos por:
#
# - Redeclarando as variáveis de listas para que elas se tornem vazias novamente.
# - Preparando o monitoramento do loop.
#
# **Então nós vamos:**
#
# Percorra a years_urllista para variar o release_dateparâmetro do URL.
# Para cada elemento years_url, percorra a pageslista para variar o pageparâmetro da URL.
# Faça os GETpedidos dentro do pagesloop.
# Pause o loop por um intervalo de tempo entre 8 e 15 segundos.
# Monitore cada solicitação como discutido anteriormente.
# Lance um aviso para códigos de status não-200.
# Quebre o loop se o número de solicitações for maior que o esperado.
# Converta o responseconteúdo HTML de um BeautifulSoupobjeto.
# Extraia todos os contêineres de filmes desse BeautifulSoupobjeto.
# Faça um loop por todos esses contêineres.
# Extraia os dados se um contêiner tiver um Metascore.
# +
# Redeclaring the lists to store data in
names = []
years = []
imdb_ratings = []
metascores = []
votes = []

# Preparing the monitoring of the loop
start_time = time()
requests = 0  # running request counter
stop_scraping = False  # set once the request budget (72) is exhausted

# For every year in the interval 2015-2017
for year_url in years_url:
    # For every page in the interval 1-4
    for page in pages:
        # Make a get request
        response = get('http://www.imdb.com/search/title?release_date=' \
                       + str(year_url) +
                       '&sort=num_votes,desc&page=' + str(page))

        # Pause the loop so we do not hammer the server
        sleep(randint(1,4))

        # Monitor the requests
        requests += 1
        elapsed_time = time() - start_time
        print('Request:{}; Frequency: {} requests/s'.format(requests, requests/elapsed_time))
        clear_output(wait = True)

        # Throw a warning for non-200 status codes
        if response.status_code != 200:
            warn('Request: {}; Status code: {}'.format(requests, response.status_code))

        # Stop completely if the number of requests is greater than expected.
        # (Fixed: a bare `break` here only left the inner `pages` loop, so
        # the outer `years_url` loop kept issuing requests; the flag below
        # propagates the stop to the outer loop as intended.)
        if requests > 72:
            warn('Number of requests was greater than expected.')
            stop_scraping = True
            break

        # Parse the content of the request with BeautifulSoup
        page_html = BeautifulSoup(response.text, 'html.parser')

        # Select all the 50 movie containers from a single page
        mv_containers = page_html.find_all('div', class_ = 'lister-item mode-advanced')

        # For every movie of these 50
        for container in mv_containers:
            # If the movie has a Metascore, then:
            if container.find('div', class_ = 'ratings-metascore') is not None:
                # Scrape the name
                name = container.h3.a.text
                names.append(name)
                # Scrape the year
                year = container.h3.find('span', class_ = 'lister-item-year').text
                years.append(year)
                # Scrape the IMDB rating
                imdb = float(container.strong.text)
                imdb_ratings.append(imdb)
                # Scrape the Metascore
                m_score = container.find('span', class_ = 'metascore').text
                metascores.append(int(m_score))
                # Scrape the number of votes
                vote = container.find('span', attrs = {'name':'nv'})['data-value']
                votes.append(int(vote))
    if stop_scraping:
        break
# -
# # Examinando os dados raspados
# No próximo bloco de código nós:
#
# Mesclar os dados em um pandas DataFrame.
# Imprima algumas informações sobre o recém criado DataFrame.
# Mostre as 10 primeiras entradas.
# +
# Merge the data scraped from all pages into a single DataFrame.
movie_ratings = pd.DataFrame({'movie': names,
                              'year': years,
                              'imdb': imdb_ratings,
                              'metascore': metascores,
                              'votes': votes})
movie_ratings.head(10)
# -
movie_ratings.shape
# # Exemplo 2
#
# Utilizando bs4 para pegar dados do campeonato Argentino.
# + [markdown] colab_type="text" id="9lv5ptoP1xZ-"
# Nesta Prática Guiada usaremos BeautifulSoup para baixar a informação da primeira divisão de futebol da Argentina da página da ESPN:
#
# http://www.espn.com.ar/futbol/posiciones/_/liga/arg.
#
# Devemos baixá-la, extraí-la, ordená-la e guardá-la em um csv e/ou um DataFrame para seu uso.
# Primeiro, importamos as bibliotecas necessárias.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="MKBr6w7w1xZ_"
import requests
from bs4 import BeautifulSoup
import pandas as pd
#Com isto, podemos ver o código HTML diretamente no Notebook
from IPython.display import HTML, display
# + [markdown] colab_type="text" id="Tgl_HAJR1xaF"
# Para verificar se não há problema scrapeando esta informação, revisamos o arquivo "robots.txt"
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="T7OWV-eX1xaG" outputId="9bd20013-c549-4e5d-f4ac-2ac525b969d3"
# Fetch the site's robots.txt to confirm the standings URL is not disallowed.
robots = 'http://www.espn.com.ar/' + 'robots.txt'
print(requests.get(robots).text)
# + [markdown] colab_type="text" id="E_LSwR8x1xaN"
# Nossa URL não aparece, então procedemos.
#
# Fazemos um request "GET" ao URL com a tabela e vemos o conteúdo da resposta no Notebook
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="vN3N9kof1xaN"
# Fetch the standings page for the Argentine first division.
url = 'http://www.espn.com.ar/futbol/posiciones/_/liga/arg.1'
resp = requests.get(url)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="2TXZujGc1xaR" outputId="42bf7326-b1d1-44f5-bb7e-583a21bd1a32"
# View the rendered content
# display(HTML(resp.text))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="HEbXsFL71xaX"
# Parse the response body with Python's built-in HTML parser.
soup = BeautifulSoup(resp.text, 'html.parser')
# + [markdown] colab_type="text" id="1PqiE90a1xaa"
# Podemos acessar "tags" diretamente
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="xigRTWKe1xac" outputId="d3110c19-c9ee-4718-9fbb-baa1df219499"
soup.title
# + [markdown] colab_type="text" id="PUFv0qov1xah"
# Para trabalhar com o conteúdo, vamos usar alguns dos seguintes métodos que este objeto tem, como:
#
# - .findAll() / .find_all
# - .find()
# - .get()
# - .get_text()
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="Ik0-gska1xai" outputId="148c5e6e-5b84-4e94-8605-5e675ee8b5d4"
soup.head.link.get('href')
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="0igGNb8h1xam"
#Podemos usar find() para encontrar a tabela
raw_table = soup.find('table', {'class':'standings has-team-logos'})
# -
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="a-OnX5yz1xar" outputId="d1e84d68-179e-42f4-fa80-20fd6c2399ff"
type(raw_table)
# + [markdown] colab_type="text" id="xHCrrVp91xaw"
# Isto nos retorna um objeto "Tag" definido pela biblioteca BeautifulSoup. Podemos ver seus atributos usando .attrs
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="rPuKpnob1xax" outputId="46ea5e2d-8a59-48c7-8798-56394cb056a4"
raw_table.attrs
# + [markdown] colab_type="text" id="zk3FSq3M1xa3"
# Dentro do atributo "children" encontramos todos os descendentes desse nó. No caso da nossa tabela, os descendentes são tags "tr", que correspondem a cada fila. Existe outro atributo chamado "descendants", que, ao contrário do primeiro, é recursivo. Neste caso, só precisamos dos nós diretamente próximos, então usamos o primeiro método.
#
# Iteramos todas as filas e formamos uma matriz com os dados, para depois carregá-los a um DataFrame.
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="GLAFHzoK1xa3" outputId="82354b3e-a8ad-4880-c681-2cc8d4d19dfa"
rows = []
for row in raw_table.children:
rows.append(row.get_text(separator= ','))
rows
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="rPorMp7m1xa-"
#Separamos as colunas e descartamos as filas vazias
table = [row.split(',') for row in rows if len(row) > 1]
# -
pd.DataFrame(table)
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="3qQS7rSv1xbC" outputId="b15b1d2e-8709-4bb5-8972-f65cc819ef5c"
#Corrigimos a tabela
table[0] = ['Index', 'Name', 'Abbr'] + table[0][1:]
print(*table, sep = '\n')
# + [markdown] colab_type="text" id="vwPuNFrp1xbJ"
# Para guardar o arquivo a um csv usando Python, usamos:
# -
df = pd.DataFrame(table[1:], columns= table[0])
df
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="02lv1Y-01xbK"
filename = 'tabla.csv'
with open(filename, 'w') as out:
out.write('\n'.join([','.join(row) for row in table]))
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="fCYubdLY1xbN"
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="QUH4I3r11xbS" outputId="b7841adb-3dc4-4f38-ee8e-28cc1e3afa69"
df.columns
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="1iF9aVJ01xbX" outputId="86f3405a-ef69-4af5-a09c-7315c36ba2f0"
df.set_index('Index', inplace=True)
df.head()
# + [markdown] colab_type="text" id="9LIa1tXS1xbb"
# Por último, podemos guardar o arquivo usando pandas com:
# + colab={"autoexec": {"startup": false, "wait_interval": 0}} colab_type="code" id="lKH8miSV1xbb"
filename = 'tabela_ex.xlsx'
df.to_excel(filename)
# -
# # Exercicio
#
# Captura uma lista de noticias **e** urls das noticias da pagina principal do site de noticias.
#
# obs: tente não acessar os href para não bloquearem nosso ip ;)
# # Desafio
#
# Crie um programa que pegue os preços dos notebooks diariamente no site walmart e avise quando o preço diminuir mais que 10%.
| 98-dados/scraping/bs4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# > Please set `DB_URI` in your environment variables to point at the database
# +
# %matplotlib inline
import os
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from alphamind.api import *
from alphamind.strategy.strategy import RunningSetting
from alphamind.strategy.strategy import Strategy
from PyFin.api import *
from PyFin.Math.Accumulators.StatefulAccumulators import MovingAverage
from PyFin.Math.Accumulators.StatefulAccumulators import MovingSharp
from PyFin.Math.Accumulators.StatefulAccumulators import MovingMaxDrawdown
plt.style.use('ggplot')
# +
"""
Back test parameter settings
"""
benchmark_code = 300
universe = Universe('hs300')
start_date = '2020-01-01'
end_date = '2020-02-21'
freq = '10b'  # rebalance every 10 business days
neutralized_risk = None
# Two equally-weighted cross-sectional quantile factors.
alpha_factors = {
    'f01': CSQuantiles(LAST('EMA5D')),
    'f02': CSQuantiles(LAST('EMV6D')),
}
weights = dict(
    f01=1.,
    f02=1.
)
alpha_model = ConstLinearModel(features=alpha_factors, weights=weights)
data_meta = DataMeta(freq=freq,
                     universe=universe,
                     batch=1,
                     neutralized_risk=None,
                     pre_process=None,
                     post_process=None,
                     data_source=os.environ['DB_URI'])
strategy = Strategy(alpha_model,
                    data_meta,
                    universe=universe,
                    start_date=start_date,
                    end_date=end_date,
                    freq=freq,
                    benchmark=benchmark_code)
# Fetch/prepare all data once so create_scenario() can be called repeatedly.
strategy.prepare_backtest_data()
def create_scenario(weights_bandwidth=0.02, target_vol=0.01, method='risk_neutral'):
    """Run one backtest of the module-level `strategy` and return its return frame.

    weights_bandwidth -- per-asset weight band passed to RunningSetting.
    target_vol        -- volatility target (used by the 'tv' rebalance method).
    method            -- rebalance method name.

    Relies on `strategy.prepare_backtest_data()` having been called already.
    """
    constraint_risk = ['SIZE', 'SIZENL', 'BETA']
    total_risk_names = constraint_risk + ['benchmark', 'total']

    # Benchmark exposure may float between 80% and 100% (relative bounds);
    # every other listed factor is pinned to exactly zero absolute exposure.
    b_type = []
    l_val = []
    u_val = []
    for name in total_risk_names:
        if name == 'benchmark':
            b_type.append(BoundaryType.RELATIVE)
            l_val.append(0.8)
            u_val.append(1.0)
        else:
            b_type.append(BoundaryType.ABSOLUTE)
            l_val.append(0.0)
            u_val.append(0.0)

    bounds = create_box_bounds(total_risk_names, b_type, l_val, u_val)
    running_setting = RunningSetting(weights_bandwidth=weights_bandwidth,
                                     rebalance_method=method,
                                     bounds=bounds,
                                     target_vol=target_vol,
                                     turn_over_target=0.4)

    # NOTE: the original also built industry_names/all_styles and the
    # accumulators previous_pos/rets/turn_overs/leverags, none of which were
    # ever read; they have been removed. Positions are likewise discarded.
    ret_df, _ = strategy.run(running_setting)
    return ret_df
# -
def create_report(ret_df, windows):
    """Build a per-date performance report from a backtest return frame.

    ret_df  -- frame with columns `excess_return`, `turn_over`, `leverage`,
               indexed by date (as produced by create_scenario).
    windows -- rolling window length (in periods) for the Sharpe ("sharp")
               and rolling-drawdown accumulators.

    Returns a DataFrame indexed by date with daily/cumulative returns, rolling
    Sharpe, rolling and full-sample max drawdown, and leverage. Note: this
    mutates ret_df by adding the `ret_after_tc` column.
    """
    # Rolling accumulators from PyFin; "sharp" follows the original spelling.
    sharp_calc = MovingSharp(windows, x='ret', y='riskFree')
    drawdown_calc = MovingMaxDrawdown(windows, x='ret')
    # Full-sample window => overall maximum drawdown.
    max_drawdown_calc = MovingMaxDrawdown(len(ret_df), x='ret')
    # Net return after a 20 bps transaction cost on turnover.
    ret_df['ret_after_tc'] = ret_df.excess_return - 0.002 * ret_df.turn_over
    res_df = pd.DataFrame(columns=['daily_return', 'cum_ret', 'sharp', 'drawdown', 'max_drawn', 'leverage'])
    total_returns = 0.
    for i, ret in enumerate(ret_df['ret_after_tc']):
        date = ret_df.index[i]
        total_returns += ret
        # Risk-free rate is assumed to be zero throughout.
        sharp_calc.push({'ret': ret, 'riskFree': 0.})
        drawdown_calc.push({'ret': ret})
        max_drawdown_calc.push({'ret': ret})
        res_df.loc[date, 'daily_return'] = ret
        res_df.loc[date, 'cum_ret'] = total_returns
        res_df.loc[date, 'drawdown'] = drawdown_calc.result()
        res_df.loc[date, 'max_drawn'] = max_drawdown_calc.result()
        res_df.loc[date, 'leverage'] = ret_df.loc[date, 'leverage']
        # Warm-up: report 0 Sharpe for the first 5 observations; afterwards
        # annualize by sqrt(windows).
        if i < 5:
            res_df.loc[date, 'sharp'] = 0.
        else:
            res_df.loc[date, 'sharp'] = sharp_calc.result() * np.sqrt(windows)
    return res_df
# +
# %%time
# Sweep the weight bandwidth under risk-neutral rebalancing; one Excel
# sheet per setting.
weight_gaps = [0.005, 0.010, 0.015, 0.020]
# NOTE(review): the filename says zz800_cyb but the universe above is hs300 —
# likely copied from another notebook; confirm the intended name.
with pd.ExcelWriter(f'zz800_cyb_{benchmark_code}_gap.xlsx', engine='xlsxwriter') as writer:
    for i, weight_gap in enumerate(weight_gaps):
        ret_df = create_scenario(weight_gap, target_vol=0.01, method='risk_neutral')
        res_df = create_report(ret_df, 25)
        res_df.to_excel(writer, sheet_name=f'{i}')
        alpha_logger.info(f"weight_gap: {weight_gap} finished")
# +
# %%time
# Sweep the volatility target under target-vol ('tv') rebalancing.
target_vols = [0.015, 0.030, 0.045, 0.060]
with pd.ExcelWriter(f'hs300_{benchmark_code}_tv.xlsx', engine='xlsxwriter') as writer:
    for i, target_vol in enumerate(target_vols):
        ret_df = create_scenario(0.01, target_vol=target_vol, method='tv')
        res_df = create_report(ret_df, 25)
        res_df.to_excel(writer, sheet_name=f'{i}')
        alpha_logger.info(f"target_vol: {target_vol:.4f} finished")
# -
| notebooks/Example 3 - Multi Weight Gap Comparison.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Import the Dataset module
from skgaze.core.Dataset import Dataset
# The first argument of the Dataset class is a .tsv file from Tobii Studio, the second is a text file with information about the individual areas of interest, and the last argument is optional and tells the library whether our dataset has been filtered.
dataset = Dataset('../data/DOD2016_fixations.tsv',
                  '../data/SegmentedPages.txt')
# Print the basic information about the dataset.
# Basic information about dataset
dataset.info()
# Every dataset holds an array of the participants in the experiment. To access a specific participant we simply index into the array. Individual attributes are obtained as follows.
print(dataset.participants[0].id)
print(dataset.participants[0].scanpath)
# Individual fixations are reachable too, but because of the output length we keep this line commented out
#print(dataset.participants[0].fixations)
# Information about the areas of interest is obtained in a similar way as for participants.
print(dataset.aoi_array[0].type_of_element)
print(dataset.aoi_array[0].aoi_char)
print(dataset.aoi_array[0].x)
print(dataset.aoi_array[0].y)
print(dataset.aoi_array[0].width)
print(dataset.aoi_array[0].height)
| docs/Dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re
import networkx as nx
from IPython.display import Image, display
# +
import matplotlib as mpl
import matplotlib.pyplot as plt
# %matplotlib inline
mpl.style.use('seaborn-muted')
# -
class Token:
    """A normalized token with a configurable equality test.

    Tokens compare equal after cleaning: optional case-folding followed by
    removal of characters matching the ``scrub_re`` pattern.
    """

    def __init__(self, token, ignore_case=True, scrub_re=r'\.'):
        # r'\.' fixes the original '\.' default, which is an invalid escape
        # sequence (SyntaxWarning in modern Python, slated to become an
        # error); the runtime value is unchanged.
        self.ignore_case = ignore_case
        self.scrub_re = scrub_re
        self.token = token
        self.token_clean = self._clean(token)

    def __str__(self):
        return '<%s>' % self.token_clean

    def __repr__(self):
        return '%s<%s>' % (self.__class__.__name__, self.token_clean)

    def _clean(self, token):
        """Normalize *token* per this instance's case/scrub settings."""
        if self.ignore_case:
            token = token.lower()
        if self.scrub_re:
            token = re.sub(self.scrub_re, '', token)
        return token

    def __call__(self, input_token):
        """Return True if *input_token* matches this token after cleaning."""
        return self._clean(input_token) == self.token_clean
class GeoFSA(nx.DiGraph):
    """A small finite-state automaton stored as a directed graph.

    States are integer nodes. Edges carrying an ``accept_fn`` predicate are
    token transitions; edges without one are epsilon transitions ('ε').
    """

    def __init__(self):
        super().__init__()

    def _next_state(self):
        """Allocate and register the next unused integer state id."""
        if self.nodes:
            fresh = max(self.nodes) + 1
        else:
            fresh = 0
        self.add_node(fresh)
        return fresh

    def add_token(self, accept_fn, parent=None, optional=False):
        """Append a token edge after *parent* (or a brand-new start state).

        When *optional* is true, epsilon edges are added so the token may be
        skipped. Returns the state from which further tokens should chain.
        """
        source = parent or self._next_state()
        target = self._next_state()
        self.add_edge(source, target, accept_fn=accept_fn, label=str(accept_fn))
        if not optional:
            return target
        bypass = self._next_state()
        self.add_edge(target, bypass, label='ε')
        self.add_edge(source, bypass, label='ε')
        return bypass
class Matcher:
    """Feeds tokens through a GeoFSA-style automaton, tracking live states.

    The automaton only needs to be indexable as ``fsa[state]`` yielding a
    mapping of destination state -> edge attributes, where an edge with an
    ``accept_fn`` is a token transition and one without is an epsilon edge.
    """

    def __init__(self, fsa):
        self.fsa = fsa
        self._states = set([0])   # currently-live states; 0 is the start
        self.accepted = []        # tokens consumed so far

    def step(self, start_state, token, visited=None):
        """Return the set of states reachable from *start_state* on *token*.

        Epsilon edges are followed recursively; *visited* guards against
        cycles of epsilon transitions.
        """
        visited = visited if visited else set()
        visited.add(start_state)
        reachable = set()
        for dest, attrs in self.fsa[start_state].items():
            predicate = attrs.get('accept_fn')
            if predicate:
                if predicate(token):
                    reachable.add(dest)
            elif dest not in visited:
                reachable.update(self.step(dest, token, visited))
        return reachable

    def __call__(self, token):
        """Consume *token*; record it and return True if any state advanced."""
        advanced = set()
        for live in self._states:
            advanced.update(self.step(live, token))
        if not advanced:
            return False
        self._states = advanced
        self.accepted.append(token)
        return True
# +
# Build an FSA matching "South Lake Tahoe", an optional comma, then either
# state spelling ("California" or "CA").
title_tokens = ['South', 'Lake', 'Tahoe']
states = [['California'], ['CA']]
g = GeoFSA()
parent = None
for token in title_tokens:
    parent = g.add_token(Token(token), parent=parent)
comma = g.add_token(Token(','), parent=parent, optional=True)
# Both state alternatives branch from the (optional) comma state.
for state in states:
    parent = comma
    for token in state:
        parent = g.add_token(Token(token), parent=parent)
# -
m = Matcher(g)
print(m('South'))
print(m('Lake'))
print(m('Tahoe'))
print(m(','))
# print(m(','))
# print(m(','))
print(m('CA'))
m.accepted
# Render the automaton left-to-right via pydot/Graphviz.
dot = nx.drawing.nx_pydot.to_pydot(g)
dot.set_rankdir('LR')
display(Image(dot.create_png()))
# # Next
#
# - tighten up builder API?
# - `__str__` on Token?
# - build index, wire up with find_locs
# - hydrate metadata from FSA matches
# - states
| notebooks/17-fsa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # ResNet Model Latency Testing
#
# Testing ResNet model with the default Seldon Tensor and Tensorflow Tensor.
#
# <img src="dog.jpeg"/>
# !cp ../../../proto/prediction.proto ./proto
# !cp -vr ../../../proto/tensorflow/tensorflow .
# !python -m grpc.tools.protoc -I./ --python_out=./ --grpc_python_out=./ ./proto/prediction.proto
# # Download model
#
# !mkdir -p model
# !wget -O model/saved_model.pb https://storage.googleapis.com/inference-eu/models_zoo/resnet_V1_50/saved_model/saved_model.pb
# # Wrap inference
# !s2i build -E environment_grpc . seldonio/seldon-core-s2i-python36:1.14.0-dev seldon-resnet2.4
# !docker run --name "resnet" -d --rm -p 5000:5000 -v ${PWD}/model:/model seldon-resnet2.4
# # Test
# +
import base64
import datetime
import json
import pickle
import cv2
import grpc
import numpy as np
import requests
import tensorflow as tf
from proto import prediction_pb2, prediction_pb2_grpc
# +
def image_2_vector(input_file, w=224, h=224):
    """Load an image file and return it flattened to a (w*h*3,) vector.

    Fixes vs. the original: ``w``/``h`` were undefined free variables (they
    existed only as locals of ``run``), and ``cv2.imdecode`` requires a raw
    uint8 byte buffer, so reading the file as float32 could never decode.
    """
    nparr = np.fromfile(input_file, dtype=np.uint8)
    print("nparr", nparr.dtype, nparr.shape)
    img = cv2.imdecode(nparr, cv2.IMREAD_ANYCOLOR)
    print("img", img.dtype, img.shape)
    print("Initial size", img.shape)
    image = cv2.resize(img, (w, h))
    print("image", image.dtype)
    print("Converted size", image.shape)
    # Flatten to a single vector (assumes a 3-channel decode).
    vector = image.reshape((w * h * 3))
    print("vector shape", vector.shape, "vector type", vector.dtype)
    return vector
def image_2_bytes(input_file):
    """Read *input_file* in binary mode and return its raw bytes."""
    with open(input_file, "rb") as binary_file:
        payload = binary_file.read()
    print("binary data size:", len(payload), type(payload))
    return payload
def run(function, image_path, iterations=1):
    """Benchmark Predict latency against the local Seldon gRPC server.

    function    -- payload encoding: "tensor" (default Seldon Tensor) or
                   "tftensor" (TensorFlow TensorProto); anything else raises
                   ValueError (the original fell through to a NameError).
    image_path  -- image file read with OpenCV and resized to 224x224.
    iterations  -- number of timed Predict calls.

    Prints per-call latencies plus summary statistics (mean/median/min/max,
    percentiles, and fps equivalents).
    """
    w = 224
    h = 224
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel("localhost:5000") as channel:
        stub = prediction_pb2_grpc.ModelStub(channel)
        print("seldon stub", stub)
        processing_times = np.zeros((0), int)
        img = cv2.imread(image_path)
        print("img type", type(img))
        print("img", img.shape)
        print("Initial size", img.shape)
        # Resize to the model's input resolution and add a batch dimension.
        image = cv2.resize(img, (w, h))
        image = image.reshape(1, w, h, 3)
        print("image", image.dtype)
        print("Converted size", image.shape)
        if function == "tensor":
            datadef = prediction_pb2.DefaultData(
                names="x",
                tensor=prediction_pb2.Tensor(
                    shape=image.shape, values=image.ravel().tolist()
                ),
            )
        elif function == "tftensor":
            print("Create tftensor")
            datadef = prediction_pb2.DefaultData(
                names="x", tftensor=tf.make_tensor_proto(image)
            )
        else:
            # Fail fast instead of crashing below with an undefined request.
            raise ValueError("unknown payload encoding: %r" % (function,))
        GRPC_request = prediction_pb2.SeldonMessage(data=datadef)
        for _ in range(iterations):
            start_time = datetime.datetime.now()
            stub.Predict(request=GRPC_request)
            end_time = datetime.datetime.now()
            duration = (end_time - start_time).total_seconds() * 1000
            processing_times = np.append(processing_times, np.array([int(duration)]))
        print("processing time for all iterations")
        for x in processing_times:
            print(x, "ms")
        print("processing_statistics")
        print(
            "average time:",
            round(np.average(processing_times), 1),
            "ms; average speed:",
            round(1000 / np.average(processing_times), 1),
            "fps",
        )
        print(
            "median time:",
            round(np.median(processing_times), 1),
            "ms; median speed:",
            round(1000 / np.median(processing_times), 1),
            "fps",
        )
        print(
            "max time:",
            round(np.max(processing_times), 1),
            "ms; max speed:",
            round(1000 / np.max(processing_times), 1),
            "fps",
        )
        print(
            "min time:",
            round(np.min(processing_times), 1),
            "ms; min speed:",
            round(1000 / np.min(processing_times), 1),
            "fps",
        )
        print(
            "time percentile 90:",
            round(np.percentile(processing_times, 90), 1),
            "ms; speed percentile 90:",
            round(1000 / np.percentile(processing_times, 90), 1),
            "fps",
        )
        print(
            "time percentile 50:",
            round(np.percentile(processing_times, 50), 1),
            "ms; speed percentile 50:",
            round(1000 / np.percentile(processing_times, 50), 1),
            "fps",
        )
        print("time standard deviation:", round(np.std(processing_times)))
        print("time variance:", round(np.var(processing_times)))
# -
# Time 100 Predict calls with each payload encoding.
run("tensor", "./dog.jpeg", iterations=100)
run("tftensor", "./dog.jpeg", iterations=100)
# The stats illustrate that the tftensor payload — the only difference between the two runs — improves latency performance.
# !docker rm -f resnet
| examples/models/resnet/reset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="1a6f6qUM6jUG"
# **O(1) Constant**
# + colab={"base_uri": "https://localhost:8080/"} id="mYp1jxAA40Fs" outputId="5b92c127-6a0a-45b0-f85a-661282a49e5a"
def func_constant(values):
    """O(1): print only the first element of *values*."""
    first = values[0]
    print(first)

func_constant([1, 2, 3])
# + [markdown] id="OWjFS_1u6tI_"
# **O(n) Linear**
# + colab={"base_uri": "https://localhost:8080/"} id="JbAu41s06eUu" outputId="744a7909-2752-4f29-8903-4e99a78cc7a5"
def func_linear(lst):
    """O(n): print every element of *lst* in order."""
    for element in lst:
        print(element)

func_linear([1, 2, 3])
# + [markdown] id="ll8aVFiI7bTU"
# **O(n^2) Quadratic**
# + colab={"base_uri": "https://localhost:8080/"} id="mRFH5u8Z7QuJ" outputId="313918b3-30ba-4426-ebe6-f86edd202dae"
def func_quadratic(lst):
    """O(n^2): print every ordered pair drawn from *lst*."""
    for left in lst:
        for right in lst:
            print(left, right)

func_quadratic((1, 2, 3, 4))
# + [markdown] id="StwkQO0j7jIu"
# **Calculating Scale of Big O**
# + id="W8fZR8u68mPE"
def print_once(lst):
    """O(n): print each element of *lst* exactly once."""
    for entry in lst:
        print(entry)
# + colab={"base_uri": "https://localhost:8080/"} id="WLhU2k0X86Tj" outputId="de59afd9-daed-4487-bb1c-dfc6f4055859"
# Shared sample list used by the examples below.
a_list = [0, 1, 2, 3]
print_once(a_list)
# + id="3qRqZqQJ9BvL"
def print_3(lst):
    """O(3n) ~ O(n): print the whole list three times over."""
    for _ in range(3):
        for entry in lst:
            print(entry)
# + colab={"base_uri": "https://localhost:8080/"} id="7hXHIipQ9lNH" outputId="da20b86f-7725-413b-c7a2-8045281587ad"
print_3(a_list)
# + id="JiHffvGW-BC1"
def func_compare(lst):
    """Mixed costs: O(1) head print + O(n/2) half scan + O(10) fixed prints.

    Dropping constants, the whole thing simplifies to O(n).
    """
    print(lst[0])
    half = len(lst) // 2
    for value in lst[:half]:
        print(value)
    for _ in range(10):
        print("Number")
# + colab={"base_uri": "https://localhost:8080/"} id="FGXfFS8SHrEc" outputId="f92c2ab0-9532-4346-9c89-64e54f1dc221"
# Ten-element sample for the comparison example.
b_list = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
func_compare(b_list)
# + [markdown] id="nkQLiRojIfyy"
# **Worst Case VS Best Case**
# + id="hr2uVzx1H2ki"
def matcher(lst, match):
    """Return True if *match* occurs in *lst*, scanning until the first hit."""
    return any(item == match for item in lst)
# + colab={"base_uri": "https://localhost:8080/"} id="3MpES_B2Jgtf" outputId="5e834445-a2b6-4718-931a-f3de9ab682d6"
b_list
# + colab={"base_uri": "https://localhost:8080/"} id="Df6wURNMJda2" outputId="57613bc6-7363-4c94-d3bd-b332a1c91a4c"
matcher(b_list, 1)  # best case: match found at the first element
# + colab={"base_uri": "https://localhost:8080/"} id="1wQc8myfI-mT" outputId="55b9d9ba-67f6-48e3-9caf-f0cb9a213b7d"
matcher(b_list, 5)  # average case: match in the middle
# + colab={"base_uri": "https://localhost:8080/"} id="5GTh0bYTJCix" outputId="b025e367-d882-409a-c6cf-91e8aba371db"
matcher(b_list, 33)  # worst case: no match, whole list scanned
# + [markdown] id="MK0AUVAYJkN8"
# **Space Complexity**
# + id="bW5T2RlmJYBY"
def printer(n=10):
    """Print "Hello World!" *n* times — O(1) space, O(n) time."""
    for _ in range(n):
        print("Hello World!")
# + colab={"base_uri": "https://localhost:8080/"} id="isPQ7NPGKDzh" outputId="4bf90bee-dc50-4d7a-e8fc-c3bcc7d378d6"
printer()
# + [markdown] id="kz3I2YqoKOQh"
# Note how nothing accumulates as the loop runs: the same constant string is printed each time and no data structure grows with n. So the algorithm has O(1) **space** complexity and an O(n) **time** complexity.
# + id="qQAQnXWPKEd5"
def create_list(n):
    """Return a list of *n* copies of "new" — O(n) space complexity."""
    return ["new" for _ in range(n)]
# + colab={"base_uri": "https://localhost:8080/"} id="uwzhnUgUKpVp" outputId="f30f1588-6300-48d8-fc4d-6773f4c78e33"
create_list(5)
# + [markdown] id="oiYnpHuqK_gL"
# Note how the size of the new_list object scales with the input n, this shows that it is an O(n) algorithm with regards to **space** complexity.
#
#
| Big-O/Big_O_examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:PythonData] *
# language: python
# name: conda-env-PythonData-py
# ---
# +
# Dependencies and Setup
import pandas as pd
# File to Load (Remember to Change These)
file_to_load = "Resources/purchase_data.csv"
# Read Purchasing File and store into Pandas data frame
purchase_data = pd.read_csv(file_to_load)
purchase_data.head()
# -
# Player Count
# Display total number of players
# "SN" holds one screen name per purchase; unique SNs = distinct players.
total_players = len(pd.unique(purchase_data["SN"]))
df_total_players = pd.DataFrame({'Total Players':[total_players]})
df_total_players
# +
# Purchasing Analysis (Total)
# Run basic calculations to obtain number of unique items, average price, etc.
number_of_unique_items = len(pd.unique(purchase_data["Item ID"]))
average_price = purchase_data["Price"].mean()
number_of_purchases = len(purchase_data["Purchase ID"])
total_revenue = purchase_data["Price"].sum()
# Create a summary data frame to hold the results
data = {"Number of Unique Items": [number_of_unique_items],
        "Average Price": [average_price],
        "Number of Purchases": [number_of_purchases],
        "Total Revenue":[total_revenue]}
summary_purchase_data = pd.DataFrame(data)
# give the displayed data cleaner formatting
summary_purchase_data["Average Price"] = summary_purchase_data["Average Price"].map("${:,.2f}".format)
summary_purchase_data["Total Revenue"] = summary_purchase_data["Total Revenue"].map("${:,.2f}".format)
# Display the summary data frame
summary_purchase_data
# +
# Gender Demographics
# total count of players by gender (unique SNs per gender)
total_count = purchase_data.groupby("Gender")["SN"].nunique()
# percentage of players by gender
percentage_of_players = (total_count/total_players)*100
# build gender_demographics dataframe
gender_demographics=pd.DataFrame(columns = ["Total Count","Percentage of Players"])
gender_demographics["Total Count"] = total_count
gender_demographics["Percentage of Players"] = percentage_of_players
gender_demographics_summary=gender_demographics.sort_values(by=['Percentage of Players'],ascending=False)
# format percentage
gender_demographics_summary["Percentage of Players"] = gender_demographics_summary["Percentage of Players"].map("{:.2f}%".format)
gender_demographics_summary
# +
# Purchasing Analysis (Gender)
# Run basic calculations to obtain number of unique items, average price, etc
purchase_analysis=purchase_data.groupby("Gender").agg({'Purchase ID':['count'],'Price':['mean','sum']}).rename(columns={'count':'Purchase Count','mean':'Average Purchase Price','sum':'Total Purchase Value'})
# create a dataframe to hold calculations
purchase_analysis_by_gender = pd.DataFrame(purchase_analysis)
# Drop first row of column header (flatten the MultiIndex from .agg)
purchase_analysis_by_gender.columns = purchase_analysis_by_gender.columns.droplevel(0)
# Join the dataframes to get all columns to calculate Avg Total Purchase per Person
purchase_analysis_all = pd.merge(gender_demographics,purchase_analysis_by_gender,on=['Gender'])
# calculate Avg Total Purchase per Person
purchase_analysis_all["Avg Total Purchase per Person"]=purchase_analysis_all["Total Purchase Value"]/purchase_analysis_all["Total Count"]
# Keep only those columns necessary for display
purchase_analysis_all = purchase_analysis_all[['Purchase Count','Average Purchase Price','Total Purchase Value','Avg Total Purchase per Person']]
# Format the column values before displaying
purchase_analysis_all['Average Purchase Price']=purchase_analysis_all['Average Purchase Price'].map("${:.2f}".format)
purchase_analysis_all['Total Purchase Value']=purchase_analysis_all['Total Purchase Value'].map("${:.2f}".format)
purchase_analysis_all['Avg Total Purchase per Person']=purchase_analysis_all['Avg Total Purchase per Person'].map("${:.2f}".format)
# display dataframe
purchase_analysis_all
# +
# Age Demographics
# Establish bins for ages
# NOTE(review): ages above 45 fall outside the last bin and would be dropped
# (NaN group) — confirm the data's maximum age is <= 45.
age_bins = [0,9,14,19,24,29,34,39,45]
age_groups = ['<10','10-14','15-19','20-24','25-29','30-34','35-39','40+']
# Categorize the existing players using the age bins. Hint: use pd.cut()
age_group_split = purchase_data.groupby(pd.cut(purchase_data["Age"],bins=age_bins,labels=age_groups))
# Calculate the numbers and percentages by age group
totalcount_by_age=age_group_split.SN.nunique()
percentage_players=age_group_split.SN.nunique()/len(pd.unique(purchase_data["SN"]))*100
# Create a summary data frame to hold the results
age_demographics = pd.merge(totalcount_by_age,percentage_players,on=["Age"])
# rename columns (merge suffixes the two SN columns as SN_x / SN_y)
age_demographics.rename(columns={'SN_x':'Total Count','SN_y':'Percentage of Players'},inplace=True)
# round the percentage column to two decimal points
age_demographics["Percentage of Players"] = age_demographics["Percentage of Players"].map("{:.2f}%".format)
# Display Age Demographics Table
age_demographics
# +
# Purchasing Analysis (Age)
# Run basic calculations to obtain purchase count, avg. purchase price, avg. purchase total per person etc. in the table below
purchase_count = age_group_split["Purchase ID"].count()
total_purchase_value=age_group_split["Price"].sum()
average_purchase_price=age_group_split["Price"].sum()/age_group_split["Purchase ID"].count()
average_total_purchase_per_person=age_group_split["Price"].sum()/age_group_split.SN.nunique()
# merge series into dataframes
purchase_analysis_merge1 = pd.concat([purchase_count,average_purchase_price],axis=1)
purchase_analysis_merge2 = pd.concat([total_purchase_value,average_total_purchase_per_person],axis=1)
# Create a summary data frame by merging two dataframes to hold the results
purchase_analysis_by_age = pd.merge(purchase_analysis_merge1,purchase_analysis_merge2,on=["Age"])
# rename columns (the unnamed ratio series come through as 0_x / 0_y)
purchase_analysis_by_age.rename(columns={'Purchase ID':'Purchase Count','0_x':'Average Purchase Price','Price':'Total Purchase Value','0_y':'Average Total Purchase per Person'},inplace=True)
# format columns
purchase_analysis_by_age['Total Purchase Value'] = purchase_analysis_by_age['Total Purchase Value'].map("${:.2f}".format)
purchase_analysis_by_age['Average Purchase Price'] = purchase_analysis_by_age['Average Purchase Price'].map("${:.2f}".format)
purchase_analysis_by_age['Average Total Purchase per Person'] = purchase_analysis_by_age['Average Total Purchase per Person'].map("${:.2f}".format)
# Display the summary data frame
purchase_analysis_by_age
# +
# Top Spenders
# Run basic calculations
spenders_by_SN=purchase_data.groupby("SN").agg({'Purchase ID':['count'],'Price':['mean','sum']}).rename(columns={'count':'Purchase Count','mean':'Average Purchase Price','sum':'Total Purchase Value'})
# create a dataframe to hold calculations
top_spender_analysis = pd.DataFrame(spenders_by_SN)
# Drop first row of column header (flatten the MultiIndex from .agg)
top_spender_analysis.columns = top_spender_analysis.columns.droplevel(0)
# Sort the purchase value column in descending order
top5_spenders = top_spender_analysis.sort_values(by=["Total Purchase Value"],ascending=False).head()
# give the displayed data cleaner formatting
top5_spenders['Total Purchase Value'] = top5_spenders['Total Purchase Value'].map("${:.2f}".format)
top5_spenders['Average Purchase Price'] = top5_spenders['Average Purchase Price'].map("${:.2f}".format)
# Display a preview of the summary data frame
top5_spenders
# +
# Most Popular Items
# Group by Item ID and Item Name. Perform calculations to obtain purchase count, average item price, and total purchase value
purchase_data_by_itemidname = purchase_data.groupby(['Item ID', 'Item Name'])
popular_items = purchase_data_by_itemidname.agg({'Purchase ID':['count'],'Price':['mean','sum']}).rename(columns={'count':'Purchase Count','mean':'Item Price','sum':'Total Purchase Value'})
# Drop first row of column header
popular_items.columns = popular_items.columns.droplevel(0)
# Sort the purchase count column in descending order
popular_items_summary = popular_items.sort_values(by=["Purchase Count"],ascending=False).head()
# give the displayed data cleaner formatting
popular_items_summary['Total Purchase Value'] = popular_items_summary['Total Purchase Value'].map("${:.2f}".format)
popular_items_summary['Item Price'] = popular_items_summary['Item Price'].map("${:.2f}".format)
# Display a preview of the summary data frame
popular_items_summary
# +
# Most Profitable Items
# Sort the purchase value column in descending order (reuses the unformatted popular_items frame)
profitable_items_summary = popular_items.sort_values(by=["Total Purchase Value"],ascending=False).head()
# give the displayed data cleaner formatting
profitable_items_summary['Total Purchase Value'] = profitable_items_summary['Total Purchase Value'].map("${:.2f}".format)
profitable_items_summary['Item Price'] = profitable_items_summary['Item Price'].map("${:.2f}".format)
# Display a preview of the summary data frame
profitable_items_summary
| heroesofpymoli/heroesofpymoli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Environment (conda_anaconda3)
# language: python
# name: conda_anaconda3
# ---
# # Using FFT to do convolution.
# [Source code link from StackOverflow](https://stackoverflow.com/questions/40703751/using-fourier-transforms-to-do-convolution?utm_medium=organic&utm_source=google_rich_qa&utm_campaign=google_rich_qa)
# +
import sys
from scipy import signal
from scipy import linalg
import numpy as np
x = [[1 , 0 , 0 , 0] , [0 , -1 , 0 , 0] , [0 , 0 , 3 , 0] , [0 , 0 , 0 , 1]]
x = np.array(x)
y = [[4 , 5] , [3 , 4]]
y = np.array(y)
# Reference result: direct full 2-D convolution.
standard_conv = signal.convolve2d(x , y , 'full')
print("conv:" , standard_conv)
s1 = np.array(x.shape)
s2 = np.array(y.shape)
# Full linear convolution output size, padded up to the next power of two so
# the circular (FFT) convolution equals the linear one.
size = s1 + s2 - 1
fsize = 2 ** np.ceil(np.log2(size)).astype(int)
fslice = tuple([slice(0, int(sz)) for sz in size])
# Along each axis, if the given shape (fsize) is smaller than that of the input, the input is cropped.
# If it is larger, the input is padded with zeros. if s is not given, the shape of the input along the axes
# specified by axes is used.
new_x = np.fft.fft2(x, fsize)
new_y = np.fft.fft2(y, fsize)
# Pointwise product in the frequency domain == convolution; crop back to the
# linear-convolution size.
result = np.fft.ifft2(new_x*new_y)[fslice].copy()
result_int = np.array(result.real , np.int32)
my_result = np.array(result, np.double)
print("my_result (doubles): ", my_result)
print("fft for my method (ints):" , result_int)
print("is my method correct (for ints): ", np.array_equal(result_int, standard_conv))
print("fft for my method (doubles):" , result)
print("fft with int32 output:" , np.array(signal.fftconvolve(x ,y) , np.int32))
lib_result = np.array(signal.fftconvolve(x, y) , np.double)
print("fft with double output:" , np.allclose(my_result, lib_result, atol=1e-12))
# the correct way is to take the amplitude: the abs of a complex number gives us its amplitude/mangnitude
lib_magnitude = np.abs(signal.fftconvolve(x, y))
print("lib_magnitude: ", lib_magnitude)
my_magnitude = np.abs(result)
print("is the magnitude correct: ", np.allclose(my_magnitude, lib_magnitude, atol=1e-12))
# +
import sys
from scipy import signal
from scipy import linalg
import numpy as np

# 1-D direct convolution demo.
time_series = np.array([1, 2, 1, 5, 4])
# BUG FIX: the original assigned the converted ndarray to a misspelled name
# ("fitler"), so the raw Python list — which also shadowed the builtin
# `filter` — was what actually got convolved. The numeric result is the same;
# the kernel is now a properly-named ndarray.
kernel = np.array([1, 3])

standard_conv = signal.convolve(time_series, kernel, mode='full', method='direct')
print("conv:", standard_conv)
# +
import sys
from scipy import signal
from scipy import linalg
import numpy as np
# 1-D cross-correlation demo on the same time series and 2-tap kernel.
time_series = np.array([1, 2, 1, 5, 4])
# BUG FIX: same misspelling as the convolution cell — the ndarray went to
# "fitler" and the list was passed instead. Renamed to avoid shadowing the
# built-in `filter`.
kernel = np.array([1, 3])
standard_conv = signal.correlate(time_series, kernel, mode='full', method='direct')
print("conv:", standard_conv)
# +
# NOTE(review): this cell duplicates the 2-D FFT-convolution demo from the
# top of the notebook. It relies on `x` and `y` still holding the 4x4 / 2x2
# matrices defined there, while `standard_conv` was just overwritten by the
# 1-D correlate cell above — so the `array_equal` check below compares a 2-D
# result against a 1-D array and prints False. Kept byte-identical here.
s1 = np.array(x.shape)
s2 = np.array(y.shape)
size = s1 + s2 - 1  # shape of the 'full' convolution output
fsize = 2 ** np.ceil(np.log2(size)).astype(int)  # round up to powers of two for the FFT
fslice = tuple([slice(0, int(sz)) for sz in size])  # crop back down to the 'full' shape
# Along each axis, if the given shape (fsize) is smaller than that of the input, the input is cropped.
# If it is larger, the input is padded with zeros. if s is not given, the shape of the input along the axes
# specified by axes is used.
new_x = np.fft.fft2(x, fsize)
new_y = np.fft.fft2(y, fsize)
# Convolution theorem: pointwise product in frequency space, then inverse FFT.
result = np.fft.ifft2(new_x*new_y)[fslice].copy()
result_int = np.array(result.real , np.int32)
my_result = np.array(result, np.double)
print("my_result (doubles): ", my_result)
print("fft for my method (ints):" , result_int)
print("is my method correct (for ints): ", np.array_equal(result_int, standard_conv))
print("fft for my method (doubles):" , result)
print("fft with int32 output:" , np.array(signal.fftconvolve(x ,y) , np.int32))
lib_result = np.array(signal.fftconvolve(x, y) , np.double)
print("fft with double output:" , np.allclose(my_result, lib_result, atol=1e-12))
# the correct way is to take the amplitude: the abs of a complex number gives us its amplitude/magnitude
lib_magnitude = np.abs(signal.fftconvolve(x, y))
print("lib_magnitude: ", lib_magnitude)
my_magnitude = np.abs(result)
print("is the magnitude correct: ", np.allclose(my_magnitude, lib_magnitude, atol=1e-12))
| cnns/ConvWithFFT.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from sklearn.metrics import r2_score
import glob
# Load per-subject bout-frequency data. FIX: use a raw string — the original
# 'D:\KNEE\\freq1.csv' contains the invalid escape '\K' (a warning today, a
# SyntaxError-class problem in newer Python); the resulting path is unchanged.
data = pd.read_csv(r'D:\KNEE\freq1.csv')
data = data[data['Freq of bouts']>30]  # keep rows with more than 30 bouts
singular_data = data.groupby('Subject ID ').mean()  # trailing space in 'Subject ID ' matches the sheet header
ids = singular_data.index.values  # subject identifiers after aggregation
# +
# Per-condition measurement files, e.g. "12A...bin.npy": digits, a condition
# letter A/B/C, then "bin.npy".
A_Files = glob.glob('D:\\KNEE\\[0-9]?[A]*bin.npy')
B_Files = glob.glob('D:\\KNEE\\[0-9]?[B]*bin.npy')
C_Files = glob.glob('D:\\KNEE\\[0-9]?[C]*bin.npy')
# allow_pickle is presumably needed because the .npy files hold object
# arrays — TODO confirm against the script that wrote them.
# NOTE(review): np.array over a list of differently-shaped arrays builds an
# object array (and warns on recent NumPy) — confirm ragged data is intended.
C = np.array([np.load(x, allow_pickle = True) for x in C_Files])
B = np.array([np.load(x, allow_pickle = True) for x in B_Files])
A = np.array([np.load(x, allow_pickle = True) for x in A_Files])
# +
# Pull the duration / volume / intensity event arrays out of each subject's
# record; assumes the record layout is A[i][1][2..4] — TODO confirm against
# the writer of the .npy files.
X_dur = [y[1][2] for y in A]
X_Vol = [y[1][3] for y in A]
X_Int = [y[1][4] for y in A]
# For each subject: fraction of the total carried by events exceeding each
# threshold i (a survival-curve-style cumulative summary per quantity).
K_dur =[[np.sum(Z[Z>i])/np.sum(Z) for i in np.arange(3000)] for Z in X_dur]
K_Vol =[[np.sum(Z[Z>i])/np.sum(Z) for i in np.arange(1500)] for Z in X_Vol]
K_Int =[[np.sum(Z[Z>i])/np.sum(Z) for i in np.arange(0,0.5, 0.01)] for Z in X_Int]
# +
# Mean curve across subjects with a ±1 SD band, for each of the three
# cumulative summaries computed above.
plt.plot(np.arange(3000), np.mean(K_dur, 0))
plt.fill_between(np.arange(3000),np.mean(K_dur, 0)-np.std(K_dur, 0) ,np.mean(K_dur, 0)+np.std(K_dur, 0) , alpha = 0.5)
plt.ylabel('Cumulative Active Duration')
plt.xlabel('Duration of Active events')
plt.title('Plot of cumulative active Duration against Duration of events')
plt.show()
plt.plot(np.arange(1500), np.mean(K_Vol, 0))
plt.fill_between(np.arange(1500),np.mean(K_Vol, 0)-np.std(K_Vol, 0) ,np.mean(K_Vol, 0)+np.std(K_Vol, 0) , alpha = 0.5)
plt.ylabel('Cumulative Active Volume')
plt.xlabel('Volume of Active events')
plt.title('Plot of cumulative active volume against volume of events')
plt.show()
# K_Int has 50 thresholds (0..0.5 in 0.01 steps); the tick labels relabel the
# 0-49 index axis as 0-450, presumably raw intensity units — TODO confirm the
# scaling matches the units of X_Int.
plt.plot(np.arange(50), np.mean(K_Int, 0))
plt.fill_between(np.arange(50),np.mean(K_Int, 0)-np.std(K_Int, 0) ,np.mean(K_Int, 0)+np.std(K_Int, 0) , alpha = 0.5)
plt.xticks([0, 5, 10, 15, 20, 25, 30, 35, 40, 45], ('0', '50', '100','150', '200', '250', '300','350', '400', '450'))
plt.ylabel('Cumulative Active Int')
plt.xlabel('Int of Active events')
plt.title('Plot of cumulative active Int against Int of events')
plt.show()
| Granatt plots.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <img src="images/usm.jpg" width="480" height="240" align="left"/>
# + [markdown] slideshow={"slide_type": "slide"}
# # MAT281 - Visualización Declarativa
# + [markdown] slideshow={"slide_type": "slide"}
# ## Objetivos de la clase
#
# * Comprender el estilo de visualización declarativa.
# * Crear gráficos con la librería declarativa Seaborn.
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Contenidos
# * [Visualización Declarativa](#declarative)
# * [Seaborn](#seaborn)
# * [Gráfico a Gráfico](#plot-plot)
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## I.- Visualización Declarativa
# <a id='declarative'></a>
#
#
#
#
# Es un paradigma de visualización en donde se busca preocuparse de los datos y sus relaciones, más que en detalles sin mayor importancia. Algunas características son:
#
# * Se especifica lo que se desea hacer.
# * Los detalles se determinan automáticamente.
# * Especificación y Ejecución están separadas.
#
# A modo de resumen, se refiere a construir visualizaciones a partir de los siguientes elementos:
#
# * _Data_
# * _Transformation_
# * _Marks_
# * _Encoding_
# * _Scale_
# * _Guides_
# -
# ### Diferencias entre enfoques
#
# | Imperativa | Declarativa |
# | ------|------------ |
# | Especificar _cómo_ se debe hacer algo | Especificar _qué_ se quiere hacer |
# | Especificación y ejecución entrelazadas | Separar especificación de ejecución |
# | _Colocar un círculo rojo aquí y un círculo azul acá_ | _Mapear `x` como posición e `y` como el color_ |
#
# ### Ejemplo
# El **Iris dataset** es un conjunto de datos que contiene muestras de tres especies de Iris (Iris setosa, Iris virginica e Iris versicolor). Se midieron cuatro rasgos de cada muestra: el largo y ancho del sépalo y pétalo, en centímetros.
#
# Este ejemplo servirá para mostrar una de las mayores diferencias entre una visualización imperativa (como `matplotlib`) versus una declarativa (como `seaborn`).
#
# <img src="./images/logo_iris.jpg" width="360" height="360" align="center"/>
# +
# librerias
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option('display.max_columns', 500) # show more dataframe columns when displayed
# Render matplotlib figures inline in jupyter notebook/lab
# %matplotlib inline
# +
# Load the iris dataset from ./data and normalize column names to camelCase.
iris_df = pd.read_csv(os.path.join("data","iris.csv"))
iris_df.columns = ['sepalLength',
                   'sepalWidth',
                   'petalLength',
                   'petalWidth',
                   'species']
iris_df.head()
# -
# El ejemplo clásico consiste en graficar _sepalWidth_ versus _petalLength_ y colorear por especie.
# ### Imperativo
#
# En `matplotlib` sería algo así:
# +
# Imperative (matplotlib) version: we manage colours, the per-species loop
# and the legend ourselves. One colour per species, in order of first
# appearance in the data.
species_colors = dict(zip(iris_df["species"].unique(),
                          ["blue", "green", "red"]))

plt.figure(figsize=(10, 6))
for name, subset in iris_df.groupby("species"):
    plt.scatter(subset["petalLength"], subset["sepalWidth"],
                color=species_colors[name], alpha=0.3,
                edgecolor=None, label=name)
plt.legend(frameon=True, title="species")
plt.xlabel("petalLength")
plt.ylabel("sepalWidth")
plt.show()
# -
# ### Declarativo
#
# En `seaborn` sería algo así:
# +
# Declarative version: map dataframe columns to aesthetics and let seaborn
# handle grouping, colours and the legend.
sns.set(rc={'figure.figsize':(10,8)})
sns.scatterplot(
    x='petalLength',
    y='sepalWidth',
    data=iris_df,
    hue='species',  # one colour per species
    palette = ['blue', 'green', 'red']
)
# -
# ## II.- Seaborn
# <a id='seaborn'></a>
#
# <img src="images/seaborn.png" alt="" width="200" align="middle"/>
#
# `Matplotlib` ha demostrado ser una herramienta de visualización increíblemente útil y popular, pero incluso los usuarios entusiastas admitirán que a menudo deja mucho que desear. Hay varias quejas válidas sobre Matplotlib que a menudo surgen:
#
# * Antes de la versión 2.0, los valores predeterminados de Matplotlib no son exactamente las mejores opciones. Se basó en MATLAB alrededor de 1999, y esto a menudo se nota.
#
#
# * La API de Matplotlib es de nivel relativamente bajo. Es posible realizar una visualización estadística sofisticada, pero a menudo requiere mucho código repetitivo.
# Matplotlib fue anterior a Pandas en más de una década y, por lo tanto, no está diseñado para su uso con Pandas DataFrames. Para visualizar datos de un Pandas DataFrame, debe extraer cada Serie y, a menudo, concatenarlas juntas en el formato correcto. Sería mejor tener una biblioteca de trazado que pueda usar inteligentemente las etiquetas de DataFrame en un trazado.
#
# Una respuesta a estos problemas es `Seaborn`. Seaborn proporciona una API sobre Matplotlib que ofrece opciones sensatas para el estilo de trazado y los valores predeterminados de color, define funciones simples de alto nivel para tipos de trazado estadísticos comunes, y se integra con la funcionalidad proporcionada por Pandas DataFrames.
#
# ## III.- Gráfico a Gráfico
# <a id='plot-plot'></a>
#
# Para mostrar el funcionamiento de seaborn, se ocupa el conjunto de datos: **pokemon.csv**. En el caso de `seaborn`, los gráficos serán generados directamente desde el dataframe.
#
# <img src="./images/logo_pokemon.png" width="480" height="240" align="center"/>
#
# Load the pokemon table (comma-separated) from ./data.
pokemon_data = pd.read_csv(os.path.join("data","pokemon.csv"), sep=",")
pokemon_data.head()
# ### a) Gráfico de línea y puntos
#
# Realizar un gráfico de línea y otro de puntos para analizar el ataque vs defensa de todos los pokemones separados por generación.
# +
# Line plot of Defense vs Attack, one colour per generation.
palette = sns.color_palette("hls", 6)  # 6 distinct hues, one per generation
sns.lineplot(
    x='Attack',
    y='Defense',
    hue='Generation',  # colour by Generation
    data=pokemon_data,
    ci = None,  # no confidence band; NOTE(review): `ci` is deprecated in seaborn >= 0.12 (use errorbar=None) — confirm the pinned version
    palette=palette
)
# -
# Scatter version of the same mapping; reuses `palette` from the cell above.
sns.scatterplot(
    x='Attack',
    y='Defense',
    hue='Generation',  # colour by Generation
    data=pokemon_data,
    palette=palette
)
# ### b) Boxplot
#
# Realizar un gráfico box plot sobre los stats de los pokemones.
# +
# Pre-format DataFrame: drop the non-stat columns so only numeric battle
# stats remain.
stats_df = pokemon_data.drop(['#', 'Generation', 'Legendary'], axis=1)
# New boxplot using stats_df: one box per stat column.
sns.boxplot(data=stats_df)
# -
# ### c) Mapas de calor
#
# Realizar un mapa de calor sobre los stats de los pokemones.
# +
# Calculate pairwise correlations between the numeric stats.
corr = stats_df.corr()
# Heatmap of the correlation matrix.
sns.heatmap(corr)
# -
# ### d) Histogramas
#
# Realizar un histograma del stat **attack**.
# Distribution Plot (a.k.a. Histogram) of the Attack stat.
# FIX: `sns.distplot` was deprecated in seaborn 0.11 and later removed; the
# axes-level replacement is `histplot`. kde=True and stat="density" keep the
# density-normalized histogram + KDE curve that distplot drew by default.
sns.histplot(pokemon_data.Attack, kde=True, stat="density")
# ### e) Barplot
#
# Realizar un bar plot sobre la cantidad de pokemones que hay por generación
# Count pokemon per generation.
# IMPROVEMENT: `groupby(...).size()` counts rows per group directly and
# replaces the original `apply(lambda x: len(x))`, which is slower and goes
# through the generic (and deprecation-prone) GroupBy.apply machinery.
# The resulting columns are the same: ['Generation', 'Count'].
df_generation = pokemon_data.groupby('Generation').size().reset_index(name='Count')
df_generation.head()
# plot seaborn: barplot of pokemon counts per generation, using the
# df_generation table built in the previous cell.
sns.barplot(
    x='Generation',
    y='Count',
    data=df_generation
)
# ### f) countplot
#
# Realizar un conteo sobre los distintos tipos **Type 1** de pokemones.
# +
# Count Plot (a.k.a. Bar Plot)
# Fixed palette applied to the 'Type 1' categories in x-axis order.
# NOTE(review): 15 colours are listed here; if the dataset has more 'Type 1'
# categories than colours, seaborn recycles them — confirm this is intended.
pkmn_type_colors = ['#78C850',  # Grass
                    '#F08030',  # Fire
                    '#6890F0',  # Water
                    '#A8B820',  # Bug
                    '#A8A878',  # Normal
                    '#A040A0',  # Poison
                    '#F8D030',  # Electric
                    '#E0C068',  # Ground
                    '#EE99AC',  # Fairy
                    '#C03028',  # Fighting
                    '#F85888',  # Psychic
                    '#B8A038',  # Rock
                    '#705898',  # Ghost
                    '#98D8D8',  # Ice
                    '#7038F8',  # Dragon
                   ]
sns.countplot(x='Type 1',
              data=pokemon_data,
              palette=pkmn_type_colors)
# Rotate x-labels so the type names do not overlap
plt.xticks(rotation=-45)
# -
# ### g) Factor plot
#
# Realizar un catplot de los distintos tipos de pokemones para la generación **Type 1**, analizando si el pokemón es legendario o no.
# +
# Factor Plot: swarm catplot of Attack by primary type, split into
# Legendary / non-Legendary panels.
g = sns.catplot(x='Type 1',
                y='Attack',
                data=pokemon_data,
                hue='Legendary', # Color by Legendary status
                col='Legendary', # One panel per Legendary value
                kind='swarm') # Swarmplot
# Rotate x-axis labels via the FacetGrid so every panel is rotated
g.set_xticklabels(rotation=-45)
# Doesn't work because plt.xticks only rotates the last panel
# plt.xticks(rotation=-45)
# -
# ### h) Customizando con Matplotlib.
#
# Seaborn es una interfaz de alto nivel para Matplotlib. Según nuestra experiencia, Seaborn lo llevará a la mayor parte del camino, pero a veces necesitará traer Matplotlib.
#
# Establecer los límites de los ejes es uno de esos momentos, pero el proceso es bastante simple:
#
# Primero, usar la función lmplotde Seaborn de manera normal.
# Luego, use las funciones de customización de Matplotlib. En este caso, usaremos sus funciones *ylim ()* y *xlim ()*.
# Aquí está nuestro nuevo diagrama de dispersión con límites de ejes sensibles:
# +
# plot seaborn: lmplot with the regression line disabled (fit_reg=False),
# which leaves a plain scatter coloured by generation.
sns.lmplot(x='Attack',
           y='Defense',
           data=pokemon_data,
           fit_reg=False,
           height=8,
           hue='Generation')
# use Matplotlib to clamp both axes to start at zero (upper bound automatic)
plt.ylim(0, None)
plt.xlim(0, None)
# -
# ## Referencia
#
# 1. [Gallery-seaborn](https://seaborn.pydata.org/)
#
| labs/05_visualizacion/05_visualizacion_declarativa.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# ---
# +
import panel as pn
# Load Panel's notebook front-end resources so widgets render inline.
pn.extension()
# -
# The ``CheckBoxGroup`` widget allows selecting between a list of options by ticking the corresponding checkboxes. It falls into the broad category of multi-option selection widgets that provide a compatible API and include the [``MultiSelect``](MultiSelect.ipynb), [``CrossSelector``](CrossSelector.ipynb) and [``CheckButtonGroup``](CheckButtonGroup.ipynb) widgets.
#
# For more information about listening to widget events and laying out widgets refer to the [widgets user guide](../../user_guide/Widgets.ipynb). Alternatively you can learn how to build GUIs by declaring parameters independently of any specific widgets in the [param user guide](../../user_guide/Param.ipynb). To express interactivity entirely using Javascript without the need for a Python server take a look at the [links user guide](../../user_guide/Links.ipynb).
#
# #### Parameters:
#
# For layout and styling related parameters see the [customization user guide](../../user_guide/Customization.ipynb).
#
# ##### Core
#
# * **``options``** (list or dict): List or dictionary of options
# * **``value``** (list): Currently selected options
#
# ##### Display
#
# * **``disabled``** (boolean): Whether the widget is editable
# * **``inline``** (boolean): Whether to arrange the items vertically in a column (``False``) or horizontally in a line (``True``)
# * **``name``** (str): The title of the widget
#
# ___
# +
# A checkbox group with two options pre-ticked; inline=True lays the
# checkboxes out horizontally instead of in a vertical column.
checkbox_group = pn.widgets.CheckBoxGroup(
    name='Fruits', value=['Apple', 'Pear'], options=['Apple', 'Banana', 'Pear', 'Strawberry'],
    inline=True)
checkbox_group
# -
# ``CheckBoxGroup.value`` returns a list of the currently selected options:
checkbox_group.value  # list of the currently ticked option labels
| examples/reference/widgets/CheckBoxGroup.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Etablere analysegrunnlag
# Tilrettelegge data for å gjøre en analyse med formidlingsfokus. Dette er et *automatisert* prosessteg. Tilretteleggingen av data omfatter beregning av aggregater og å lage tabeller for analyseformål. Prosessteget kan også inkludere strukturering, integrering av data, avlede nye variabler, lage indikatorer, utføre indeksberegninger og sesongjustere.
#
# Kan inkludere følgende prosessteg:
# - Hente klargjorte data
# - Beregne aggregater
# - Lage analysetabeller - produsere tabeller for analyseformål
# - Lagre analysegrunnlaget
# - Strukture
# - Integrere
# - Avlede
# - Lage indikatorer
# - Lage indekser
# - Sesongjustere
# - Gjøre beregninger
# - Kontrollere
# ### Hente klargjorte data
| notebooks/Statistikkproduksjon/4_etablere_analysegrunnlag/etablere_analysegrunnlag.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# formats: jl:light,ipynb
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.4.2
# language: julia
# name: julia-1.4
# ---
# # Hartree-Fock Self-Consistent Field Theory
# +
"""Tutorial implementing a basic Hartree--Fock SCF program."""
__authors__ = "<NAME>", "<NAME>"
__credits__ = "<NAME>", "<NAME>"
__email__ = "<EMAIL>"
__copyright__ = "(c) 2014-2020, The Psi4Julia Developers"
__license__ = "BSD-3-Clause"
__date__ = "07/28/2020"
# -
# ## I. Theoretical Overview
# In this tutorial, we will seek to introduce the theory and implementation of the quantum chemical method known as Hartree-Fock Self-Consistent Field Theory (HF-SCF) with restricted orbitals and closed-shell systems (RHF). This theory seeks to solve the pseudo-eigenvalue matrix equation
#
# $$\sum_{\nu} F_{\mu\nu}C_{\nu i} = \epsilon_i\sum_{\nu}S_{\mu\nu}C_{\nu i}$$
# $${\bf FC} = {\bf SC\epsilon},$$
#
# called the Roothan equations, which can be solved self-consistently for the orbital coefficient matrix **C** and the orbital energy eigenvalues $\epsilon_i$. The Fock matrix, **F**, has elements $F_{\mu\nu}$ given (in the atomic orbital basis) as
#
# $$F_{\mu\nu} = H_{\mu\nu} + 2(\mu\,\nu\left|\,\lambda\,\sigma)D_{\lambda\sigma} - (\mu\,\lambda\,\right|\nu\,\sigma)D_{\lambda\sigma},$$
#
# where $D_{\lambda\sigma}$ is an element of the one-particle density matrix **D**, constructed from the orbital coefficient matrix **C**:
#
# $$D_{\lambda\sigma} = C_{\sigma i}C_{\lambda i}$$
#
# Formally, the orbital coefficient matrix **C** is a $N\times M$ matrix, where $N$ is the number of atomic basis functions, and $M$ is the total number of molecular orbitals. Physically, this matrix describes the contribution of every atomic basis function (columns) to a particular molecular orbital (e.g., the $i^{\rm th}$ row). The density matrix **D** is a square matrix describing the electron density contained in each orbital. In the molecular orbital basis, the density matrix has elements
#
# $$D_{pq} = \left\{
# \begin{array}{ll}
# 2\delta_{pq} & p\; {\rm occupied} \\
# 0 & p\; {\rm virtual} \\
# \end{array}\right .$$
#
# The total RHF energy is given by
#
# $$E^{\rm RHF}_{\rm total} = E^{\rm RHF}_{\rm elec} + E^{\rm BO}_{\rm nuc},$$
#
# where $E^{\rm RHF}_{\rm elec}$ is the final electronic RHF energy, and $E^{\rm BO}_{\rm nuc}$ is the total nuclear repulsion energy within the Born-Oppenheimer approximation. To compute the electronic energy, we may use the density matrix in the AO basis:
#
# $$E^{\rm RHF}_{\rm elec} = (F_{\mu\nu} + H_{\mu\nu})D_{\mu\nu},$$
#
# and the nuclear repulsion energy is simply
#
# $$E^{\rm BO}_{\rm nuc} = \sum_{A>B}\frac{Z_AZ_B}{r_{AB}}$$
#
# where $Z_A$ is the nuclear charge of atom $A$, and the sum runs over all unique nuclear pairs.
# ## II. Implementation
#
# Using the above overview, let's write a RHF program using <span style="font-variant: small-caps"> Psi4 </span>, NumPy, TensorOperations, and Julia's LinearAlgebra standard library. First, we need to import these modules:
# ==> Import Psi4 & NumPy <==
using PyCall: pyimport
psi4 = pyimport("psi4")
np = pyimport("numpy") # used only to cast to Psi4 arrays
using TensorOperations: @tensor
using LinearAlgebra: Diagonal, Hermitian, eigen, tr
using Printf: @printf
# Next, using what you learned in the previous tutorial module, set the following <span style="font-variant: small-caps"> Psi4 </span> and molecule options.
#
# Memory & Output specifications:
# - Give 500 Mb of memory to Psi4
# - Set Psi4 output file to "output.dat"
# - Set a variable `numpy_memory` to an acceptable amount of available memory for the working computer to use for storing tensors
#
# Molecule definition:
# - Define the "physicist's water molecule" (O-H bond length = 1.1 Angstroms, HOH bond angle = 104 degrees)
# - Molecular symmetry C1
#
# Computation options:
# - basis set cc-pVDZ
# - SCF type PK
# - Energy convergence criterion to 0.00000001
#
# +
# ==> Set Basic Psi4 Options <==

# Memory specification: 500 MB for Psi4's internal buffers.
psi4.set_memory(Int(5e8))
numpy_memory = 2  # cap (GB) allowed for tensors held on the Julia/NumPy side

# Route Psi4's text output to a file instead of the notebook.
psi4.core.set_output_file("output.dat", false)

# Define Physicist's water -- don't forget C1 symmetry!
# (Z-matrix: O-H = 1.1 Angstrom, H-O-H = 104 degrees)
mol = psi4.geometry("""
O
H 1 1.1
H 1 1.1 2 104
symmetry c1
""")

# Set computation options: cc-pVDZ basis, PK integrals, tight energy convergence.
psi4.set_options(Dict("basis" => "cc-pvdz",
                      "scf_type" => "pk",
                      "e_convergence" => 1e-8))
# -
# Since we will be writing our own, iterative RHF procedure, we will need to define options that we can use to tweak our convergence behavior. For example, if something goes wrong and our SCF doesn't converge, we don't want to spiral into an infinite loop. Instead, we can specify the maximum number of iterations allowed, and store this value in a variable called `maxiter`. Here are some good default options for our program:
# ~~~python
# MAXITER = 40
# E_conv = 1.0e-6
# ~~~
# These are by no means the only possible values for these options, and it's encouraged to try different values and see for yourself how different choices affect the performance of our program. For now, let's use the above as our default.
# ==> Set default program options <==
# Maximum SCF iterations before we abort instead of looping forever.
MAXITER = 40
# Energy convergence criterion (Hartree) for the SCF loop.
E_conv = 1.0e-6;
# Before we can build our Fock matrix, we'll need to compute the following static one- and two-electron quantities:
#
# - Electron repulsion integrals (ERIs) **I** between our AOs
# - Overlap matrix **S**
# - Core Hamiltonian matrix **H**
#
# Fortunately for us, we can do this using the machinery in <span style='font-variant: small-caps'> Psi4</span>. In the first module, you learned about `psi4.core.Wavefunction` and `psi4.core.MintsHelper` classes. In the cell below, use these classes to perform the following:
#
# 1. Create Class Instances
#
# a. Build a wavefunction for our molecule and basis set
#
# b. Create an instance of the `MintsHelper` class with the basis set for the wavefunction
#
# 2. Build overlap matrix, **S**
#
# a. Get the AO overlap matrix from `MintsHelper`, and cast it into a Julia array
#
# b. Get the number of AO basis functions and number of doubly occupied orbitals from S and the wavefunciton
#
# 3. Compute ERI Tensor, **I**
#
# a. Get ERI tensor from `MintsHelper`, and cast it into a Julia array
#
# 4. Build core Hamiltonian, **H**
#
# a. Get AO kinetic energy matrix from `MintsHelper`, and cast it into a Julia array
#
# b. Get AO potential energy matrix from `MintsHelper`, and cast it into a Julia array
#
# c. Build core Hamiltonian from kinetic & potential energy matrices
# +
# ==> Compute static 1e- and 2e- quantities with Psi4 <==

# Class instantiation: wavefunction for the molecule/basis, and the
# integral helper built on its basis set.
wfn = psi4.core.Wavefunction.build(mol, psi4.core.get_global_option("basis"))
mints = psi4.core.MintsHelper(wfn.basisset())

# Overlap matrix
S = np.asarray(mints.ao_overlap()) # we only need a copy

# Number of basis Functions & doubly occupied orbitals
nbf = size(S)[1]
ndocc = wfn.nalpha()  # closed-shell RHF: doubly occupied count == alpha electrons

println("Number of occupied orbitals: ", ndocc)
println("Number of basis functions: ", nbf)

# Memory check for ERI tensor: nbf^4 doubles at 8 bytes each, in GB.
I_size = nbf^4 * 8.e-9
println("\nSize of the ERI tensor will be $I_size GB.")
memory_footprint = I_size * 1.5  # headroom factor for intermediates
if I_size > numpy_memory
    psi4.core.clean()
    # BUG FIX: Julia's OutOfMemoryError() takes no constructor arguments, so
    # the original throw(OutOfMemoryError(msg)) itself raised a MethodError.
    # error(...) raises a plain ErrorException carrying the message.
    error("Estimated memory utilization ($memory_footprint GB) exceeds " *
          "allotted memory limit of $numpy_memory GB.")
end

# Build ERI Tensor (nbf x nbf x nbf x nbf)
I = np.asarray(mints.ao_eri()) # we only need a copy

# Build core Hamiltonian: kinetic + nuclear-attraction integrals
T = np.asarray(mints.ao_kinetic()) # we only need a copy
V = np.asarray(mints.ao_potential()) # we only need a copy
H = T + V;
# -
# The Roothan equations
#
# $${\bf FC} = {\bf SC\epsilon}$$
#
# are only *pseudo*-eigenvalue equations due to the presence of the overlap matrix **S** on the right hand side of the equation. Normally, the AO basis set will not be orthonormal, so the overlap matrix **S** will not be unity and therefore cannot be ignored. Let's check to see whether our AO basis is orthonormal:
# ==> Inspecting S for AO orthonormality <==
# If the AO basis were orthonormal, the overlap S would equal the identity.
hope = S ≈ Diagonal(ones(size(S)[1]))
println("\nDo we have any hope that our AO basis is orthonormal? ", hope)
# Just as we'd expected -- looks like we can't ignore the AO overlap matrix. Therefore, the Fock matrix **F** cannot simply be diagonalized to solve for the orbital coefficient matrix **C**. There is still hope, however! We can overcome this issue by transforming the AO basis so that all of our basis functions are orthonormal. In other words, we seek a matrix **A** such that the transformation
#
# $${\bf A}^{\dagger}{\bf SA} = {\bf 1}$$
#
# One method of doing this is called *symmetric orthogonalization*, which lets ${\bf A} = {\bf S}^{-1/2}$. Then,
#
# $${\bf A}^{\dagger}{\bf SA} = {\bf S}^{-1/2}{\bf SS}^{-1/2} = {\bf S}^{-1/2}{\bf S}^{1/2} = {\bf S}^0 = {\bf 1},$$
#
# and we see that this choice for **A** does in fact yield an orthonormal AO basis. In the cell below, construct this transformation matrix using <span style='font-variant: small-caps'> Psi4</span>'s built-in `Matrix` class member function `power()` just like the following:
# ~~~python
# A = mints.ao_overlap()
# A.power(-0.5, 1.e-16)
# A = np.asarray(A)
# ~~~
# +
# ==> Construct AO orthogonalization matrix A <==
A = mints.ao_overlap()
A.power(-0.5, 1.e-16) # ≈ Julia's A^(-0.5) after psi4view()
A = np.asarray(A) # we only need a copy

# Check orthonormality: the *transformed* overlap Aᵀ S A must be the identity.
S_p = A * S * A
# BUG FIX: the original tested `S` (which is never the identity for a real
# AO basis), so this check could not detect a faulty transformation matrix.
new_hope = S_p ≈ Diagonal(ones(size(S_p)[1]))

if new_hope
    println("There is a new hope for diagonalization!")
else
    println("Whoops...something went wrong. Check that you've correctly built the transformation matrix.")
    @show sum(S_p) - tr(S_p)
end
# -
# The drawback of this scheme is that we would now have to either re-compute the ERI and core Hamiltonian tensors in the newly orthogonal AO basis, or transform them using our **A** matrix (both would be overly costly, especially transforming **I**). On the other hand, substitute ${\bf C} = {\bf AC}'$ into the Roothan equations:
#
# \begin{align}
# {\bf FAC'} &= {\bf SAC}'{\bf \epsilon}\\
# {\bf A}^{\dagger}({\bf FAC}')&= {\bf A}^{\dagger}({\bf SAC}'){\bf \epsilon}\\
# ({\bf A}^{\dagger}{\bf FA}){\bf C}'&= ({\bf A}^{\dagger}{\bf SA}){\bf C}'{\bf \epsilon}\\
# {\bf F}'{\bf C}' &= {\bf 1C}'{\bf \epsilon}\\
# {\bf F}'{\bf C}' &= {\bf C}'{\bf \epsilon}\\
# \end{align}
#
# Clearly, we have arrived at a canonical eigenvalue equation. This equation can be solved directly for the transformed orbital coefficient matrix ${\bf C}'$ by diagonalizing the transformed Fock matrix, ${\bf F}'$, before transforming ${\bf C}'$ back into the original AO basis with ${\bf C} = {\bf AC}'$.
#
# Before we can get down to the business of using the Fock matrix **F** to compute the RHF energy, we first need to compute the orbital coefficient **C** matrix. But, before we compute the **C** matrix, we first need to build **F**. Wait...hold on a second. Which comes first, **C** or **F**? Looking at the Roothan equations more closely, we see that that both sides depend on the **C** matrix, since **F** is a function of the orbitals:
#
#
# $${\bf F}({\bf C}){\bf C} = {\bf SC\epsilon}\,;\;\;F_{\mu\nu} = H_{\mu\nu} + 2(\mu\,\nu\mid\lambda\,\sigma)C_{\sigma i}C_{\lambda i} - (\mu\,\lambda\,\mid\nu\,\sigma)C_{\sigma i}C_{\lambda i}.$$
#
# Therefore technically, *neither* **F** nor **C** can come first! In order to proceed, we instead begin with a *guess* for the Fock matrix, from which we obtain a guess at the **C** matrix. Without orbital coefficients (and therefore without electron densities), the most logical starting point for obtaining a guess at the Fock matrix is to begin with the only component of **F** that does *not* involve densities: the core Hamiltonian, **H**. Below, using the `eigen()` function, and forcing hermitianess with `Hermitian()`, obtain coefficient and density matrices using the core guess:
#
# 1. Obtain ${\bf F}'$ by transforming the core Hamiltonian with the ${\bf A}$ matrix
# 2. Diagonalize the transformed Fock matrix for $\epsilon$ and ${\bf C}'$
# 3. Use doubly-occupied slice of coefficient matrix to build density matrix
# +
# ==> Compute C & D matrices with CORE guess <==

# Transformed Fock matrix: core Hamiltonian in the orthonormalized AO basis.
F_p = A * H * A

# Diagonalize F_p (Hermitian() guarantees a real, sorted spectrum with
# orthonormal eigenvectors from LinearAlgebra.eigen).
e, C_p = eigen(Hermitian(F_p))

# Transform C_p back into the original AO basis: C = A C'
C = A * C_p

# Grab occupied orbitals (first ndocc columns)
C_occ = C[:, 1:ndocc]

# Build density matrix from occupied orbitals: D = C_occ C_occᵀ
D = C_occ * C_occ' ;
# -
# The final quantity we need to compute before we can proceed with our implementation of the SCF procedure is the Born-Oppenheimer nuclear repulsion energy, $E^{\rm BO}_{\rm nuc}$. We could use the expression given above in $\S$1, however we can also obtain this value directly from <span style='font-variant: small-caps'> Psi4</span>'s `Molecule` class. In the cell below, compute the nuclear repulsion energy using either method.
# ==> Nuclear Repulsion Energy <==
E_nuc = mol.nuclear_repulsion_energy()  # Born-Oppenheimer nuclear repulsion (Hartree), from Psi4
# Within each SCF iteration, we'll have to perform a number of tensor contractions when building the Fock matrix, computing the total RHF energy, and performing several transformations. Since the computational expense of this process is related to the number of unique indices, the most intensive step of computing the total electronic energy will be performing the four-index contractions corresponding to building Coulomb and Exchange matrices **J** and **K**, with elements
#
# \begin{align}
# J[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\,\nu\mid\lambda\,\sigma)D_{\lambda\sigma}\\
# K[D_{\lambda\sigma}]_{\mu\nu} &= (\mu\,\lambda\mid\nu\,\sigma)D_{\lambda\sigma},
# \end{align}
#
# when building the Fock matrix. Fortunately, once **J** and **K** have been built, the Fock matrix may be computed as a simple matrix addition, instead of element-wise:
#
# $$ {\bf F} = {\bf H} + 2{\bf J} - {\bf K} = {\bf H} + {\bf G}.$$
#
# Formation of the **J** and **K** matrices will be the most expensive step of the RHF procedure, scaling with respect to the number of AOs as ${\cal O}(N^4)$. Strategies for building these marices efficiently, as well as different methods for handling these tensor contractions, will be discussed in greater detail in tutorials 2c and 2d in this module, respectively.
#
# Let's now write our SCF iterations according to the following algorithm:
#
# #### Algorithm 1: SCF Iteration
# for `scf_iter` less than `MAXITER`, do:
# 1. Build Fock matrix
# - Build the two-electron Coulomb & Exchange matrix **G**
# - Form the Fock matrix
# 2. RHF Energy
# - Compute total RHF energy
# - If change in RHF energy less than `E_conv`, break
# - Save latest RHF energy as `E_old`
# 3. Compute new orbital guess
# - Transform Fock matrix to orthonormal AO basis
# - Diagonalize ${\bf F}'$ for $\epsilon$ and ${\bf C}'$
# - Back transform ${\bf C}'$ to AO basis
# - Form **D** from occupied orbital slice of **C**
#
# +
# ==> SCF Iterations <==
# Output and pre-iteration energy declarations; the let-block returns the
# converged RHF energy into SCF_E.
SCF_E = let SCF_E = 0.0, E_old = 0.0, D = D
    print("==> Starting SCF Iterations <==\n")

    # Begin Iterations
    for scf_iter in 1:MAXITER
        # Build Fock matrix: G = 2J - K contracted in a single @tensor call,
        # then F = H + G.
        @tensor G[p,q] := (2I[p,q,r,s] - I[p,r,q,s]) * D[r,s]
        F = H + G

        # Compute RHF energy: E = tr((H + F) D) + E_nuc
        SCF_E = tr((H + F) * D) + E_nuc
        @printf("SCF Iteration %3d: Energy = %4.16f dE = %1.5e \n",scf_iter, SCF_E, SCF_E - E_old)

        # SCF Converged?
        if abs(SCF_E - E_old) < E_conv
            break
        end
        E_old = SCF_E

        # Compute new orbital guess: same transform / diagonalize /
        # back-transform cycle as the core guess above.
        F_p = A * F * A
        e, C_p = eigen(Hermitian(F_p))
        C = A * C_p
        C_occ = C[:, 1:ndocc]
        D = C_occ * C_occ'

        # MAXITER exceeded?
        if scf_iter == MAXITER
            psi4.core.clean()
            # BUG FIX: the original threw MethodError("..."), but MethodError
            # has no single-string constructor, so that line itself raised a
            # confusing MethodError. error(...) raises an ErrorException with
            # the intended message.
            error("Maximum number of SCF iterations exceeded.")
        end
    end
    SCF_E # return RHF SCF energy
end

# Post iterations
println("\nSCF converged.")
println("Final RHF Energy: $SCF_E [Eh]")
# -
# Congratulations! You've written your very own Restricted Hartree-Fock program! Finally, let's check your final RHF energy against <span style='font-variant: small-caps'> Psi4</span>:
# Compare to Psi4's own RHF result as a correctness check.
SCF_E_psi = psi4.energy("SCF")  # reference SCF energy computed by Psi4
psi4.compare_values(SCF_E_psi, SCF_E, 6, "SCF Energy")  # must agree to 6 decimals
# ## References
# 1. [[Szabo:1996](http://store.doverpublications.com/0486691861.html)] <NAME> and <NAME>, *Modern Quantum Chemistry*, Introduction to Advanced Electronic Structure Theory. Courier Corporation, 1996.
# 2. [[Levine:2000](https://books.google.com/books?id=80RpQgAACAAJ&dq=levine%20quantum%20chemistry%205th%20edition&source=gbs_book_other_versions)] <NAME>, *Quantum Chemistry*. Prentice-Hall, New Jersey, 5th edition, 2000.
# 3. [[Helgaker:2000](https://books.google.com/books?id=lNVLBAAAQBAJ&pg=PT1067&dq=helgaker+molecular+electronic+structure+theory&hl=en&sa=X&ved=0ahUKEwj37I7MkofUAhWG5SYKHaoPAAkQ6AEIKDAA#v=onepage&q=helgaker%20molecular%20electronic%20structure%20theory&f=false)] <NAME>, <NAME>, and <NAME>, *Molecular Electronic Structure Theory*, John Wiley & Sons, Inc., 2000.
| Tutorials/03_Hartree-Fock/3a_restricted-hartree-fock.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Fire up Turi Create
#
# We always start with this line before using any part of Turi Create
import turicreate
# # Load a tabular data set
sf = turicreate.SFrame('people-example.csv')
# # SFrame basics
sf #we can view first few lines of table
sf.tail() # view end of the table
# # Turi Create visualization
# .show() visualizes any data structure in Turi Create
sf.show()
sf['age'].show()
# # Inspect columns of dataset
sf['Country']
sf['age']
# Some simple columnar operations
sf['age'].mean()
sf['age'].max()
# # Create new columns in our SFrame
sf
sf['Full Name'] = sf['First Name'] + ' ' + sf['Last Name']
sf
sf['age'] * sf['age']
# # Use the apply function to do an advanced transformation to our data
sf['Country']
sf['Country'].show()
def transform_country(country):
    """Normalize a country name.

    Maps the abbreviation 'USA' to 'United States'; any other value is
    returned unchanged (including misspellings such as 'Brasil').
    """
    return 'United States' if country == 'USA' else country
transform_country('Brazil')
transform_country('Brasil')
transform_country('USA')
sf['Country'].apply(transform_country)
sf['Country'] = sf['Country'].apply(transform_country)
sf
sf['Country'].show()
| 1. Machine Learning Foundations/Week 1/Turi-Getting-Started-with-SFrames.ipynb/.ipynb_checkpoints/Turi Getting Started with SFrames-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
os.chdir("I:\\all data")
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
data=pd.read_excel("order data.xlsx",'ListOfOrders')
data.head()
data.info()
data.shape
data.index
data.keys
data.columns
data.columns = ['Order_Id','Order_Date','Cust_Name','City','Country','Region','Segment','Ship_Date','Ship_Mode','State'] #renaming column names
data #renaming Columns name
data['Country'].value_counts() # counting the categorical data
# +
#data1=pd. read_excel("order data.xlsx", 'OrderBreakdown') #loading second sheet
# +
#data1
# -
data['City'].value_counts()
data['Cust_Name'].value_counts()
data['Segment'].value_counts()
data['Ship_Mode'].value_counts()
data['State'].value_counts()
data.isnull().sum()
data[data['Segment']=='Consumer'] #.count()
data[(data['State']=='England') & (data['Region']=='North')]
dataa=pd. read_excel("order data.xlsx", 'OrderBreakdown') #loading second sheet
dataa
# +
start_date = "2013-01-07" #filtering dates from data
end_date = "2016-12-31"
after_start_date = data["Ship_Date"] >= start_date
before_end_date = data["Ship_Date"] <= end_date
between_two_dates = after_start_date & before_end_date
filtered_dates = data.loc[between_two_dates]
print(filtered_dates)
# -
filtered_dates[(filtered_dates['Country']=='United Kingdom') & (filtered_dates['Region']=='North')]
data.groupby(['Region','Segment']).size()
dataa.columns
# BUG FIX: the original line `dataa = [...]` replaced the DataFrame itself with
# a plain Python list, so `dataa.isnull().sum()` below raised AttributeError.
# The intent — as with `data` earlier in this notebook — is to rename the
# columns of the OrderBreakdown sheet.
# NOTE(review): 'Sub_Categorty' looks like a typo for 'Sub_Category'; kept
# as-is to avoid breaking any later references to the misspelled name.
dataa.columns = ['Order_Id','Product_Name','Discount','Sales','Profit','Quantity','Category','Sub_Categorty']
dataa
dataa.isnull().sum()  # count missing values per column
| Slicing grouping and data and time treatment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib
matplotlib.use('Qt5Agg')
from header import *
from mne.stats import spatio_temporal_cluster_1samp_test, spatio_temporal_cluster_test, permutation_cluster_1samp_test, permutation_cluster_test, summarize_clusters_stc
from scipy.stats.distributions import f,t
from tqdm import tqdm
import xarray as xr
#warnings.filterwarnings("ignore",category=DeprecationWarning)
# +
t0 = time.perf_counter()  # wall-clock start, for timing the analysis
task = 'SMEG' #'MIMOSA'
states = ['RS','FA','OM']  # the three experimental states analysed below
subjects = get_subjlist(task)#, include_all=True)
# Subject exclusion lists.
# NOTE(review): `no_blk2` is defined but never used below — confirm intent.
no_blk2 = ['002', '004', '007', '016']
no_mri = ['019', '021']
reject = ['002', '004', '010', '011']
# Drop subjects without MRI and rejected subjects from the analysis set.
for sub in no_mri + reject:
    if sub in subjects:
        subjects.remove(sub)
subjects.sort()
# Split subjects into expertise groups, keeping both IDs and their indices
# into the sorted `subjects` list.
experts = []
novices = []
experts_i = []
novices_i = []
for s,sub in enumerate(subjects):
    if expertise(sub) == 'N':
        novices.append(sub)
        novices_i.append(s)
    if expertise(sub) == 'E':
        experts.append(sub)
        experts_i.append(s)
clu = dict()  # collects cluster-test results, keyed by test name
# -
# -
# Load the power-spectral-density DataArray and order its dimensions as
# (state, subject, freq, chan).
PSD = xr.open_dataarray(op.join(Analysis_path, task, 'meg', 'Alpha', 'PSD.nc'))
PSD.load()
PSD = PSD.transpose('state', 'subject', 'freq', 'chan')
PSD_norm = np.log10(PSD) #log transform
PSD_norm = (PSD_norm - PSD_norm.mean('chan'))/PSD_norm.std('chan') #spatial normalisation
#PSD_norm = PSD/PSD.sum(['freq', 'chan'])
print(PSD_norm)
# Average the normalised PSD over all blocks of each state: state labels are
# matched with a wildcard (e.g. 'RS*'), then the matching entries are
# averaged along the 'state' dimension.
PSD_ave = np.empty((len(states), *PSD_norm.shape[1:]))
for s,state in enumerate(states):
    PSD_ave[s] = PSD_norm.loc[fnmatch.filter(PSD_norm.state.values, state+'*')].mean('state').values
# Rebuild an xarray DataArray with the collapsed state coordinate.
coords = {dim: PSD_norm.coords[dim].values for dim in PSD_norm.dims}
coords.update({'state': states})
PSD_ave = xr.DataArray(PSD_ave, dims=PSD_norm.dims, coords=coords)
print(PSD_ave)
def sensor_perm_test(X1, X2, stat_file, test_key, freqs, sensors, mode='a', p_threshold=0.01, connectivity=None, paired=False, fif_significance=0.05):
    """
    Run a spatio-temporal cluster permutation test on sensor-space spectra.

    If paired, test X1-X2 with a one-sample cluster t-test; otherwise run an
    independent-samples cluster F-test on [X1, X2].
    A summary Evoked of the stats is saved if there is a significant cluster (p-value < fif_significance).
    (Time is replaced by freqs.)
    Saving can be forced by setting fif_significance to 1, or disabled by setting it to 0.
    Input: arrays of shape (subjects, freq, space)

    Results (T/F statistic, cluster p-values, cluster indices) are also
    written to `stat_file` as a netCDF group named `test_key`.
    Returns the full tuple (T_obs, clusters, cluster_pv, H0) from MNE.
    """
    os.makedirs(op.dirname(stat_file), exist_ok=True)
    evoked_file = op.splitext(stat_file)[0] + '_' + test_key + '_stat-ave.fif'
    # A non-array X2 (e.g. None) means "test X1 against zero".
    if not isinstance(X2, (np.ndarray, xr.DataArray, list)):
        X2 = np.zeros(X1.shape)
    if paired:
        X = X1 - X2
        # Two-tailed t threshold for df = n_subjects - 1.
        t_threshold = -t.ppf(p_threshold / 2, X.shape[0] - 1)
        T_obs, clusters, cluster_pv, H0 = clu_all = spatio_temporal_cluster_1samp_test(X, connectivity=connectivity, threshold=t_threshold, n_jobs=4)
    else:
        # NOTE(review): the F threshold here uses dfs (n1-1, n2-1) and halves
        # the alpha as if two-tailed — confirm this matches the degrees of
        # freedom MNE's cluster F-test assumes.
        f_threshold = f.ppf(1 - p_threshold / 2, X1.shape[0] - 1, X2.shape[0] - 1)
        T_obs, clusters, cluster_pv, H0 = clu_all = spatio_temporal_cluster_test([X1,X2], connectivity=connectivity, threshold=f_threshold, n_jobs=4)
    # Per-point maps: p-value of the cluster each point belongs to, and the
    # 1-based index of that cluster (0 = no cluster).
    p_val = np.ones_like(T_obs)
    clu_inds = np.zeros_like(T_obs)
    # Reuse a saved magnetometer Info if available; otherwise build a minimal
    # one. The sampling frequency is repurposed as the frequency step so that
    # the Evoked "time" axis represents freqs.
    info_file = op.join(Analysis_path, 'MEG', 'meta', 'mag-info.fif')
    if op.isfile(info_file):
        info = mne.io.read_info(info_file)
        info['sfreq'] = 1 / (freqs[1] - freqs[0])
    else:
        info = mne.create_info(sensors, 1 / (freqs[1] - freqs[0]), 'mag')
    evokeds = []
    for c,clu in enumerate(clusters):
        p_val[clu] = cluster_pv[c]
        clu_inds[clu] = c+1
        # Keep an Evoked of the statistic restricted to each significant
        # cluster (zeros elsewhere).
        if np.any(cluster_pv[c] <= fif_significance):
            data = np.full_like(T_obs, 0)#np.nan)
            data[clu] = T_obs[clu]
            #mne.write_evokeds(evoked_file, mne.EvokedArray(data.T, info, freqs[0], 'cluster_{}'.format(c+1)))
            evokeds.append(mne.EvokedArray(data.T, info, freqs[0], 'cluster_{}'.format(c+1)))
    # If anything is significant, also save a combined "all_clusters" Evoked.
    if np.any(p_val <= fif_significance):
        evokeds.append(mne.EvokedArray(np.where(p_val <= fif_significance, T_obs, 0).T, info, freqs[0], 'all_clusters'))
        mne.write_evokeds(evoked_file, evokeds)
    # Persist the three maps (statistic, p-values, cluster indices) in one
    # netCDF group per test; append to an existing file, otherwise create it.
    stats = xr.DataArray(np.zeros((3, *T_obs.shape)), dims=['data', 'freq', 'sensor'], coords={'data':['T_stat', 'p_val', 'clu_inds'], 'freq':freqs, 'sensor':sensors})
    stats.loc['T_stat'] = T_obs
    stats.loc['p_val'] = p_val
    stats.loc['clu_inds'] = clu_inds
    stats.to_netcdf(path=stat_file, group=test_key, mode=mode if op.isfile(stat_file) else 'w')
    return clu_all
# # States
fmin = .5 #PSD_norm.freq.values[0]
fmax = 100 #PSD_norm.freq.values[-1]
stat_path = op.join(Analysis_path, task, 'meg', 'Stats', 'PSD')
os.makedirs(stat_path, exist_ok=True)
stat_file = op.join(stat_path, '{}-{}Hz.nc'.format(fmin, fmax))
paired_tests = {'FA_vs_RS':('FA', 'RS', subjects), 'OM_vs_RS':('OM', 'RS', subjects),
'FA_vs_OM':('FA', 'OM', subjects), 'FA_vs_RS+E':('FA', 'RS', experts),
'OM_vs_RS+E':('OM', 'RS', experts), 'FA_vs_OM+E':('FA', 'OM', experts),
'FA_vs_RS+N':('FA', 'RS', novices), 'OM_vs_RS+N':('OM', 'RS', novices),
'FA_vs_OM+N':('FA', 'OM', novices)}
for key,val in paired_tests.items():
logger.info(key)
clu[key] = sensor_perm_test(PSD_ave.loc[val[0],val[2],fmin:fmax].values, PSD_ave.loc[val[1],val[2],fmin:fmax].values, stat_file=stat_file, test_key=key, freqs=PSD_ave.loc[:,:,fmin:fmax].freq.values, sensors=PSD_ave.chan.values.tolist(), paired=True)
# # Expertise
fmin = .5 #PSD_norm.freq.values[0]
fmax = 100 #PSD_norm.freq.values[-1]
stat_path = op.join(Analysis_path, task, 'meg', 'Stats', 'PSD')
os.makedirs(stat_path, exist_ok=True)
stat_file = op.join(stat_path, '{}-{}Hz.nc'.format(fmin, fmax))
exp_tests = {'N_vs_E+RS': 'RS', 'N_vs_E+FA': 'FA', 'N_vs_E+OM': 'OM'}
for key,val in exp_tests.items():
logger.info(key)
clu[key] = sensor_perm_test(PSD_ave.loc[val,novices,fmin:fmax].values, PSD_ave.loc[val,experts,fmin:fmax].values, stat_file=stat_file, test_key=key, freqs=PSD_ave.loc[:,:,fmin:fmax].freq.values, sensors=PSD_ave.chan.values.tolist(), paired=False)
# # Interaction
fmin = .5 #PSD_norm.freq.values[0]
fmax = 100 #PSD_norm.freq.values[-1]
stat_path = op.join(Analysis_path, task, 'meg', 'Stats', 'PSD')
os.makedirs(stat_path, exist_ok=True)
stat_file = op.join(stat_path, '{}-{}Hz.nc'.format(fmin, fmax))
inter_tests = {'N_vs_E+OM-RS': ('OM', 'RS'), 'N_vs_E+FA-RS': ('FA', 'RS'), 'N_vs_E+FA-OM': ('FA', 'OM')}
for key,val in inter_tests.items():
logger.info(key)
clu[key] = sensor_perm_test(PSD_ave.loc[val[0],novices,fmin:fmax].values - PSD_ave.loc[val[1],novices,fmin:fmax].values, PSD_ave.loc[val[0],experts,fmin:fmax].values - PSD_ave.loc[val[1],experts,fmin:fmax].values, stat_file=stat_file, test_key=key, freqs=PSD_ave.loc[:,:,fmin:fmax].freq.values, sensors=PSD_ave.chan.values.tolist(), paired=False)
| utils_processing/stats_PSD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Post-Estimation Tutorial
# +
# %matplotlib inline
import pyblp
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
pyblp.options.digits = 2
pyblp.options.verbose = False
pyblp.__version__
# -
# This tutorial covers several features of `pyblp` which are available after estimation including:
#
# 1. Calculating elasticities and diversion ratios.
# 2. Calculating marginal costs and markups.
# 3. Computing the effects of mergers: prices, shares, and HHI.
# 4. Using a parametric bootstrap to estimate standard errors.
# 5. Estimating optimal instruments.
#
#
# ## Problem Results
#
# As in the [fake cereal tutorial](nevo.ipynb), we'll first solve the fake cereal problem from :ref:`references:Nevo (2000)`. We load the fake data and estimate the model as in the previous tutorial. We output the setup of the model to confirm we have correctly configured the :class:`Problem`
product_data = pd.read_csv(pyblp.data.NEVO_PRODUCTS_LOCATION)
agent_data = pd.read_csv(pyblp.data.NEVO_AGENTS_LOCATION)
product_formulations = (
pyblp.Formulation('0 + prices', absorb='C(product_ids)'),
pyblp.Formulation('1 + prices + sugar + mushy')
)
agent_formulation = pyblp.Formulation('0 + income + income_squared + age + child')
problem = pyblp.Problem(product_formulations, product_data, agent_formulation, agent_data)
problem
# We'll solve the problem in the same way as before. The :meth:`Problem.solve` method returns a :meth:`ProblemResults` class, which displays basic estimation results. The results that are displayed are simply formatted information extracted from various class attributes such as :attr:`ProblemResults.sigma` and :attr:`ProblemResults.sigma_se`.
initial_sigma = np.diag([0.3302, 2.4526, 0.0163, 0.2441])
initial_pi = [
[ 5.4819, 0, 0.2037, 0 ],
[15.8935, -1.2000, 0, 2.6342],
[-0.2506, 0, 0.0511, 0 ],
[ 1.2650, 0, -0.8091, 0 ]
]
bfgs = pyblp.Optimization('bfgs')
results = problem.solve(
initial_sigma,
initial_pi,
optimization=bfgs,
method='1s'
)
results
# Additional post-estimation outputs can be computed with :class:`ProblemResults` methods.
#
#
# ## Elasticities and Diversion Ratios
#
# We can estimate elasticities, $\varepsilon$, and diversion ratios, $\mathscr{D}$, with :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`.
#
# As a reminder, elasticities in each market are
#
# $$\varepsilon_{jk} = \frac{x_k}{s_j}\frac{\partial s_j}{\partial x_k}.$$
#
# Diversion ratios are
#
# $$\mathscr{D}_{jk} = -\frac{\partial s_k}{\partial x_j} \Big/ \frac{\partial s_j}{\partial x_j}.$$
#
# Following :ref:`references:Conlon and Mortimer (2018)`, we report the diversion to the outside good $D_{j0}$ on the diagonal instead of $D_{jj}=-1$.
elasticities = results.compute_elasticities()
diversions = results.compute_diversion_ratios()
# Post-estimation outputs are computed for each market and stacked. We'll use [matplotlib](https://matplotlib.org/) functions to display the matrices associated with a single market.
single_market = product_data['market_ids'] == 'C01Q1'
plt.colorbar(plt.matshow(elasticities[single_market]));
plt.colorbar(plt.matshow(diversions[single_market]));
# The diagonal of the first image consists of own elasticities and the diagonal of the second image consists of diversion ratios to the outside good. As one might expect, own price elasticities are large and negative while cross-price elasticities are positive but much smaller.
#
# Elasticities and diversion ratios can be computed with respect to variables other than `prices` with the `name` argument of :meth:`ProblemResults.compute_elasticities` and :meth:`ProblemResults.compute_diversion_ratios`. Additionally, :meth:`ProblemResults.compute_long_run_diversion_ratios` can be used to understand substitution when products are eliminated from the choice set.
#
# The convenience methods :meth:`ProblemResults.extract_diagonals` and :meth:`ProblemResults.extract_diagonal_means` can be used to extract information about own elasticities of demand from elasticity matrices.
means = results.extract_diagonal_means(elasticities)
# An alternative to summarizing full elasticity matrices is to use :meth:`ProblemResults.compute_aggregate_elasticities` to estimate aggregate elasticities of demand, $E$, in each market, which reflect the change in total sales under a proportional sales tax of some factor.
aggregates = results.compute_aggregate_elasticities(factor=0.1)
# Since demand for an entire product category is generally less elastic than the average elasticity of individual products, mean own elasticities are generally larger in magnitude than aggregate elasticities.
plt.hist(
[means.flatten(), aggregates.flatten()],
color=['red', 'blue'],
bins=50
);
plt.legend(['Mean Own Elasticities', 'Aggregate Elasticities']);
# ## Marginal Costs and Markups
#
# To compute marginal costs, $c$, the `product_data` passed to :class:`Problem` must have had a `firm_ids` field. Since we included firm IDs when configuring the problem, we can use :meth:`ProblemResults.compute_costs`.
costs = results.compute_costs()
plt.hist(costs, bins=50);
plt.legend(["Marginal Costs"]);
# Other methods that compute supply-side outputs often compute marginal costs themselves. For example, :meth:`ProblemResults.compute_markups` will compute marginal costs when estimating markups, $\mathscr{M}$, but computation can be sped up if we just use our pre-computed values.
markups = results.compute_markups(costs=costs)
plt.hist(markups, bins=50);
plt.legend(["Markups"]);
# ## Mergers
#
# Before computing post-merger outputs, we'll supplement our pre-merger markups with some other outputs. We'll compute Herfindahl-Hirschman Indices, $\text{HHI}$, with :meth:`ProblemResults.compute_hhi`; population-normalized gross expected profits, $\pi$, with :meth:`ProblemResults.compute_profits`; and population-normalized consumer surpluses, $\text{CS}$, with :meth:`ProblemResults.compute_consumer_surpluses`.
hhi = results.compute_hhi()
profits = results.compute_profits(costs=costs)
cs = results.compute_consumer_surpluses()
# To compute post-merger outputs, we'll create a new set of firm IDs that represent a merger of firms ``2`` and ``1``.
product_data['merger_ids'] = product_data['firm_ids'].replace(2, 1)
# We can use :meth:`ProblemResults.compute_approximate_prices` or :meth:`ProblemResults.compute_prices` to estimate post-merger prices. The first method, which is discussed, for example, in :ref:`references:Nevo (1997)`, assumes that shares and their price derivatives are unaffected by the merger. The second method does not make these assumptions and iterates over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)` to solve the full system of $J_t$ equations and $J_t$ unknowns in each market $t$. We'll use the latter, since it is fast enough for this example problem.
changed_prices = results.compute_prices(
firm_ids=product_data['merger_ids'],
costs=costs
)
# We'll compute post-merger shares with :meth:`ProblemResults.compute_shares`.
changed_shares = results.compute_shares(changed_prices)
# Post-merger prices and shares are used to compute other post-merger outputs. For example, $\text{HHI}$ increases.
changed_hhi = results.compute_hhi(
firm_ids=product_data['merger_ids'],
shares=changed_shares
)
plt.hist(changed_hhi - hhi, bins=50);
plt.legend(["HHI Changes"]);
# Markups, $\mathscr{M}$, and profits, $\pi$, generally increase as well.
changed_markups = results.compute_markups(changed_prices, costs)
plt.hist(changed_markups - markups, bins=50);
plt.legend(["Markup Changes"]);
changed_profits = results.compute_profits(changed_prices, changed_shares, costs)
plt.hist(changed_profits - profits, bins=50);
plt.legend(["Profit Changes"]);
# On the other hand, consumer surpluses, $\text{CS}$, generally decrease.
changed_cs = results.compute_consumer_surpluses(changed_prices)
plt.hist(changed_cs - cs, bins=50);
plt.legend(["Consumer Surplus Changes"]);
# ## Bootstrapping Results
#
# Post-estimation outputs can be informative, but they don't mean much without a sense of sample-to-sample variability. One way to estimate confidence intervals for post-estimation outputs is with a standard bootstrap procedure:
#
# 1. Construct a large number of bootstrap samples by sampling with replacement from the original product data.
# 2. Initialize and solve a :class:`Problem` for each bootstrap sample.
# 3. Compute the desired post-estimation output for each bootstrapped :class:`ProblemResults` and from the resulting empirical distribution, construct bootstrap confidence intervals.
#
# Although appealing because of its simplicity, the computational resources required for this procedure are often prohibitively expensive. Furthermore, human oversight of the optimization routine is often required to determine whether the routine ran into any problems and if it successfully converged. Human oversight of estimation for each bootstrapped problem is usually not feasible.
#
# A more reasonable alternative is a parametric bootstrap procedure:
#
# 1. Construct a large number of draws from the estimated joint distribution of parameters.
# 2. Compute the implied mean utility, $\delta$, and shares, $s$, for each draw. If a supply side was estimated, also compute the implied marginal costs, $c$, and prices, $p$.
# 3. Compute the desired post-estimation output under each of these parametric bootstrap samples. Again, from the resulting empirical distribution, construct bootstrap confidence intervals.
#
# Compared to the standard bootstrap procedure, the parametric bootstrap requires far fewer computational resources, and is simple enough to not require human oversight of each bootstrap iteration. The primary complication to this procedure is that when supply is estimated, equilibrium prices and shares need to be computed for each parametric bootstrap sample by iterating over the $\zeta$-markup equation from :ref:`references:Morrow and Skerlos (2011)`. Although nontrivial, this fixed point iteration problem is much less demanding than the full optimization routine required to solve the BLP problem from the start.
#
# An empirical distribution of results computed according to this parametric bootstrap procedure can be created with the :meth:`ProblemResults.bootstrap` method, which returns a :class:`BootstrappedResults` class that can be used just like :class:`ProblemResults` to compute various post-estimation outputs. The difference is that :class:`BootstrappedResults` methods return arrays with an extra first dimension, along which bootstrapped results are stacked.
#
# We'll construct 90% parametric bootstrap confidence intervals for estimated mean own elasticities in each market of the fake cereal problem. Usually, bootstrapped confidence intervals should be based on thousands of draws, but we'll only use a few for the sake of speed in this example.
bootstrapped_results = results.bootstrap(draws=100, seed=0)
bootstrapped_results
bounds = np.percentile(
bootstrapped_results.extract_diagonal_means(
bootstrapped_results.compute_elasticities()
),
q=[10, 90],
axis=0
)
table = pd.DataFrame(index=problem.unique_market_ids, data={
'Lower Bound': bounds[0].flatten(),
'Mean Own Elasticity': aggregates.flatten(),
'Upper Bound': bounds[1].flatten()
})
table.round(2).head()
# ## Optimal Instruments
#
# Given a consistent estimate of $\theta$, we may want to compute the optimal instruments of :ref:`references:Chamberlain (1987)` and use them to re-solve the problem. Optimal instruments have been shown, for example, by :ref:`references:Reynaert and Verboven (2014)`, to reduce bias, improve efficiency, and enhance stability of BLP estimates.
#
# The :meth:`ProblemResults.compute_optimal_instruments` method computes the expected Jacobians that comprise the optimal instruments by integrating over the density of $\xi$ (and $\omega$ if a supply side was estimated). By default, the method approximates this integral by averaging over the Jacobian realizations computed under draws from the asymptotic normal distribution of the error terms. Since this process is computationally expensive and often doesn't make much of a difference, we'll use `method='approximate'` in this example to simply evaluate the Jacobians at the expected value of $\xi$, zero.
instrument_results = results.compute_optimal_instruments(method='approximate')
instrument_results
# We can use the :meth:`OptimalInstrumentResults.to_problem` method to re-create the fake cereal problem with the estimated optimal excluded instruments.
updated_problem = instrument_results.to_problem()
updated_problem
# We can solve this updated problem just like the original one. We'll start at our consistent estimate of $\theta$.
updated_results = updated_problem.solve(
results.sigma,
results.pi,
optimization=pyblp.Optimization('bfgs'),
method='1s'
)
updated_results
| docs/notebooks/tutorial/post_estimation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/csaybar/EarthEngineMasterGIS/blob/master/module02/07_Lambda.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="otznJ1uhF3uw" colab_type="text"
# <!--COURSE_INFORMATION-->
# <img align="left" style="padding-right:10px;" src="https://user-images.githubusercontent.com/16768318/73986808-75b3ca00-4936-11ea-90f1-3a6c352766ce.png" width=10% >
# <img align="right" style="padding-left:10px;" src="https://user-images.githubusercontent.com/16768318/73986811-764c6080-4936-11ea-9653-a3eacc47caed.png" width=10% >
#
# **Bienvenidos!** Este *colab notebook* es parte del curso [**Introduccion a Google Earth Engine con Python**](https://github.com/csaybar/EarthEngineMasterGIS) desarrollado por el equipo [**MasterGIS**](https://www.mastergis.com/). Obten mas informacion del curso en este [**enlace**](https://www.mastergis.com/product/google-earth-engine/). El contenido del curso esta disponible en [**GitHub**](https://github.com/csaybar/EarthEngineMasterGIS) bajo licencia [**MIT**](https://opensource.org/licenses/MIT).
# + [markdown] id="nsEOLErx6WCf" colab_type="text"
# # **MASTERGIS: Sintaxis de minima de Python para GEE**
#
# En esta lectura, usted aprendera acerca de: las **funciones lambda, map, filter and reduce**.
#
# Veremos los siguientes topicos:
#
# 1) Funciones lambda.
# 2) Metodos de la programacion funcional:
# - map
# - filter
# - reduce*
#
# ### **Programacion funcional**
#
# Los programas escritos en un **lenguaje funcional** estan constituidos unicamente por definiciones de **funciones**, entendiendo estas no como subprogramas clasicos de un lenguaje imperativo, sino como funciones puramente matematicas.
#
# <center>
# <img src="https://user-images.githubusercontent.com/16768318/73006022-d523bd00-3e01-11ea-8c1a-8ce4f5529158.png" width="35%">
# </center>
#
# ### **Funciones lambda**
#
# Sirve para crear funciones anonimas. Ejemplo:
#
# + id="TOTBARnt6Lhi" colab_type="code" colab={}
def double(x):
    # NOTE(review): despite its name, this returns x**2 (the square), not 2*x —
    # the same behavior as the `lambda x: x**2` version shown next in this
    # tutorial. Confirm whether the name or the operation is what was intended.
    return x**2
# + id="gqz5TzS8_weU" colab_type="code" colab={}
double(25)
# + id="QcVfQr-e_zBO" colab_type="code" colab={}
double = lambda x: x**2
# + id="RcsrFRGG_7eN" colab_type="code" outputId="5c120276-2fa5-4bbb-b01d-34bd39d125ac" colab={"base_uri": "https://localhost:8080/", "height": 34}
double(25)
# + id="kHuKKQ7e_-rT" colab_type="code" colab={}
reverse_f = lambda x: x[::-1]
# + id="Xpb-b3xQLCOt" colab_type="code" outputId="b9de9094-e4c1-43c0-f233-32f60fa67e81" colab={"base_uri": "https://localhost:8080/", "height": 34}
reverse_f('hola')
# + [markdown] id="mZcdf3C-LGpm" colab_type="text"
# ### **Map**
#
# Aplica una condicion (definida por una funcion) sobretodos los elementos de una estructura de datos.
# <img src="https://leblancfg.com/img/map_function.png" width="50%">
#
#
# + [markdown] id="xRrjjwTrO4vc" colab_type="text"
# #### **Ejemplo 1**
# Convertir nombre de mis mascotas a mayuscula
# + id="cgIL58gOQo2M" colab_type="code" colab={}
mascotas_nombre_min = ['alfred', 'tabitha', 'william', 'arla']
# + id="mAVbl7f2N_Si" colab_type="code" outputId="96f07ed2-5f87-4641-e978-388bbd5b257d" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Imperative style: build the upper-cased name list with an explicit loop.
mascotas_nombre_may = []
for pet in mascotas_nombre_min:
    pet_ = pet.upper()
    mascotas_nombre_may.append(pet_)
# BUG FIX: the original printed `uppered_pets`, a name that is never defined
# in this notebook (NameError); print the list that was actually built.
print(mascotas_nombre_may)
# + id="DYdUckemOWuW" colab_type="code" outputId="fb502884-a609-4a14-ee70-fc7330625c78" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Functional style: map str.upper over the list of pet names.
# BUG FIX: the original mapped over `my_pets`, a name that does not exist in
# this notebook (NameError); use `mascotas_nombre_min` defined above.
mascotas_nombre_may = list(map(str.upper, mascotas_nombre_min))
print(mascotas_nombre_may)
# + [markdown] id="1bl4nKrkORx-" colab_type="text"
# #### **Ejemplo 2**
#
# Convertir los valores de temperatura media de Celsius a Kelvin
# + id="MJz1s2l_OC00" colab_type="code" colab={}
temp_Pucallpa_C = [28, 32, 15, 26, 24, 30]
# + id="CcJQUBdORJZF" colab_type="code" outputId="7a65bef8-3ee4-47b7-be58-7d68c386129c" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Programación imperativa
temp_Pucallpa_K = []
for temp in temp_Pucallpa_C:
new_temp = temp + 273.15
temp_Pucallpa_K.append(new_temp)
print(temp_Pucallpa_K)
# + id="wW4u3RrZUNPg" colab_type="code" outputId="4a6c8aa6-7f20-4a54-9f78-1b2f8f7bbae4" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Programación funcional
temp_Pucallpa_K = list(map(lambda x:x+273.15, temp_Pucallpa_C))
print(temp_Pucallpa_K)
# + [markdown] id="bn9eybKUUd1l" colab_type="text"
# ### **Filter**
#
# Mientras que **map()** pasa cada elemento del iterable a traves de una funcion y devuelve el resultado de todos los elementos que han pasado a traves de la funcion, **filter()**, en primer lugar, requiere que **la funcion devuelva valores booleanos (verdadero o falso)** y luego pasa cada elemento del iterable a traves de la funcion, **"filtrando" los que son falsos**.
#
# <img src="https://leblancfg.com/img/filter.png" width="50%">
# + id="dDCGYR6yVnYK" colab_type="code" colab={}
# Filter
temp_Pucallpa_C = [28, 32, 15, 26, 24, 30]
# + id="78a6jsnOZ13Y" colab_type="code" outputId="b585692a-4bbc-428c-de5d-dcb72819d451" colab={"base_uri": "https://localhost:8080/", "height": 34}
over_30 = list(filter(lambda temp:temp >= 30, temp_Pucallpa_C))
print(over_30)
# + [markdown] id="y473dHN1GuCd" colab_type="text"
# ### **¿Dudas con este Jupyter-Notebook?**
#
# Estaremos felices de ayudarte!. Create una cuenta Github si es que no la tienes, luego detalla tu problema ampliamente en: https://github.com/csaybar/EarthEngineMasterGIS/issues
#
# **Tienes que dar clic en el boton verde!**
#
# <center>
# <img src="https://user-images.githubusercontent.com/16768318/79680748-d5511000-81d8-11ea-9f89-44bd010adf69.png" width = 70%>
# </center>
| module02/07_Lambda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Module, CSV und Plotten
# ## Module
# Viele der Skripte, welche ihr bis jetzt geschrieben habt behandeln häufig auftretende Probleme,
# wie zum Beispiel die Suche nach einer Wurzel.
# Dies ist kein triviales Problem, welches verschiedene Lösungen zulässt.
# Die beste Lösung kann man entweder durch das Lesen von Fachzeitschriften und langes Knobeln selber finden
# oder man verwendet bereits vorgefertigten Code.
#
# Dieser Code wird für Python in sogenannten „Modulen“ verteilt.
# Dies sind thematische Sammlungen verschiedener Klassen und Funktionen,
# welche gesammelt zur Verfügung gestellt werden.
#
# Die Verwendung eines Moduls wird hier kurz anhand von numpy demonstriert.
# Der erste Schritt ist das Modul zu laden oder zu importieren.
# Anschließend kann der Inhalt genutzt werden.
# Die Syntax entspricht dabei dem Aufruf einer Methode.
# +
# Zuerst beginnen wir mit dem Import des Moduls "numpy"
import numpy
a = 5
# Nun können wir die Wurzel-Funktion des Moduls "sqrt" rufen
b = numpy.sqrt(a)
print(b)
# -
# Es ist auch möglich Module während des Imports mit „as“ umzubenennen.
# +
import numpy as np
# Dies entspricht:
# import numpy
# np = numpy
a = 5
# Wir können numpy nun als np rufen
print(np.sqrt(a))
# -
#
# ## Plotten mit matplotlib
# Im Anfängerpraktikum werdet ihr,
# wie vermutlich auch im Rest eures Studiums Messergebnisse graphisch darstellen müssen.
# Für kleine Datenmengen kann dies mit Papier und Bleistift bewerkstelligt werden.
# Für größere Datenmengen sollte man jedoch einen Computer verwenden.
# In Python ist dies mit matplotlib möglich.
#
# Hier ein kurzes Beispiel:
# +
import matplotlib.pyplot as plt
# Erzeugung der X Werte
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Erzeugung der Y-Werte
y = [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
plt.plot(x, y)
plt.show()
# -
# Wie ihr sehen könnt, übergeben wir „plot“ zwei iterierbare Objekte – in diesem Fall Listen – als Argumente.
# „plot“ interpretiert diese als Datenpunkte und legt anschließend eine Linie durch sie.
# „show“ dient dazu, das Ergebnis am Ende darzustellen.
# Es ist möglich, vor show noch weitere Graphen zeichnen zu lassen.
# Hierzu ein weiteres Beispiel:
# +
import matplotlib.pyplot as plt
# Erzeugung der X_Werte
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Erzeugung der Y-Werte
y = [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
plt.plot(x, y)
plt.plot(x) # übergeben wir nur eine Liste so wird diese als y Koordinaten über (0...n) interpretiert
plt.show()# show zeigt nun beide Graphen
# -
# Die Darstellung als Graph ist meistens für Datenpunkte unerwünscht,
# stattdessen wird ein Streudiagramm bevorzugt.
# Dieses können wir mittels „scatter“ erzeugen, welches ähnlich wie „plot“ verwendet wird.
# +
import matplotlib.pyplot as plt
# Erzeugung der x Werte
x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
# Erzeugung der Y-Werte
y = [0, 1, 4, 9, 16, 25, 36, 49, 64, 81, 100]
plt.scatter(x, y)
plt.show()
# -
# Versucht als Übungen nun die Quadrate und Kuben (3. Potenz)
# aller ganzen Zahlen zwischen 0 und 100 graphisch darzustellen.
# Die Verwendung von Schleifen und der Listenmethode „append“ könnte sich hierbei als nützlich erweisen.
# +
# Fügt hier bitte eure Lösung ein
# -
# ## Numpy-arrays
# Als nächstes wollen wir uns mit „numpy“-Arrays beschäftigen.
# Diese finden häufig Verwendung, da sie schnelleres Rechnen ermöglichen.
# Dies liegt daran, dass sie strikt typisiert und damit C-Arrays sehr ähnlich sind,
# was die Einsparungen in der Verarbeitung der Daten ermöglicht.
# Zuerst wollen wir einmal einige „numpy“-Arrays erstellen.
# Wir beginnen damit eine Liste in ein „numpy“-Arrays umzuwandeln:
# +
import numpy as np
Liste = [1, 2, 4]
array = np.array(Liste)
print(array)
# -
# Als nächstes nutzen wir 3 integrierte Funktionen um häufig verwendete „numpy“-Arrays zu erzeugen.
# +
import numpy as np
# Arange gibt uns einen Zahlen in gleichmäßigen Intervallen,
# kann im Gegensatz zu range, jedoch auch Fließkommazahlen verwenden
arange = np.arange(0, 1, 0.1)
# ones(n) erzeugt ein array mit n Einsen
ones = np.ones(5)
# zeros(n) erzeug ein array mit n Nullen
zeros = np.zeros(3)
print(arange)
print(ones)
print(zeros)
# -
# Verwendet dieses Wissen nun um die Quadrate aller Zahlen zwischen 0 und 1 mit einem Intervall von 0.01 darzustellen:
# +
# Fügt hier bitte eure Lösung ein
# -
# ### Nachteile von Numpy-Arrays
# Beschäftigen wir uns nun mit dem Nachteilen von „numpy“-Arrays.
# Der erste Nachteil ist die strikte Typisierung.
# So kann in einem Array nur ein Datentyp abgelegt werden.
# In folgendem Code werden damit alle Einträge innerhalb des „numpy“-Arrays auf den Datentyp „string“ gezwungen,
# wie ihr an den Anführungszeichen erkennen könnt.
import numpy as np
Liste = ["Klaus", 1, 0.1]
Array = np.array(Liste)
print(Liste)
print(Array)
# Ein zweites Problem resultiert aus der Hardware nahen Funktionsweise von numpy.
# Die Einträge der „numpy“-Arrays sind in bestimmten aufeinanderfolgenden Speicherzellen aufbewahrt.
# Diese Speicherzellen haben eine begrenzte Anzahl „Bits“,
# also boolescher Variablen,
# deren Wahrheitswerte genutzt werden, um binäre Zahlen darzustellen.
# Hier ein Beispiel für die Darstellung der Zahl „201“ mit 8 „Bits“.
#
# | Bit-Nummer | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 |
# |:--------------:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|:----:|
# |Wahrheitswert | Wahr | Wahr |Falsch|Falsch| Wahr |Falsch|Falsch| Wahr |
# |Binärdarstellung| 1 | 1 | 0 | 0 | 1 | 0 | 0 | 1 |
# | Zahlenwert | 128 | 64 | 32 | 16 | 8 | 4 | 2 | 1 |
#
# Im Arbeitsspeicher würden wir also: | Wahr | Wahr | Falsch | Falsch | Wahr | Falsch | Falsch | Wahr |
#
# Beziehungsweise: 11001001 finden.
#
# Mit 8 „Bits“ lassen sich $2^8$, also 256 verschiedene Zustände darstellen.
# Für ganze Zahlen also der Zahlenraum von 0 bis 255 oder -128 bis 127.
# Versuchen wir nun auf 255 (in binärer Schreibweise 11111111) 1 zu addieren so erhalten wir
# 256 (in binärer Schreibweise 100000000).
# Diese können wir jedoch mit unseren Speicherzellen nicht mehr darstellen und die oberste Stelle geht verloren.
# Die im Arbeitsspeicher vorliegende Zahl ist 00000000 also 0.
# Dieses Verhalten bezeichnet man als Overflow.
import numpy as np
a = np.array([255], dtype=np.uint8)
b = np.array([1], dtype=np.uint8)
c = np.ones(1, dtype=np.uint8)
c[0] = a[0] + b[0]
print(c)
# ## Datenverarbeitung
# Natürlich solltet ihr eure Messergebnisse maschinenlesbar abspeichern,
# um sie automatisch auslesen und auswerten zu können.
# Ein Beispiel ist die Datei „data01.dat“.
# Wenn ihr wollt könnt ihr sie euch herunterladen und mit einem Texteditor (wie Notepad, Gedit oder vim) ansehen.
# Diese Datei können wir auch mit numpy einlesen.
# Hierfür verwenden wir „loadtxt“ aus „numpy“.
# +
import numpy as np
values = np.loadtxt("data01.dat")
# Hier geben die eingelesenen Werte aus
print(values)
# +
import numpy as np
values = np.loadtxt("data01.dat")
# Nun betrachten wir eines der eingelesenen Wertepaare, welche sich innerhalb einer Zeile befanden
print(values[0])
# +
import numpy as np
values = np.loadtxt("data01.dat")
# Natürlich können wir auch einen Teil dieses Wertepaares ausgeben
print(values[0][1])
# -
# Als nächstes wollen wir nun diese Wertepaare als Punkte interpretieren
# und mittels „append“ in eine x- und eine y-Liste einfügen um sie anschließend mittels „scatter“ darzustellen.
# +
import numpy as np
import matplotlib.pyplot as plt
x = []
y = []
values = np.loadtxt("data01.dat")
for value in values:
x.append(value[0])
y.append(value[1])
plt.scatter(x,y)
plt.show()
# -
# Versucht nun den Inhalt der Datei „data02.dat“ ebenfalls mit „scatter“ darzustellen.
import numpy as np
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Bei „data03.csv“ handelt es sich um ein „Comma-separated-values“-File.
# Die Werte werden also durch Kommata getrennt.
# Wir können sie mit dem „csv“-Modul einlesen.
import matplotlib.pyplot as plt
import csv
# First open the file "data03.csv" and call it "csv_file".
# The with-statement also makes sure the body only runs if opening
# succeeded, and that the file is closed again automatically.
with open("data03.csv") as csv_file:
    X = []
    Y = []
    # Declare that the values are separated by the delimiter ','
    readCSV = csv.reader(csv_file, delimiter=',')
    # Each line of the file is now an element of readCSV
    for row in readCSV:
        # Note that the values are still stored as strings,
        # so we have to convert them to floating point numbers
        X.append(float(row[0]))
        Y.append(float(row[1]))
    plt.scatter(X, Y)
    plt.show()
# Verwendet nun dieses Skript, um die Daten aus „data04.csv“ darzustellen.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Als nächstes sollt ihr die Daten aus „data05.csv“ darstellen.
# Wenn ihr euch die Datei anseht werdet ihr feststellen,
# dass die Werte durch „&“ getrennt werden.
# Um sie korrekt einzulesen, müsst ihr den Delimiter in der „csv.reader“ Funktion ändern.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# In „data06.csv“ wurden die Fließkommazahlen mittels „,“ statt „.“ getrennt.
# Der einfachste weg dieses Problem zu lösen ist die „replace“ Methode der „string“-Klasse.
# Der Delimiter ist „;“.
import csv
import matplotlib.pyplot as plt
# "data06.csv" uses ';' as the column delimiter and ',' as the decimal
# separator, so every value is converted to a dot-decimal before parsing.
with open("data06.csv") as csv_file:
    X = []
    Y = []
    # The columns are separated by the delimiter ';'
    # (the original comment said ',', which did not match the code).
    readCSV = csv.reader(csv_file, delimiter=';')
    # Each line of the file is now an element of readCSV
    for row in readCSV:
        # Replace the decimal comma with a decimal point, then parse.
        X.append(float(row[0].replace(",",".")))
        Y.append(float(row[1].replace(",",".")))
    plt.scatter(X,Y)
    plt.show()
# In „data07.csv“ findet ihr einen Datensatz mit demselben Problem.
# Versucht diesen nun entsprechend zu bearbeiten.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Als nächstes betrachten wir „data08.csv“.
# Hier finden wir mehrere Gaußkurven in verschiedenen Spalten.
# Als erstes wollen wir den Kommentar eliminieren.
# Hierbei könnten wir natürlich alle mit „#“ gekennzeichneten Zeilen ignorieren.
# Stattdessen werden wir die ersten Zeilen mittels eines Counters ignorieren,
# da wir dafür nicht das erste Zeichen der Zeile berücksichtigen müssen,
# was etwas komplizierter wäre.
import csv
import matplotlib.pyplot as plt
# Read the first two columns of "data08.csv", skipping the leading comment
# lines by counting them down instead of inspecting the first character.
with open("data08.csv") as csv_file:
    x = []
    y = []
    readCSV = csv.reader(csv_file, delimiter=",")
    # Counter named Skip_lines that counts down the number of comment lines
    Skip_lines = 2
    for row in readCSV:
        # Skip the comment lines at the top of the file
        if Skip_lines > 0:
            Skip_lines -= 1
        else:
            # Read the first two columns of this data row
            x.append(float(row[0]))
            y.append(float(row[1]))
    plt.scatter(x,y)
    plt.show()
# Nun sollten wir noch die anderen Gaußkurven darstellen.
# Wir könnten natürlich für jede Zeile eine eigene Variable erstellen,
# stattdessen verwenden wir hier ein Konstrukt,
# welches dynamisch Listen in eine Superliste einfügt.
import csv
import matplotlib.pyplot as plt
# Read ALL columns of "data08.csv" into a dynamically built list of lists
# ("result"), then plot every further column against the first one.
with open("data08.csv") as csv_file:
    readCSV = csv.reader(csv_file, delimiter=",")
    Skip_lines = 2
    # Here we create a list to store our per-column results
    result = []
    # With this "flag" we remember whether this is the first data row
    first = True
    for row in readCSV:
        if Skip_lines > 0:
            Skip_lines -= 1
        else:
            for i in range(0, len(row)):
                if first:
                    # On the first data row we create new lists, seeded with
                    # that column's value, and append them to result.
                    result.append([float(row[i].replace(",","."))])
                else:
                    # Afterwards we can simply append to these lists
                    result[i].append(float(row[i].replace(",",".")))
            # Once the lists have been created we set the flag to False,
            # so that no further lists are created.
            if first:
                first = False
    # For the plots we proceed similarly.
    # First we create a flag indicating whether x has already been read.
    IsX = True
    for array in result:
        if IsX:
            # The first array, which corresponds to the first column,
            # is taken as x here and the flag is cleared.
            x = array
            IsX = False
        else:
            # All following arrays are the y columns.
            # They are read here and plotted against x.
            y = array
            plt.scatter(x,y)
    # Finally we show the plots.
    plt.show()
# Nun versucht die in „data09.csv“ gegebenen Daten darzustellen.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Leider gibt es Datensätze, welche nicht einfach mit „csv.reader“ ausgelesen werden können.
# In diesen Fällen ist es meist sinnvoll die Zeilen einzeln als Zeichenketten zu bearbeiten.
# Ein Beispiel findet sich in „data10.dat“.
# Hier finden wir 2 durch Leerzeichen getrennte Spalten, denen Einheiten angehängt wurden.
#
# Um diese auswerten zu können müssen wir uns mit „slicing“ beschäftigen.
# Durch „slicing“ können wir eine Teilliste oder einen Teilstring aus einer Liste oder einem String auswählen.
# Hier ein kurzes Beispiel:
Liste = ["Hallo, Klaus!", 2, 3, 4]
# Slicing findet innerhalb eckiger Klammern statt
# Die erste Zahl gibt das erste Element an, ab dem gesliced wird.
# Die zweite Zahl bis zu welchem Element gesliced wird. Diese ist nicht inkludiert!
print(Liste[1:3])
# Lassen wir eine Zahl weg. So wird bis zum passenden Ende gesliced.
print(Liste[:2])
print(Liste[2:])
# Es ist auch möglich eine Schrittweite anzugeben.
# Lassen wir uns zum Beispiel jedes 2 Element des Strings ausgeben
String = Liste[0]
print(String[::2])
# Oder jedes 2 Element zwischen dem 2 und 8 Element
print(String[2:9:2])
# Es ist auch möglich eine negative Schrittweite zu wählen
print(String[::-1])
# Die Daten in „data10.dat“ sind durch Leerzeichen getrennt
# und alle gleich breit, weshalb Slicing eine Möglichkeit darstellt, sie zu separieren.
import csv
import matplotlib.pyplot as plt
# Parse "data10.dat": fixed-width columns with units appended, so the lines
# are processed as raw strings (replace the units, then slice by position).
with open("data10.dat") as csv_file:
    Skip_lines = 2
    result = []
    first = True
    # Instead of using the CSV reader we iterate the file directly,
    # which hands us each line as a string.
    for line in csv_file:
        if Skip_lines > 0:
            Skip_lines -= 1
        else:
            # First use "replace" to eliminate the units
            line = line.replace("min"," ") # replacing by a space keeps the column widths constant
            line = line.replace("K"," ")
            # Now use slicing to split the line into 2 strings.
            row = [line[0:7], line[7:]]
            for i in range(0, len(row)):
                if first:
                    # First data row: create one list per column
                    result.append([float(row[i].replace(",","."))])
                else:
                    result[i].append(float(row[i].replace(",",".")))
            if first:
                first = False
    # Plot every further column against the first one (same pattern as above).
    IsX = True
    for array in result:
        if IsX:
            x = array
            IsX = False
        else:
            y = array
            plt.scatter(x,y)
    plt.show()
# Natürlich könnt ihr dies nun an „data11.dat“ ausprobieren.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Nun solltet ihr alle Fähigkeiten haben, welche ihr benötigen werdet um Daten-files einzulesen.
# Beginnt einmal mit „data12.dat“.
# Hierbei ist die Lösung relativ naheliegend, jedoch nicht unbedingt leicht zu finden.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Beschäftigt euch nun mit „data13.dat“.
# Leere Felder könnt ihr als „float("Nan")“ einfügen,
# da „matplotlib“ nur Nummern einliest.
# („Nan“ steht für „Not a number“ und ist ein definierter Zustand einer Fließkommazahl.)
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# In „data14.dat“ findet ihr ein recht ähnliches Problem.
# Versucht sie darzustellen.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Hier findet ihr noch in „Astro.dat“ ein Beispiel aus der Praxis.
# Zwar sind die meisten Tabellen gutartiger, jedoch kann euch auch solch eine Tabelle begegnen.
# Versucht die Daten auszulesen und sinnvoll darzustellen.
# (Nachdem es sich um echte Daten handelt werdet ihr vermutlich keine eindeutig perfekte Darstellung finden.)
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
# Ein weiteres Praxisbeispiel findet ihr in „wireshark-capture.dat“,
# diese wurde für diese Übung von 260Mb auf 120kb verkürzt.
# Plottet bitte „No.“ gegen „ack“.
# Hierfür müsst ihr etwas Stringmanipulation betreiben.
import csv
import matplotlib.pyplot as plt
# Fügt hier bitte eure Lösung ein
| Day3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os
#print(os.getcwd())
import sys
sys.path.append('..') # examples
sys.path.append('../..') # PINNFramework etc.
import matplotlib.pyplot as plt
import torch
from tqdm.notebook import tqdm
from PINNFramework.PINN import Interface
from PINNFramework.models.mlp import MLP
from torch.autograd import grad
import torch
import numpy as np
import torch.nn as nn
import scipy.io
from pyDOE import lhs
import torch.optim as optim
class Poisson1DPINN(Interface):
    """Physics-informed neural network for a 1D problem with residual
    u_x + C**2 * u (see note in ``pde``).

    NOTE(review): despite the class name, ``pde`` uses the FIRST
    derivative u_x rather than the second derivative u_xx that a 1D
    Poisson equation would require — confirm whether this is intended.
    """
    def __init__(self, model, input_d = 1, output_d = 1, lb = [0], ub = [1], C = 1):
        # NOTE(review): the mutable list defaults lb=[0]/ub=[1] are shared
        # across calls; they are only read here, but None-defaults would
        # be safer if this class is reused.
        super().__init__(model,input_d,output_d)
        self.lb = lb  # lower bound of the domain (used by input_normalization)
        self.ub = ub  # upper bound of the domain
        self.C = C    # equation parameter
    def derivatives(self, u, x):
        """Return the first-order derivative du/dx of the network output
        with respect to the input, stacked as a (N, 1) tensor."""
        grads= torch.ones(x.shape[0])
        pred_u = u[:,0]
        # Jacobian of u w.r.t. x; create_graph=True keeps the graph so the
        # PDE residual remains differentiable for training.
        J_u = grad(pred_u, x, create_graph=True, grad_outputs=grads)[0]
        #calculate first order derivatives
        u_x = J_u[:,0]
        pred_derivatives = torch.stack([u_x],1)
        return pred_derivatives
    def pde(self, x, u, derivatives):
        """PDE residual evaluated at the collocation points.

        NOTE(review): for the problem set up in the surrounding notebook
        (g = sin(Cx), h = -C^2 g, so g'' + C^2 g = 0) one would expect
        u_xx here instead of u_x — verify against the intended equation.
        """
        u_x = derivatives[:,0]
        _u = u[:,0]
        pde_residual = u_x + self.C**2 * _u #Du - g(x)
        return pde_residual
    def initial_loss(self, x, u0):
        """
        Supervised loss for training the initial condition
        (mean squared error between prediction and target u0).
        """
        x = x.view(-1)
        UV = self.forward(x)
        u = UV[:, 0]
        u0 = u0.view(-1)
        return torch.mean((u0 - u) ** 2)
    def input_normalization(self,x):
        """
        Implementation of min-max scaling in range of [-1,1]
        """
        xx = 2.0 * (x - self.lb) / (self.ub - self.lb) - 1.0
        return xx
# +
# Poisson equation
g = lambda x, C : np.sin(C*x)
h = lambda x, C : -C**2 * g(x,C)
C = 1
# +
# bounds
lb = torch.tensor([0.0])
ub = torch.tensor([1.0])
# boundary conditions at x=0 and x=1
coord_bc = [0, 1]
data_bc = [h(0,1), h(1,1)]
# residual points
x_f = np.random.uniform(0,1, size = 100)
# +
# NOTE(review): this MLP instance is created and then immediately
# overwritten by the nn.Sequential below, so it is dead code — confirm
# which of the two model definitions is the intended one.
pinn_model = MLP(input_size=1, output_size=1, num_hidden=3, hidden_size=100)
# Fully connected network x -> u(x): 4 hidden layers of width 100 with
# tanh activations.
pinn_model = nn.Sequential(
    nn.Linear(1,100),
    nn.Tanh(),
    nn.Linear(100,100),
    nn.Tanh(),
    nn.Linear(100,100),
    nn.Tanh(),
    nn.Linear(100,100),
    nn.Tanh(),
    nn.Linear(100,1)
)
# -
model = Poisson1DPINN(model = pinn_model, ub=ub, lb = lb)
x = {"x_0": torch.tensor(coord_bc).float().view(-1,1), "x_f":torch.tensor(x_f).float().view(-1,1), "x_b": torch.tensor(coord_bc).float().view(-1,1)}
data_ub = torch.tensor(data_bc).float().view(-1,1)
optimizer = optim.Adam(model.parameters(),lr=1e-5)
pbar = tqdm()
num_epochs = 100000
pbar.reset(total=num_epochs)
for epoch in range(num_epochs):
optimizer.zero_grad()
loss = model.pinn_loss(x, data_ub, data_ub ,interpolation_criterion=nn.MSELoss(), boundary_criterion=nn.MSELoss(), pde_norm=nn.MSELoss())
loss.backward()
optimizer.step()
pbar.set_description("Loss %10f" % loss.item())
pbar.update()
pbar.refresh()
# +
with torch.no_grad():
y_hat = model(torch.tensor(x_f).float().view(-1,1))
plt.plot(x_f,y_hat.cpu().numpy(),'.')
| examples/1D_Poisson/1DPoissonWorkflow.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# I have always wanted to write a ufunc function in Python. With Numba, you can --- and it will be fast.
# %pylab inline
import numpy as np
from numba import jit
import math
# Define some polynomial evaluation tools.
# +
@jit('f8(f8,f8[:])', nopython=True)
def polevl(x, coef):
    # Evaluate the polynomial whose coefficients are given highest-order
    # first at x, using Horner's scheme: ((c0*x + c1)*x + c2)*x + ...
    result = coef[0]
    for k in range(1, len(coef)):
        result = result * x + coef[k]
    return result
@jit('f8(f8,f8[:])', nopython=True)
def p1evl(x, coef):
    # Horner evaluation of a monic polynomial: the leading coefficient 1.0
    # is implicit, so the accumulator starts at x + coef[0].
    result = x + coef[0]
    for k in range(1, len(coef)):
        result = result * x + coef[k]
    return result
# -
# Define some constants!
# +
PP = np.array([
7.96936729297347051624E-4,
8.28352392107440799803E-2,
1.23953371646414299388E0,
5.44725003058768775090E0,
8.74716500199817011941E0,
5.30324038235394892183E0,
9.99999999999999997821E-1], 'd')
PQ = np.array([
9.24408810558863637013E-4,
8.56288474354474431428E-2,
1.25352743901058953537E0,
5.47097740330417105182E0,
8.76190883237069594232E0,
5.30605288235394617618E0,
1.00000000000000000218E0], 'd')
DR1 = 5.783185962946784521175995758455807035071
DR2 = 30.47126234366208639907816317502275584842
RP = np.array([
-4.79443220978201773821E9,
1.95617491946556577543E12,
-2.49248344360967716204E14,
9.70862251047306323952E15], 'd')
RQ = np.array([
# 1.00000000000000000000E0,
4.99563147152651017219E2,
1.73785401676374683123E5,
4.84409658339962045305E7,
1.11855537045356834862E10,
2.11277520115489217587E12,
3.10518229857422583814E14,
3.18121955943204943306E16,
1.71086294081043136091E18], 'd')
QP = np.array([
-1.13663838898469149931E-2,
-1.28252718670509318512E0,
-1.95539544257735972385E1,
-9.32060152123768231369E1,
-1.77681167980488050595E2,
-1.47077505154951170175E2,
-5.14105326766599330220E1,
-6.05014350600728481186E0], 'd')
QQ = np.array([
# 1.00000000000000000000E0,
6.43178256118178023184E1,
8.56430025976980587198E2,
3.88240183605401609683E3,
7.24046774195652478189E3,
5.93072701187316984827E3,
2.06209331660327847417E3,
2.42005740240291393179E2], 'd')
NPY_PI_4 = .78539816339744830962
SQ2OPI = .79788456080286535587989
# -
# Now for the function itself
@jit('f8(f8)')
def j0(x):
    """Bessel function of the first kind, order zero, for a scalar x.

    Piecewise rational/asymptotic approximation in the style of the
    Cephes library (it is compared against scipy.special.j0 below).
    """
    # J0 is even, so work with |x|.
    if (x < 0):
        x = -x
    if (x <= 5.0):
        z = x * x
        # Tiny arguments: two-term series 1 - x^2/4.
        if (x < 1.0e-5):
            return (1.0 - z / 4.0)
        # Rational approximation in z = x^2; DR1 and DR2 appear to be the
        # squared locations of the first two zeros of J0, factored out so
        # the approximation vanishes exactly at those roots.
        p = (z-DR1) * (z-DR2)
        p = p * polevl(z, RP) / polevl(z, RQ)
        return p
    # Large arguments: asymptotic form
    # J0(x) ~ sqrt(2/(pi*x)) * (P(q)*cos(x - pi/4) - (5/x)*Q(q)*sin(x - pi/4)).
    w = 5.0 / x
    q = 25.0 / (x*x)
    p = polevl(q, PP) / polevl(q, PQ)
    q = polevl(q, QP) / p1evl(q, QQ)
    xn = x - NPY_PI_4
    p = p*math.cos(xn) - w * q * math.sin(xn)
    return p * SQ2OPI / math.sqrt(x)
# +
from numba import vectorize
import scipy.special as ss
vj0 = vectorize(['f8(f8)'])(j0.py_func)
# -
x = np.linspace(-10,10,1000)
# %timeit vj0(x)
# %timeit ss.j0(x)
plot(x, vj0(x), x, ss.j0(x))
# This was run on a Macbook Air. Running `sysctl -n machdep.cpu.brand_string` resulted in:
#
# Intel(R) Core(TM) i7-3720QM CPU @ 2.60GHz
| examples/notebooks/j0 in Numba.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <table>
# <tr><td align="right" style="background-color:#ffffff;">
# <img src="../images/logo.jpg" width="20%" align="right">
# </td></tr>
# <tr><td align="right" style="color:#777777;background-color:#ffffff;font-size:12px;">
# <NAME> | April 28, 2019 (updated)
# </td></tr>
# <tr><td align="right" style="color:#bbbbbb;background-color:#ffffff;font-size:11px;font-style:italic;">
# This cell contains some macros. If there is a problem with displaying mathematical formulas, please run this cell to load these macros.
# </td></tr>
# </table>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\vhadamardzero}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\vhadamardone}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# <h2>Quantum Teleportation</h2>
# Asja wants to send a qubit to Balvis by using only classical communication.
#
# Let $ \ket{v} = \myvector{a\\b} \in \mathbb{R}^2 $ be the quantum state.
#
# If Asja has many copies of this qubit, then she can collect the statistics based on these qubits and obtain an approximation of $ a $ and $ b $, say $ \tilde{a} $ and $\tilde{b}$, respectively. After this, Asja can send $ \tilde{a} $ and $\tilde{b}$ by using many classical bits, the number of which depends on the precision of the amplitudes.
# On the other hand, if Asja and Balvis share the entangled qubits in state $ \sqrttwo\ket{00} + \sqrttwo\ket{11} $ in advance, then it is possible for Balvis to create $ \ket{v} $ in his qubit after receiving two bits of information from Asja.
# <h3> Protocol </h3>
# Asja has two qubits and Balvis has one qubit.
#
# The first qubit of Asja is in $ \ket{v} = \myvector{a\\b} = a\ket{0} + b\ket{1} $.
#
# The quantum state of Asja's second qubit and Balvis' qubit is $ \sqrttwo\ket{00} + \sqrttwo\ket{11} $.
#
# So, the state of the three qubits is
#
# $$ \mypar{a\ket{0} + b\ket{1}}\mypar{\sqrttwo\ket{00} + \sqrttwo\ket{11}}
# = \sqrttwo \big( a\ket{000} + a \ket{011} + b\ket{100} + b \ket{111} \big). $$
# <h4> CNOT operator by Asja </h4>
#
# Asja applies CNOT gate to her qubits where her first qubit is the control qubit and her second qubit is the target qubit.
# <h3>Task 1</h3>
#
# Calculate the new quantum state after this CNOT operator.
# <a href="B68_Quantum_Teleportation_Solutions.ipynb#task1">click for our solution</a>
# <h3>Hadamard operator by Asja</h3>
#
# Asja applies Hadamard gate to her first qubit.
# <h3>Task 2</h3>
#
# Calculate the new quantum state after this Hadamard operator.
#
# Verify that the resulting quantum state can be written as follows:
#
# $$
# \frac{1}{2} \ket{00} \big( a\ket{0}+b\ket{1} \big) +
# \frac{1}{2} \ket{01} \big( a\ket{1}+b\ket{0} \big) +
# \frac{1}{2} \ket{10} \big( a\ket{0}-b\ket{1} \big) +
# \frac{1}{2} \ket{11} \big( a\ket{1}-b\ket{0} \big) .
# $$
# <a href="B68_Quantum_Teleportation_Solutions.ipynb#task2">click for our solution</a>
# <h3> Measurement by Asja </h3>
#
# Asja measures her qubit. With probability $ \frac{1}{4} $, she can observe one of the basis states.
#
# Depending on the measurement outcomes, Balvis' qubit is in the following states:
# <ol>
# <li> "00": $ \ket{v_{00}} = a\ket{0} + b \ket{1} $ </li>
# <li> "01": $ \ket{v_{01}} = a\ket{1} + b \ket{0} $ </li>
# <li> "10": $ \ket{v_{10}} = a\ket{0} - b \ket{1} $ </li>
# <li> "11": $ \ket{v_{11}} = a\ket{1} - b \ket{0} $ </li>
# </ol>
# As can be observed, the amplitudes $ a $ and $ b $ are "transferred" to Balvis' qubit in any case.
#
# If Asja sends the measurement outcomes, then Balvis can construct $ \ket{v} $ exactly.
# <h3>Task 3</h3>
#
# Asja sends the measurement outcomes to Balvis by using two classical bits: $ x $ and $ y $.
#
# For each $ (x,y) $ pair, determine the quantum operator(s) that Balvis can apply to obtain $ \ket{v} = a\ket{0}+b\ket{1} $ exactly.
# <a href="B68_Quantum_Teleportation_Solutions.ipynb#task3">click for our solution</a>
# <h3> Task 4 </h3>
#
# Create a quantum circuit with three qubits and three classical bits.
#
# Assume that Asja has the first two qubits and Balvis has the third qubit.
#
# Implement the protocol given above until Asja makes the measurements (included).
#
# The state of Asja's first qubit can be set by a rotation with randomly picked angle.
#
# At this point, read the state vector of the circuit by using "statevector_simulator".
#
# <i> When a circuit having measurement is simulated by "statevector_simulator", the simulator picks one of the outcomes, and so we see one of the states after the measurement.</i>
#
# Verify that the state of Balvis' qubit is in one of these: $ \ket{v_{00}}$, $ \ket{v_{01}}$, $ \ket{v_{10}}$, and $ \ket{v_{11}}$.
#
# Guess the measurement outcome obtained by "statevector_simulator".
#
# <i> Remark that, the qubits are combined in reverse order in qiskit.
#
# For example, when we have three qubits, the basis states are expected to be ordered as
#
# $$ 000 , 001, 010, 011, 100, 101, 110, 111, $$
#
# but they are ordered in qiskit as follows:
#
# $$ 000 , 100, 010, 110, 001, 101, 011, 111. $$
#
# Please take this into account when interpreting the results given by any local simulator.
# +
#
# your code is here
#
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from math import pi, cos, sin
from random import randrange
# quantum circuit with three qubits and three bits
qreg = QuantumRegister(3)
creg = ClassicalRegister(3)
mycircuit = QuantumCircuit(qreg,creg)
# rotate the first qubit by random angle
r = randrange(100)
theta = 2*pi*(r/100) # radians
print("the picked angle is",r*3.6,"degrees and",theta,"radians")
a = cos(theta)
b = sin(theta)
print("a=",round(a,3),"b=",round(b,3))
print("a*a=",round(a**2,3),"b*b=",round(b**2,3))
print()
mycircuit.ry(2*theta,qreg[0])
# creating an entanglement between the second and third qubits
mycircuit.h(qreg[1])
mycircuit.cx(qreg[1],qreg[2])
# CNOT operator by Asja on her qubits where the first qubit is the control qubit
mycircuit.cx(qreg[0],qreg[1])
# Hadamard operator by Asja on the first qubit
mycircuit.h(qreg[0])
# measurement done by Asja
mycircuit.measure(qreg[0],creg[0])
mycircuit.measure(qreg[1],creg[1])
# read the state vector
job = execute(mycircuit,Aer.get_backend('statevector_simulator'))
current_quantum_state=job.result().get_statevector(mycircuit)
print("the state vector is")
for i in range(len(current_quantum_state)):
print(current_quantum_state[i].real)
print()
# reverse the order
def get_state(i):
    """Map a statevector index to its 3-qubit basis-state label in qiskit's
    reversed qubit ordering; returns None for indices outside 0..7."""
    labels = ("000", "100", "010", "110", "001", "101", "011", "111")
    if 0 <= i < len(labels):
        return labels[i]
balvis_state = ""
for i in range(len(current_quantum_state)):
if current_quantum_state[i].real!=0:
if abs(current_quantum_state[i].real-a)<0.000001:
balvis_state += "+a|"+ get_state(i)+">"
elif abs(current_quantum_state[i].real+a)<0.000001:
balvis_state += "-a|"+ get_state(i)+">"
elif abs(current_quantum_state[i].real-b)<0.000001:
balvis_state += "+b|"+ get_state(i)+">"
elif abs(current_quantum_state[i].real+b)<0.000001:
balvis_state += "-b|"+ get_state(i)+">"
print("which is",balvis_state)
mycircuit.draw()
# -
# <a href="B68_Quantum_Teleportation_Solutions.ipynb#task4">click for our solution</a>
| bronze/B68_Quantum_Teleportation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from itertools import product
dirl = '/home/jujuman/Research/RawGDB11Database/test/'
dirs = '/home/jujuman/Research/RawGDB11Database/test/'
files = ['gdb11_size'+str(i+1)+'.smi' for i in range(9,10)]
rpl = [('O','S'),('F','Cl')]
def comb_replace(ft, smiles):
    """Return an iterator over every variant of `smiles` in which each
    occurrence of ft[0] is independently either kept or replaced by ft[1].
    Each variant is a tuple of single characters (itertools.product output).
    """
    original, substitute = ft
    choices = [(original, substitute) if ch == original else (ch,) for ch in smiles]
    return product(*choices)
# For each input SMILES file: generate all combinations of O->S and F->Cl
# substitutions for every molecule, keep only variants that contain a
# substituted atom (S or Cl) or an original F, and write them to a new file.
for f in files:
    print('Working on: '+f)
    # Read all molecules; [1:-1] drops the header line and the trailing
    # empty string produced by the final newline. The with-statement fixes
    # the original file-handle leak from a bare open(...).read().
    with open(dirl+f, 'r') as infile:
        data = infile.read().split('\n')[1:-1]
    molset = []
    for d in data:
        # Nested expansion: every subset of O's becomes S, and for each of
        # those variants every subset of F's becomes Cl.
        for i in comb_replace(rpl[0], d):
            for j in comb_replace(rpl[1], i):
                molset.append(''.join(j))
    # Keep only molecules that actually contain S, Cl, or a remaining F.
    molset = [k for k in molset if (rpl[0][1] in k or rpl[1][1] in k or 'F' in k)]
    # Output name inserts 'SFCl' at the underscore, e.g.
    # gdb11_size10.smi -> gdb11SFClsize10.smi
    with open(dirs+f.split('_')[0]+'SFCl'+f.split('_')[1], 'w') as of:
        of.write('FILE: '+f+'\n')
        for k in molset:
            of.write(k+'\n')
| notebooks/GDB_atom_replacer.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import math
((10-5)**2 + (12-6)**2)**0.5
((10-2)**2 + (12-2)**2+(5-7)**2+(6-8)**2)**0.5
import numpy as np
a = (10, 12)
b = (5,6)
a = np.array((10 ,12))
b = np.array((5, 6))
dist = np.linalg.norm(a-b)
dist
import numpy as np
a = (10, 12, 5, 6)
b = (2,2,7,8)
a = np.array(a)
b = np.array(b)
dist = np.linalg.norm(a-b)
dist
# Reference: https://stackoverflow.com/questions/25375359/how-to-project-new-sets-of-data-onto-a-pca-space-in-matplotlib
# +
import numpy as np
mu_vec1 = np.array([0,0,0])
cov_mat1 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class1_sample = np.random.multivariate_normal(mu_vec1, cov_mat1, 20).T
assert class1_sample.shape == (3,20), "The matrix has not the dimensions 3x20"
mu_vec2 = np.array([1,1,1])
cov_mat2 = np.array([[1,0,0],[0,1,0],[0,0,1]])
class2_sample = np.random.multivariate_normal(mu_vec2, cov_mat2, 20).T
assert class2_sample.shape == (3,20), "The matrix has not the dimensions 3x20"
# concatenate 2 numpy arrays: row-wise
all_samples = np.concatenate((class1_sample.T, class2_sample.T))
#print(all_samples.shape)
# +
# %matplotlib inline
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA as sklearnPCA
sklearn_pca = sklearnPCA(n_components=2)
sklearn_transf = sklearn_pca.fit_transform(all_samples)
plt.plot(sklearn_transf[0:20,0],sklearn_transf[0:20,1],\
'o', markersize=7, color='blue', alpha=0.5, label='class1')
plt.plot(sklearn_transf[20:40,0], sklearn_transf[20:40,1],\
'^', markersize=7, color='red', alpha=0.5, label='class2')
plt.xlabel('x_values')
plt.ylabel('y_values')
plt.xlim([-4,4])
plt.ylim([-4,4])
plt.legend()
plt.title('Transformed samples with class labels from matplotlib.mlab.PCA()')
plt.show()
| Collection_Testing_Learning_Codes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="Ap8LrtlVGjIq"
# <h2><center>Predicción de muertes por fallas cardiacas</center></h2>
# + [markdown] id="aDvstIiDGyAl"
# <p style='text-align: justify;'>Debido a los avances que ha logrado el mundo en la sanidad pública se han disminuido sustancialmente las enfermedades infecciosas. Esta proesa ha traido nuevos retos; atender las nuevas enfermedades predominantes, las crónico-degenerativas no infecciosas.</p>
#
# <p style='text-align: justify;'>La primera causa de muerte en el mundo es una de ellas, la enfermedad cardiovascular. Los países realizan múltiples esfuerzos para intentar combatirla pues muchas de las causas que la provocan son de carácter prevenible: alimentación, sobrepeso, obesidad, tabaco, drogas, sedentarismo, etc.</p>
#
# <p style='text-align: justify;'>La estadística indica que 3 de cada 10 personas que fallecen, mueren debido a enfermedades del corazón. Su abordaje es necesariamente multidisciplinario y las tecnologias de la información tienen un gran valor que aportar a esta tarea. Conocer los diferentes factores que inciden en su complicación más común: la falla cardíaca, permite dar a los hospitales y sistemas de salud mejores herramientas para tomar decisiones sobre pacientes graves.</p>
#
# <p style='text-align: justify;'>Con la siguiente información que se te presenta serás capaz de estudiar las variables relevantes para comprender la gravedad de cada paciente y poder establecer algoritmos y rutas de decisión mucho más precisas que ayuden a mejorar la sobrevida de las personas.</p>
#
#
# <p style='text-align: justify;'>Compartiendo la siguiente cita, de la cual se obtiene de la base de datos.</p>
#
# <p style='text-align:center;'><strong>Larxel. (2020). <em>Heart Failure Prediction</em>. Kaggle. <a href="https://www.kaggle.com/andrewmvd/heart-failure-clinical-data" target="_blank">https://www.kaggle.com/andrewmvd/heart-failure-clinical-data</a></strong></p>
#
# <p style='text-align: justify;'>Compartiendo el artículo desde el cual se obtuvo la base de datos original; es un estudio desde el cual se utilizan métodos de aprendizaje máquina para la predicción de fallas cardiacas con base en estudios de creatinina sérica y fracción de eyección.</p>
#
# <p style='text-align:center;'><strong><NAME>., & <NAME>. (2020). <em>Machine learning can predict survival of patients with heart failure from serum creatinine and ejection fraction alone.</em> BMC Medical Informatics and Decision Making. <a href="https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-020-1023-5" target="_blank">https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-020-1023-5</a></strong></p>
# + id="SA1wTn5MGqkm" executionInfo={"status": "ok", "timestamp": 1626293609875, "user_tz": 240, "elapsed": 2355, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Librerias ha utilizar
import pandas as pd
import numpy as np
from numpy.random import seed
seed(30)
from sklearn.preprocessing import MinMaxScaler
import tensorflow
tensorflow.random.set_seed(30)
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import SGD
from sklearn.metrics import confusion_matrix
# + id="abNpWTWdHcWT" executionInfo={"status": "ok", "timestamp": 1626293609876, "user_tz": 240, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Load the heart-failure clinical records dataset (one row per patient).
df = pd.read_csv('heart_failure_clinical_records_dataset.csv')
# + id="rau4_ZqVHd2n" executionInfo={"status": "ok", "timestamp": 1626293609876, "user_tz": 240, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Shuffle the whole dataset: sample(frac=1) returns all rows in random order.
df = df.sample(frac=1)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="jwYJRnZXIKfG" executionInfo={"status": "ok", "timestamp": 1626293609877, "user_tz": 240, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}} outputId="41f1ce77-df12-435b-8be9-4d23786fad55"
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 768} id="AG8d1n0BsaAu" executionInfo={"status": "ok", "timestamp": 1626296629095, "user_tz": 240, "elapsed": 1186, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}} outputId="bb168499-5b4a-474f-855a-f0c6f5504ef9"
import seaborn as sns
# Categorical scatter plots of age and smoking status, split by outcome.
sns.catplot(x="DEATH_EVENT", y="age", data=df)
sns.catplot(x="DEATH_EVENT", y="smoking", data=df)
# + id="jMWfR-CKIQT6" executionInfo={"status": "ok", "timestamp": 1626293609877, "user_tz": 240, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Separate the independent variables (X) from the dependent variable (y).
X = df.drop("DEATH_EVENT",axis=1)
y = df.DEATH_EVENT
# + id="Km3uuZAGInj7" executionInfo={"status": "ok", "timestamp": 1626296990104, "user_tz": 240, "elapsed": 250, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Rescale every feature to the [0, 1] range.
scaler = MinMaxScaler()
X = scaler.fit_transform(X)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="cs20Xcdr2s0Z" executionInfo={"status": "ok", "timestamp": 1626301284020, "user_tz": 240, "elapsed": 294, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}} outputId="87633a86-77d1-4d0c-fbcb-b23787fdd16e"
import numpy as np
import matplotlib.pyplot as plt
# Scatter plot of two scaled feature columns of X.
# BUG FIX: the original assigned these to module-level `x` and `y`, silently
# overwriting the target vector `y` created earlier, so the subsequent
# train/test split used a feature column as the labels. Using distinct local
# names keeps `y` intact.
feat_a = X[:, 0]
feat_b = X[:, 2]
plt.scatter(feat_a, feat_b)
plt.show()
# + id="vhWttPfTJ0QT" executionInfo={"status": "ok", "timestamp": 1626293609877, "user_tz": 240, "elapsed": 8, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Positional 70/30 train/test split (the data was shuffled above, so this is
# effectively a random split).
split = round(X.shape[0]*0.7)
X_train, X_test = X[:split], X[split:]
y_train, y_test = y[:split], y[split:]
# + colab={"base_uri": "https://localhost:8080/"} id="QIF5X2DmJ-AF" executionInfo={"status": "ok", "timestamp": 1626293832202, "user_tz": 240, "elapsed": 42029, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}} outputId="23712b28-9f2d-4b91-d595-424c7046cf56"
# Neural-network model: 5 hidden layers of 20 neurons each.
# ReLU activation between hidden layers, sigmoid on the output layer.
# Optimizer: stochastic gradient descent with learning rate 0.1.
# Loss: mean squared error. Training: 600 epochs, batch size 10.
# NOTE(review): binary cross-entropy is the usual loss for a sigmoid binary
# classifier; MSE is kept here to preserve the original results.
model = keras.Sequential([
    layers.Dense(20, activation='relu', input_shape=(12,)),
    layers.Dense(20, activation='relu'),
    layers.Dense(20, activation='relu'),
    layers.Dense(20, activation='relu'),
    layers.Dense(20, activation='relu'),
    layers.Dense(1, activation='sigmoid')
])
# FIX: the `lr` keyword was deprecated in TF 2.x; `learning_rate` is the
# supported argument name and behaves identically.
sgd = SGD(learning_rate=0.1)
model.compile(optimizer=sgd,
              loss='mean_squared_error')
model.fit(X_train, y_train, epochs=600, batch_size=10)
# + id="nvvlkwBSLJUX" executionInfo={"status": "ok", "timestamp": 1626293832653, "user_tz": 240, "elapsed": 454, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# Round the sigmoid outputs to hard 0/1 class predictions.
y_hat_train = model.predict(X_train).round()
y_hat_test = model.predict(X_test).round()
# + id="hqTXUDyLLZ_e" executionInfo={"status": "ok", "timestamp": 1626293832654, "user_tz": 240, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}}
# confusion_matrix is already imported at the top of the file; re-imported
# here so this notebook cell also runs standalone.
from sklearn.metrics import confusion_matrix
cf = confusion_matrix(y_test,y_hat_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 265} id="h9CUiH_TLkP2" executionInfo={"status": "ok", "timestamp": 1626293969562, "user_tz": 240, "elapsed": 847, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GgimKXlswsRONzCsgtK97IzHh9zcvgsGKFppYo=s64", "userId": "10197604116353826977"}} outputId="1f1e0ce7-a49b-4fe6-edc7-e0738b2b08dc"
import seaborn as sns
# Visualize the confusion matrix; counts are annotated in each cell.
sns.heatmap(cf, annot=True, xticklabels=False, yticklabels=False)
# + [markdown] id="kkbrKljTmkB1"
# ## Comentando los Resultados
# + [markdown] id="BSJUhAiGmpzh"
# La matriz de confusión nos dice que la red predijo 73 casos de manera correcta: 57 fueron verdaderos positivos, es decir, la red predijo la defunción de alguien y acertó; 16 veces la red predijo la no defunción de alguien y acertó.
# Por otro lado, la red se equivocó 17 veces: 12 veces predijo la no defunción de alguien y se equivocó, y 5 veces predijo la defunción de alguien y se equivocó.
# + id="5jao4sHnMktA"
| Prediccion Sanitaria.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Start jupyter server such that the print of the current working dir below is the root directory of the repo
# Setup the environment...
# +
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D
import os
import sys
# Point the global server code at its static configuration and API directory.
os.environ['STATIC_VARIABLES_FILE_PATH'] = "globalserver/static_variables.json"
os.environ['PATH_TO_GLOBALSERVER'] = "globalserver/api/"
# Ensure the working directory is the repository root so relative paths resolve.
if os.getcwd().split('/')[-1]!="Federated-Learning":
    os.chdir('..')
print(os.getcwd())
sys.path.append(os.getcwd())
import tensorflow_addons as tfa
from testing.test_class import Testing
from globalserver.operator_.operator_class_db import Operator
# Two simulated federated clients: "c0" and "c1".
clients = [f"c{i}" for i in range(2)]
# Start fresh servers for the test harness; keep the existing database.
TestSetup = Testing(clients, start_servers=True, clear_logs=True, clear_db=False, interface=False)
# -
# Download the data from polybox https://polybox.ethz.ch/index.php/s/W1oSh3H81HqYSQp?path=%2FDatasets%2FKK_Box_Normalized_jsonl and put it into the datasets/kkbox folder. Then split the data into two sets of size 70000.
# +
import random
import json
def split_data():
    """Split the raw KKBox training file into per-client jsonl files.

    The shuffled training data is partitioned as:
      client 0: rows [0, 70000) train, [140000, 160000) validation
      client 1: rows [70000, 140000) train, [160000, 180000) validation

    NOTE(review): the original test file is read but never used, and each
    client's "test" file is written with a copy of its validation slice —
    confirm this is intentional before trusting reported test metrics.
    Behaviour is preserved here exactly.
    """
    with open('datasets/kk_box/train_kkbox.jsonl', 'r') as f:
        train = f.readlines()
    # Kept for parity with the original code even though it is unused.
    with open('datasets/kk_box/test_kkbox.jsonl', 'r') as f:
        test = f.readlines()  # noqa: F841
    random.shuffle(train)
    # Per-client (train slice, validation slice) pairs.
    partitions = {
        0: (train[:70000], train[140000:160000]),
        1: (train[70000:140000], train[160000:180000]),
    }
    for client, (train_part, validation_part) in partitions.items():
        with open(f'datasets/train_c{client}.jsonl', 'w+') as f:
            f.writelines(train_part)
        with open(f'datasets/validation_c{client}.jsonl', 'w+') as f:
            f.writelines(validation_part)
        # The "test" file mirrors the validation slice, as in the original.
        with open(f'datasets/test_c{client}.jsonl', 'w+') as f:
            f.writelines(validation_part)
# split_data()
# -
# We define the experimental setup in several steps. First we define the model.
def kknox_nn(param_dict):
    """Build and compile a small feed-forward binary classifier.

    Architecture: 61 inputs -> two ReLU layers of 50 units -> 1 sigmoid
    output. Optimized with SGD using ``param_dict['lr']`` as the learning
    rate and binary cross-entropy loss; no extra metrics are tracked.
    """
    net = Sequential()
    net.add(Dense(50, activation=tf.nn.relu, input_dim=61))
    net.add(Dense(50, activation=tf.nn.relu))
    net.add(Dense(1, activation=tf.nn.sigmoid))
    optimizer = tf.keras.optimizers.SGD(learning_rate=param_dict['lr'])  # momentum=mt
    net.compile(optimizer=optimizer,
                loss='binary_crossentropy',
                metrics=[])
    net.summary()
    return net
# We define all the metadata as well as the training configurations. Most important: rounds, epochs, batch_size, steps_per_epoch.
setup_dict = {
    # How the operator builds the model: it will call
    # kknox_nn({"lr": 0.20014587438532283}).
    "model_function":
        {
            "function": kknox_nn,
            "parameters": {
                "lr": 0.20014587438532283
            }
        },
    "git_version": 'e9339081b76ad3a89b1862bd38d8af26f0541f1c',  # the git commit version (for now set manually)
    "protocol": 'NN',  # the protocol name. NN is for neural networks
    "model_name": "test_model",
    "model_description": "this model is just to test the db",
    # Models/experiments flagged as testing are removed when Testing runs
    # with clear_db=True, so throwaway runs do not accumulate in the db.
    "testing": True,
    "rounds": 2,
    # Steps executed in every federated round.
    "round": ["fetch_model", "train_model", "send_training_loss", "send_validation_loss", "send_model", "aggregate"],
    # Steps executed once after the final round (evaluation only).
    "final_round":["fetch_model", "send_training_loss", "send_validation_loss","send_test_loss"],
    "training_config": {  # standard configuration for the training setup
        'epochs': 1,
        'verbose': 1,
        'batch_size': 2000,
        "validation_steps": 8,  # should be at most len(validation data)/batch_size
        "test_steps": 8,  # should be at most len(test data)/batch_size
        "steps_per_epoch": 65,  # should be at most len(training data)/batch_size
        "dataset": "",
        "skmetrics":["f1_score"],
        "tfmetrics":["AUC"]
    },
    "clients": clients,
    "experiment_name": "kkbox",
    "experiment_description": f"desc if nice experiment",
    "preprocessing": {
    },
}
# +
# Instantiate the operator and launch the experiment defined by setup_dict.
operator = Operator()
operator.define_and_start_experiment(setup_dict)
# -
| examples/NN/notebook_example_new_wrapper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="05Fw-ijTrMzo"
#i
# Load the scikit-learn handwritten digits dataset.
from sklearn.datasets import load_digits
digits = load_digits()
# + id="j5dAerANrRXi"
#ii
# 70/30 train/test split of the digit images and their labels.
from sklearn.model_selection import train_test_split
x_train_1,x_test_1,y_train_1,y_test_1=train_test_split(digits.data,digits.target,test_size=0.3)
# + colab={"base_uri": "https://localhost:8080/"} id="Uh7S-8E_rZgw" outputId="ab78ac62-f8e8-4745-c22b-13c1c138eeab"
#iii
import numpy as np
# All training-set indices, i.e. [0, len(x_train_1)).
print(np.arange(len(x_train_1)))
# + colab={"base_uri": "https://localhost:8080/"} id="4Wm6BJSDrsQv" outputId="33bd3a8e-f82f-4626-8168-9929d9e561b1"
#iv
# Keep the first 280 labels and mark the rest as -1 — presumably the
# "unlabeled" convention of a semi-supervised estimator; confirm downstream.
y_train_nonlabel = np.copy(y_train_1)
# A plain slice assignment is equivalent to the original fancy indexing with
# np.arange(len(x_train_1))[280:] and avoids building a throwaway index array.
y_train_nonlabel[280:] = -1
print(y_train_nonlabel)
| FINAL_Practical/AL_ML_advanced/codes/Q2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="Tce3stUlHN0L"
# ##### Copyright 2019 The TensorFlow Authors.
# + cellView="form" colab={} colab_type="code" id="tuOe1ymfHZPu"
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + [markdown] colab_type="text" id="qFdPvlXBOdUN"
# # Better performance with the tf.data API
# + [markdown] colab_type="text" id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/guide/data_performance"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />View on TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/guide/data_performance.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Run in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/en/guide/data_performance.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />View source on GitHub</a>
# </td>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/guide/data_performance.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Download notebook</a>
# </td>
# </table>
# + [markdown] colab_type="text" id="xHxb-dlhMIzW"
# ## Overview
#
# GPUs and TPUs can radically reduce the time required to execute a single training step.
# Achieving peak performance requires an efficient input pipeline that delivers data for the next step before the current step has finished.
# The `tf.data` API helps to build flexible and efficient input pipelines.
# This document demonstrates how to use the `tf.data` API to build highly performant TensorFlow input pipelines.
#
# Before you continue, read the "[Build TensorFlow input pipelines](./data.ipynb)" guide, to learn how to use the `tf.data` API.
# + [markdown] colab_type="text" id="UhNtHfuxCGVy"
# ## Resources
#
# * [Build TensorFlow input pipelines](./data.ipynb)
# * `tf.data.Dataset` API
# * [Analyze `tf.data` performance with the TF Profiler](./data_performance_analysis.md)
# + [markdown] colab_type="text" id="MUXex9ctTuDB"
# ## Setup
# + colab={} colab_type="code" id="IqR2PQG4ZaZ0"
import tensorflow as tf
import time
# + [markdown] colab_type="text" id="QthTHCKF-jKD"
# Throughout this guide, you will iterate across a dataset and measure the performance.
# Making reproducible performance benchmarks can be difficult, different factors impacting it:
#
# - the current CPU load,
# - the network traffic,
# - complex mechanisms like cache, etc.
#
# Hence, to provide a reproducible benchmark, build an artificial example.
# + [markdown] colab_type="text" id="3bU5gsSI-jKF"
# ### The dataset
#
# Define a class inheriting from `tf.data.Dataset` called `ArtificialDataset`.
# This dataset:
#
# - generates `num_samples` samples (default is 3)
# - sleeps for some time before the first item to simulate opening a file
# - sleeps for some time before producing each item to simulate reading data from a file
# + colab={} colab_type="code" id="zUQv4kCd-jKH"
class ArtificialDataset(tf.data.Dataset):
    """A tf.data.Dataset that simulates reading from a slow file.

    Yields ``num_samples`` elements, sleeping 0.03 s before the first one
    (simulated file open) and 0.015 s before each one (simulated read).
    """

    # NOTE: plain function, accessed through cls._generator (no binding).
    def _generator(num_samples):
        # Opening the file
        time.sleep(0.03)
        for sample_idx in range(num_samples):
            # Reading data (line, record) from the file
            time.sleep(0.015)
            yield (sample_idx,)

    def __new__(cls, num_samples=3):
        # Each element is a 1-tuple holding the sample index as an int64
        # tensor of shape (1,).
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=tf.dtypes.int64,
            output_shapes=(1,),
            args=(num_samples,)
        )
# + [markdown] colab_type="text" id="O9y1WjNv-jKL"
# This dataset is similar to the `tf.data.Dataset.range` one, adding a fixed delay at the beginning and between each sample.
# + [markdown] colab_type="text" id="FGK1Y4jn-jKM"
# ### The training loop
#
# Write a dummy training loop that measures how long it takes to iterate over a dataset.
# Training time is simulated.
# + colab={} colab_type="code" id="MIaM3u00-jKP"
def benchmark(dataset, num_epochs=2):
    """Iterate *dataset* for *num_epochs* epochs, simulating one training
    step (a 0.01 s sleep) per sample, and print the total wall-clock time."""
    begin = time.perf_counter()
    for _ in range(num_epochs):
        for _ in dataset:
            # Performing a training step
            time.sleep(0.01)
    tf.print("Execution time:", time.perf_counter() - begin)
# + [markdown] colab_type="text" id="KK58SuXS-jKT"
# ## Optimize performance
#
# To exhibit how performance can be optimized, you will improve the performance of the `ArtificialDataset`.
# + [markdown] colab_type="text" id="Xi8t26y7-jKV"
# ### The naive approach
#
# Start with a naive pipeline using no tricks, iterating over the dataset as-is.
# + colab={} colab_type="code" id="_gP7J1y4-jKY"
# Baseline measurement: naive sequential pipeline, no tf.data optimizations.
benchmark(ArtificialDataset())
# + [markdown] colab_type="text" id="Lxeat5dH-jKf"
# Under the hood, this is how your execution time was spent:
#
# 
#
# You can see that performing a training step involves:
#
# - opening a file if it hasn't been opened yet,
# - fetching a data entry from the file,
# - using the data for training.
#
# However, in a naive synchronous implementation like here, while your pipeline is fetching the data, your model is sitting idle.
# Conversely, while your model is training, the input pipeline is sitting idle.
# The training step time is thus the sum of all, opening, reading and training time.
#
# The next sections build on this input pipeline, illustrating best practices for designing performant TensorFlow input pipelines.
# + [markdown] colab_type="text" id="mfukBGNz-jKh"
# ### Prefetching
#
# Prefetching overlaps the preprocessing and model execution of a training step.
# While the model is executing training step `s`, the input pipeline is reading the data for step `s+1`.
# Doing so reduces the step time to the maximum (as opposed to the sum) of the training and the time it takes to extract the data.
#
# The `tf.data` API provides the `tf.data.Dataset.prefetch` transformation.
# It can be used to decouple the time when data is produced from the time when data is consumed.
# In particular, the transformation uses a background thread and an internal buffer to prefetch elements from the input dataset ahead of the time they are requested.
# The number of elements to prefetch should be equal to (or possibly greater than) the number of batches consumed by a single training step.
# You could either manually tune this value, or set it to `tf.data.experimental.AUTOTUNE` which will prompt the
# `tf.data` runtime to tune the value dynamically at runtime.
#
# Note that the prefetch transformation provides benefits any time there is an opportunity to overlap the work of a "producer" with the work of a "consumer."
# + colab={} colab_type="code" id="DHpUVqH1-jKi"
# Overlap data production and consumption with an autotuned prefetch buffer.
benchmark(
    ArtificialDataset()
    .prefetch(tf.data.experimental.AUTOTUNE)
)
# + [markdown] colab_type="text" id="h7z_kzo--jKn"
# 
#
# This time you can see that while the training step is running for sample 0, the input pipeline is reading the data for the sample 1, and so on.
# + [markdown] colab_type="text" id="52QMKfaY-jKq"
# ### Parallelizing data extraction
#
# In a real-world setting, the input data may be stored remotely (for example, GCS or HDFS).
# A dataset pipeline that works well when reading data locally might become bottlenecked on I/O when reading data remotely because of the following differences between local and remote storage:
#
# * **Time-to-first-byte:** Reading the first byte of a file from remote storage can take orders of magnitude longer than from local storage.
# * **Read throughput:** While remote storage typically offers large aggregate bandwidth, reading a single file might only be able to utilize a small fraction of this bandwidth.
#
# In addition, once the raw bytes are loaded into memory, it may also be necessary to deserialize and/or decrypt the data (e.g. [protobuf](https://developers.google.com/protocol-buffers/)), which requires additional computation.
# This overhead is present irrespective of whether the data is stored locally or remotely, but can be worse in the remote case if data is not prefetched effectively.
#
# To mitigate the impact of the various data extraction overheads, the `tf.data.Dataset.interleave` transformation can be used to parallelize the data loading step, interleaving the contents of other datasets (such as data file
# readers).
# The number of datasets to overlap can be specified by the `cycle_length` argument, while the level of parallelism can be specified by the `num_parallel_calls` argument. Similar to the `prefetch` transformation, the `interleave` transformation supports `tf.data.experimental.AUTOTUNE` which will delegate the decision about what level of parallelism to use to the `tf.data` runtime.
# + [markdown] colab_type="text" id="gs8O8Vbu-jKu"
# #### Sequential interleave
#
# The default arguments of the `tf.data.Dataset.interleave` transformation make it interleave single samples from two datasets sequentially.
# + colab={} colab_type="code" id="fDH12GiK-jKw"
# Sequential interleave: alternates samples from two datasets, no parallelism.
benchmark(
    tf.data.Dataset.range(2)
    .interleave(ArtificialDataset)
)
# + [markdown] colab_type="text" id="78CsSOnf-jK0"
# 
#
# This plot allows to exhibit the behavior of the `interleave` transformation, fetching samples alternatively from the two datasets available.
# However, no performance improvement is involved here.
# + [markdown] colab_type="text" id="j3cqqmYl-jK2"
# #### Parallel interleave
#
# Now use the `num_parallel_calls` argument of the `interleave` transformation.
# This loads multiple datasets in parallel, reducing the time waiting for the files to be opened.
# + colab={} colab_type="code" id="a3FQcTPY-jK4"
# Parallel interleave: the two source datasets are opened/read concurrently.
benchmark(
    tf.data.Dataset.range(2)
    .interleave(
        ArtificialDataset,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
)
# + [markdown] colab_type="text" id="RxRLPB6C-jLA"
# 
#
# This time, the reading of the two datasets is parallelized, reducing the global data processing time.
# + [markdown] colab_type="text" id="5ZCLFWyv-jLB"
# ### Parallelizing data transformation
#
# When preparing data, input elements may need to be pre-processed.
# To this end, the `tf.data` API offers the `tf.data.Dataset.map` transformation, which applies a user-defined function to each element of the input dataset.
# Because input elements are independent of one another, the pre-processing can be parallelized across multiple CPU cores.
# To make this possible, similarly to the `prefetch` and `interleave` transformations, the `map` transformation provides the `num_parallel_calls` argument to specify the level of parallelism.
#
# Choosing the best value for the `num_parallel_calls` argument depends on your hardware, characteristics of your training data (such as its size and shape), the cost of your map function, and what other processing is happening on the CPU at the same time.
# A simple heuristic is to use the number of available CPU cores.
# However, as for the `prefetch` and `interleave` transformation, the `map` transformation supports `tf.data.experimental.AUTOTUNE` which will delegate the decision about what level of parallelism to use to the `tf.data` runtime.
# + colab={} colab_type="code" id="GSkKetpx-jLD"
def mapped_function(s):
    """Identity map that burns ~0.03 s per element to simulate expensive
    pre-processing; tf.py_function runs the sleep as eager Python code."""
    # Do some hard pre-processing
    tf.py_function(lambda: time.sleep(0.03), [], ())
    return s
# + [markdown] colab_type="text" id="wiU7W_QC-jLI"
# #### Sequential mapping
#
# Start by using the `map` transformation without parallelism as a baseline example.
# + colab={} colab_type="code" id="ZSBvDpJG-jLL"
# Sequential map: the pre-processing cost is paid per sample, in series.
benchmark(
    ArtificialDataset()
    .map(mapped_function)
)
# + [markdown] colab_type="text" id="ngwMTDb6-jLR"
# 
#
# As for the [naive approach](#The-naive-approach), here the times spent for opening, reading, pre-processing (mapping) and training steps sum together for a single iteration.
# + [markdown] colab_type="text" id="U-10PE1D-jLU"
# #### Parallel mapping
#
# Now, use the same pre-processing function but apply it in parallel on multiple samples.
# + colab={} colab_type="code" id="F8AYLZbg-jLV"
# Parallel map: several samples are pre-processed concurrently.
benchmark(
    ArtificialDataset()
    .map(
        mapped_function,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
)
# + [markdown] colab_type="text" id="-MoJklzP-jLe"
# 
#
# Now, you can see on the plot that the pre-processing steps overlap, reducing the overall time for a single iteration.
# + [markdown] colab_type="text" id="ZY1Q9kJO-jLh"
# ### Caching
#
# The `tf.data.Dataset.cache` transformation can cache a dataset, either in memory or on local storage.
# This will save some operations (like file opening and data reading) from being executed during each epoch.
# + colab={} colab_type="code" id="xieLApaI-jLi"
# Cache after the expensive map: over 5 epochs, the map runs only in the first.
benchmark(
    ArtificialDataset()
    .map(  # Apply time consuming operations before cache
        mapped_function
    ).cache(
    ),
    5
)
# + [markdown] colab_type="text" id="KeMgW9XI-jLn"
# 
#
# When you cache a dataset, the transformations before the `cache` one (like the file opening and data reading) are executed only during the first epoch.
# The next epochs will reuse the data cached by the`cache` transformation.
#
# If the user-defined function passed into the `map` transformation is expensive, apply the `cache` transformation after the `map` transformation as long as the resulting dataset can still fit into memory or local storage.
# If the user-defined function increases the space required to store the dataset beyond the cache capacity, either apply it after the `cache` transformation or consider pre-processing your data before your training job to reduce resource usage.
# + [markdown] colab_type="text" id="i3NtGI3r-jLp"
# ### Vectorizing mapping
#
# Invoking a user-defined function passed into the `map` transformation has overhead related to scheduling and executing the user-defined function.
# We recommend vectorizing the user-defined function (that is, have it operate over a batch of inputs at once) and apply the `batch` transformation _before_ the `map` transformation.
#
# To illustrate this good practice, your artificial dataset is not suitable.
# The scheduling delay is around 10 microseconds (10e-6 seconds), far less than the tens of milliseconds used in the `ArtificialDataset`, and thus its impact is hard to see.
#
# For this example, use the base `tf.data.Dataset.range` function and simplify the training loop to its simplest form.
# + colab={} colab_type="code" id="xqtiYPmb-jLt"
# A cheap dataset of 10000 integers for micro-benchmarking map overhead.
fast_dataset = tf.data.Dataset.range(10000)

def fast_benchmark(dataset, num_epochs=2):
    """Print how long *num_epochs* bare iterations over *dataset* take."""
    begin = time.perf_counter()
    for _ in tf.data.Dataset.range(num_epochs):
        for _ in dataset:
            pass
    tf.print("Execution time:", time.perf_counter() - begin)
def increment(x):
    """Return *x* plus one (per the guide, tf.Tensor addition already
    handles batched inputs element-wise)."""
    return 1 + x
# + [markdown] colab_type="text" id="Fj2gmsMT-jL5"
# #### Scalar mapping
# + colab={} colab_type="code" id="Imn3SslJ-jMA"
# Scalar mapping: increment is invoked once per element, then batched.
fast_benchmark(
    fast_dataset
    # Apply function one item at a time
    .map(increment)
    # Batch
    .batch(256)
)
# + [markdown] colab_type="text" id="BWUNbPqv-jMF"
# 
#
# The plot above illustrate what is going on (with less samples).
# You can see that the mapped function is applied for each sample.
# While this function is very fast, it has some overhead that impact the time performance.
# + [markdown] colab_type="text" id="tDVSM0A--jMG"
# #### Vectorized mapping
# + colab={} colab_type="code" id="nAw1mDLw-jMI"
# Vectorized mapping: increment is invoked once per 256-element batch.
fast_benchmark(
    fast_dataset
    .batch(256)
    # Apply function on a batch of items
    # The tf.Tensor.__add__ method already handle batches
    .map(increment)
)
# + [markdown] colab_type="text" id="DbMteMY9-jMO"
# 
#
# This time, the mapped function is called once and applies to a batch of sample.
# While the function could takes more time to execute, the overhead appear only once, improving the overall time performance.
# + [markdown] colab_type="text" id="hfueG0Wj-jMR"
# ### Reducing memory footprint
#
# A number of transformations, including `interleave`, `prefetch`, and `shuffle`,
# maintain an internal buffer of elements. If the user-defined function passed
# into the `map` transformation changes the size of the elements, then the
# ordering of the map transformation and the transformations that buffer elements
# affects the memory usage. In general, we recommend choosing the order that
# results in lower memory footprint, unless different ordering is desirable for
# performance.
#
# #### Caching partial computations
#
# It is recommended to cache the dataset after the `map` transformation except if this transformation makes the data too big to fit in memory.
# A trade-off can be achieved if your mapped function can be split in two parts: a time consuming one and a memory consuming part.
# In this case, you can chain your transformations like below:
#
# ```python
# dataset.map(time_consuming_mapping).cache().map(memory_consuming_mapping)
# ```
#
# This way, the time consuming part is only executed during the first epoch, and you avoid using too much cache space.
# + [markdown] colab_type="text" id="MYOHG69M-jMT"
# ## Best practice summary
#
# Here is a summary of the best practices for designing performant TensorFlow
# input pipelines:
#
# * [Use the `prefetch` transformation](#Pipelining) to overlap the work of a producer and consumer.
# * [Parallelize the data reading transformation](#Parallelizing-data-extraction) using the `interleave` transformation.
# * [Parallelize the `map` transformation](#Parallelizing-data-transformation) by setting the `num_parallel_calls` argument.
# * [Use the `cache` transformation](#Caching) to cache data in memory during the first epoch
# * [Vectorize user-defined functions](#Map-and-batch) passed in to the `map` transformation
# * [Reduce memory usage](#Reducing-memory-footprint) when applying the `interleave`, `prefetch`, and `shuffle` transformations.
# + [markdown] colab_type="text" id="mP_EMFsQ-jMU"
# ## Reproducing the figures
#
# Note: The rest of this notebook is about how to reproduce the above figures, feel free to play around with this code, but understanding it is not an essential part of this tutorial.
#
# To go deeper in the `tf.data.Dataset` API understanding, you can play with your own pipelines.
# Below is the code used to plot the images from this guide.
# It can be a good starting point, showing some workarounds for common difficulties such as:
#
# - Execution time reproducibility;
# - Mapped functions eager execution;
# - `interleave` transformation callable.
# + colab={} colab_type="code" id="7M_jFLer-jMV"
import itertools
from collections import defaultdict
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# + [markdown] colab_type="text" id="Z3pjnxtK-jMa"
# ### The dataset
#
# Similar to the `ArtificialDataset` you can build a dataset returning the time spent in each step.
# + colab={} colab_type="code" id="OgGl4U7t-jMc"
class TimeMeasuredDataset(tf.data.Dataset):
    """Like ArtificialDataset, but each element also carries timing metadata.

    Every yielded element is a (steps, timings, counters) triple describing
    the simulated "Open" and "Read" steps:
      steps:    shape (2, 1) strings — step identifiers
      timings:  shape (2, 2) floats — (start timestamp, duration) per step
      counters: shape (2, 3) ints — (instance, epoch, sample) per step,
                with sample -1 on the "Open" row
    """
    # OUTPUT: (steps, timings, counters)
    OUTPUT_TYPES = (tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32)
    OUTPUT_SHAPES = ((2, 1), (2, 2), (2, 3))

    _INSTANCES_COUNTER = itertools.count()  # Number of datasets generated
    _EPOCHS_COUNTER = defaultdict(itertools.count)  # Number of epochs done for each dataset

    def _generator(instance_idx, num_samples):
        epoch_idx = next(TimeMeasuredDataset._EPOCHS_COUNTER[instance_idx])

        # Opening the file
        open_enter = time.perf_counter()
        time.sleep(0.03)
        open_elapsed = time.perf_counter() - open_enter

        for sample_idx in range(num_samples):
            # Reading data (line, record) from the file
            read_enter = time.perf_counter()
            time.sleep(0.015)
            read_elapsed = time.perf_counter() - read_enter

            yield (
                [("Open",), ("Read",)],
                [(open_enter, open_elapsed), (read_enter, read_elapsed)],
                [(instance_idx, epoch_idx, -1), (instance_idx, epoch_idx, sample_idx)]
            )
            # After the first sample the "Open" step has been reported;
            # negative values will be filtered out later.
            open_enter, open_elapsed = -1., -1.

    def __new__(cls, num_samples=3):
        return tf.data.Dataset.from_generator(
            cls._generator,
            output_types=cls.OUTPUT_TYPES,
            output_shapes=cls.OUTPUT_SHAPES,
            args=(next(cls._INSTANCES_COUNTER), num_samples)
        )
# + [markdown] colab_type="text" id="YQqDP4jk-jMj"
# This dataset provides samples of shape `[[2, 1], [2, 2], [2, 3]]` and of type `[tf.dtypes.string, tf.dtypes.float32, tf.dtypes.int32]`.
# Each sample is:
# ```
# (
# [("Open",), ("Read",)],
# [(t0, d), (t0, d)],
# [(i, e, -1), (i, e, s)]
# )
# ```
#
# Where:
#
# - `Open` and `Read` are steps identifiers
# - `t0` is the timestamp when the corresponding step started
# - `d` is the time spent in the corresponding step
# - `i` is the instance index
# - `e` is the epoch index (number of times the dataset has been iterated)
# - `s` is the sample index
# + [markdown] colab_type="text" id="IQK913bB-jMm"
# ### The iteration loop
#
# Make the iteration loop a little bit more complicated to aggregate all timings.
# This will only work with datasets generating samples as detailed above.
# + colab={} colab_type="code" id="zAy-K_Cq-jMn"
def timelined_benchmark(dataset, num_epochs=2):
    """Iterate `dataset` for `num_epochs`, simulating one training step per
    sample, and accumulate every (step, timing, counter) record produced.

    Expects elements shaped like TimeMeasuredDataset's output
    (steps, times, values). Returns a dict of the three stacked tensors,
    suitable for draw_timeline().
    """
    # Initialize accumulators (0 rows; rows are appended via tf.concat below)
    steps_acc = tf.zeros([0, 1], dtype=tf.dtypes.string)
    times_acc = tf.zeros([0, 2], dtype=tf.dtypes.float32)
    values_acc = tf.zeros([0, 3], dtype=tf.dtypes.int32)
    start_time = time.perf_counter()
    for epoch_num in range(num_epochs):
        epoch_enter = time.perf_counter()
        for (steps, times, values) in dataset:
            # Record dataset preparation informations
            steps_acc = tf.concat((steps_acc, steps), axis=0)
            times_acc = tf.concat((times_acc, times), axis=0)
            values_acc = tf.concat((values_acc, values), axis=0)
            # Simulate training time
            train_enter = time.perf_counter()
            time.sleep(0.01)
            train_elapsed = time.perf_counter() - train_enter
            # Record training informations (counters copied from the sample's last row)
            steps_acc = tf.concat((steps_acc, [["Train"]]), axis=0)
            times_acc = tf.concat((times_acc, [(train_enter, train_elapsed)]), axis=0)
            values_acc = tf.concat((values_acc, [values[-1]]), axis=0)
        epoch_elapsed = time.perf_counter() - epoch_enter
        # Record epoch informations (instance/sample counters set to -1)
        steps_acc = tf.concat((steps_acc, [["Epoch"]]), axis=0)
        times_acc = tf.concat((times_acc, [(epoch_enter, epoch_elapsed)]), axis=0)
        values_acc = tf.concat((values_acc, [[-1, epoch_num, -1]]), axis=0)
        time.sleep(0.001)
    tf.print("Execution time:", time.perf_counter() - start_time)
    return {"steps": steps_acc, "times": times_acc, "values": values_acc}
# + [markdown] colab_type="text" id="jw_WSQC8-jMs"
# ### The plotting method
#
# Finally, define a function able to plot a timeline given the values returned by the `timelined_benchmark` function.
# + colab={} colab_type="code" id="1j73RxiP-jMw"
def draw_timeline(timeline, title, width=0.5, annotate=False, save=False):
    """Plot the records collected by timelined_benchmark() as one
    broken-bar row per step type.

    Args:
        timeline: dict with "steps", "times", "values" tensors.
        title: figure title (also used to build the file name when save=True).
        width: minimum x-axis extent, in seconds.
        annotate: if True, print the (i, e, s) counters next to each bar.
        save: if True, write the figure to "<title>.svg".
    """
    # Remove invalid entries (negative times, or empty steps) from the timelines
    # NOTE(review): despite its name, `invalid_mask` is True for the rows that
    # are KEPT (valid entries) — consider renaming to `valid_mask`.
    invalid_mask = np.logical_and(timeline['times'] > 0, timeline['steps'] != b'')[:,0]
    steps = timeline['steps'][invalid_mask].numpy()
    times = timeline['times'][invalid_mask].numpy()
    values = timeline['values'][invalid_mask].numpy()
    # Get a set of different steps, ordered by the first time they are encountered
    step_ids, indices = np.stack(np.unique(steps, return_index=True))
    step_ids = step_ids[np.argsort(indices)]
    # Shift the starting time to 0 and compute the maximal time value
    min_time = times[:,0].min()
    times[:,0] = (times[:,0] - min_time)
    end = max(width, (times[:,0]+times[:,1]).max() + 0.01)
    cmap = mpl.cm.get_cmap("plasma")
    plt.close()
    # One subplot per step type, sharing the x (time) axis
    fig, axs = plt.subplots(len(step_ids), sharex=True, gridspec_kw={'hspace': 0})
    fig.suptitle(title)
    fig.set_size_inches(17.0, len(step_ids))
    plt.xlim(-0.01, end)
    for i, step in enumerate(step_ids):
        step_name = step.decode()
        ax = axs[i]
        ax.set_ylabel(step_name)
        ax.set_ylim(0, 1)
        ax.set_yticks([])
        ax.set_xlabel("time (s)")
        ax.set_xticklabels([])
        ax.grid(which="both", axis="x", color="k", linestyle=":")
        # Get timings and annotation for the given step
        entries_mask = np.squeeze(steps==step)
        serie = np.unique(times[entries_mask], axis=0)
        annotations = values[entries_mask]
        ax.broken_barh(serie, (0, 1), color=cmap(i / len(step_ids)), linewidth=1, alpha=0.66)
        if annotate:
            # NOTE(review): `width` here shadows the function parameter of the
            # same name; harmless because the parameter is not read afterwards.
            for j, (start, width) in enumerate(serie):
                annotation = "\n".join([f"{l}: {v}" for l,v in zip(("i", "e", "s"), annotations[j])])
                ax.text(start + 0.001 + (0.001 * (j % 2)), 0.55 - (0.1 * (j % 2)), annotation,
                        horizontalalignment='left', verticalalignment='center')
    if save:
        # Spaces in the title become underscores in the saved file name
        plt.savefig(title.lower().translate(str.maketrans(" ", "_")) + ".svg")
# + [markdown] colab_type="text" id="xto6GNdO-jM1"
# ### Use wrappers for mapped function
#
# To run mapped function in an eager context, you have to wrap them inside a `tf.py_function` call.
# + colab={} colab_type="code" id="39v7JD4L-jM2"
def map_decorator(func):
    """Decorator that makes a mapped function run eagerly inside tf.data.

    Wrapping `func` in tf.py_function keeps it out of AutoGraph tracing, so
    Python-side code such as time.perf_counter() keeps working per call.
    """
    def wrapper(steps, times, values):
        # Outputs keep exactly the dtypes of the incoming tensors.
        output_dtypes = (steps.dtype, times.dtype, values.dtype)
        return tf.py_function(
            func,
            inp=(steps, times, values),
            Tout=output_dtypes
        )
    return wrapper
# + [markdown] colab_type="text" id="7eJRCinb-jM5"
# ### Pipelines comparison
# + colab={} colab_type="code" id="YwX4ndHE-jM6"
_batch_map_num_items = 50  # number of samples per dataset / batch in the pipelines below
def dataset_generator_fun(*args):
    """Return a fresh TimeMeasuredDataset with _batch_map_num_items samples.

    *args absorbs (and ignores) the element passed in by flat_map/interleave.
    """
    return TimeMeasuredDataset(num_samples=_batch_map_num_items)
# + [markdown] colab_type="text" id="EwxJT2aR-jNA"
# #### Naive
# + colab={} colab_type="code" id="wLKgurx_-jNC"
@map_decorator
def naive_map(steps, times, values):
    """Per-sample (scalar) map: sleeps to mimic work, then appends one "Map"
    record to the sample's step/timing/counter tensors."""
    map_enter = time.perf_counter()
    time.sleep(0.001)  # Time consuming step
    time.sleep(0.0001)  # Memory consuming step
    map_elapsed = time.perf_counter() - map_enter
    # Counters for the new row are copied from the sample's last row.
    return (
        tf.concat((steps, [["Map"]]), axis=0),
        tf.concat((times, [[map_enter, map_elapsed]]), axis=0),
        tf.concat((values, [values[-1]]), axis=0)
    )
# Benchmark the naive pipeline: sequential reads, per-sample map,
# no parallelism, caching or prefetching.
naive_timeline = timelined_benchmark(
    tf.data.Dataset.range(2)
    .flat_map(dataset_generator_fun)
    .map(naive_map)
    .batch(_batch_map_num_items, drop_remainder=True)
    .unbatch(),
    5
)
# + [markdown] colab_type="text" id="EJqUMDsO-jNG"
# ### Optimized
# + colab={} colab_type="code" id="HYHcwabr-jNH"
@map_decorator
def time_consuming_map(steps, times, values):
    """Vectorized (per-batch) map: sleep time scales with the batch size,
    then a "1st map" record is appended to every sample in the batch."""
    map_enter = time.perf_counter()
    time.sleep(0.001 * values.shape[0])  # Time consuming step
    map_elapsed = time.perf_counter() - map_enter
    # tf.tile repeats the new record across the batch dimension.
    # values[:][-1][0] reads the last sample's first counter row
    # (values[:] is a no-op copy, so this equals values[-1][0]).
    return (
        tf.concat((steps, tf.tile([[["1st map"]]], [steps.shape[0], 1, 1])), axis=1),
        tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
        tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
    )
@map_decorator
def memory_consuming_map(steps, times, values):
    """Vectorized (per-batch) map mimicking a memory-heavy step; appends a
    "2nd map" record to every sample in the batch."""
    map_enter = time.perf_counter()
    time.sleep(0.0001 * values.shape[0])  # Memory consuming step
    map_elapsed = time.perf_counter() - map_enter
    # Use tf.tile to handle batch dimension
    return (
        tf.concat((steps, tf.tile([[["2nd map"]]], [steps.shape[0], 1, 1])), axis=1),
        tf.concat((times, tf.tile([[[map_enter, map_elapsed]]], [times.shape[0], 1, 1])), axis=1),
        tf.concat((values, tf.tile([[values[:][-1][0]]], [values.shape[0], 1, 1])), axis=1)
    )
# Benchmark the optimized pipeline: every tf.data best practice from the
# guide applied in one chain (parallel reads, vectorized maps, cache, prefetch).
optimized_timeline = timelined_benchmark(
    tf.data.Dataset.range(2)
    .interleave(  # Parallelize data reading
        dataset_generator_fun,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .batch(  # Vectorize your mapped function
        _batch_map_num_items,
        drop_remainder=True)
    .map(  # Parallelize map transformation
        time_consuming_map,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .cache()  # Cache data
    .map(  # Reduce memory usage
        memory_consuming_map,
        num_parallel_calls=tf.data.experimental.AUTOTUNE
    )
    .prefetch(  # Overlap producer and consumer works
        tf.data.experimental.AUTOTUNE
    )
    .unbatch(),
    5
)
# + colab={} colab_type="code" id="b_CSUbxL-jNK"
# Render both benchmarks; 15 fixes the same minimum x-extent (seconds)
# on each figure so the two timelines are directly comparable.
draw_timeline(naive_timeline, "Naive", 15)
# + colab={} colab_type="code" id="DoovY7qr-jNR"
draw_timeline(optimized_timeline, "Optimized", 15)
| site/en-snapshot/guide/data_performance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/", "height": 70} colab_type="code" id="U9UlZOvJm8_4" outputId="27569d23-5c67-4927-8f00-b3abe5dad5e8"
# install CoMut and packages - DO NOT CHANGE THIS CELL
# !pip -q install comut
from comut import comut
from comut import fileparsers
import palettable
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'
# if running in Colab, import tutorial data
import sys
if 'google.colab' in sys.modules:
    # Fetch the example data directory from the GitHub repo via svn export.
    # !apt-get install subversion -q > /dev/null
    # !svn export -q --force https://github.com/vanallenlab/comut/trunk/examples/tutorial_data
    plt.rcParams['font.family'] = 'Liberation Sans' # Colab lacks most fonts, so download the notebook locally to change fonts
# + [markdown] colab_type="text" id="yqLiabYfdeYI"
# # Colab Quickstart
#
# This notebook contains easily editable code to generate basic comutation plots with CoMut. The example is currently loaded with example data but can be changed to user data.
#
# # Specify MAF file and generate comut
#
# First, you need to upload your merged MAF file to this notebook, which should contain the mutation data for all your samples. Click the folder icon to the left to open the files menu. Drag and drop your MAF file from your computer into this area - it should upload to google drive, though you may need to refresh to see it. You can then specify the path to your MAF below (it will not have the tutorial_data/melanoma_example prefix).
#
# You should also choose the genes you want to visualize and any optional parameters for the comut. Then run the entire notebook by clicking `Runtime -> Run All` in the upper left. Your comut will be created, and you can download it by right-clicking it in the files menu and clicking `Download` (you may need to refresh the files menu to see it).
#
# __Note__: If you're viewing this file locally and not in colab, simply replace the maf path with your own. Your comut will be saved to the directory of this notebook.
# + colab={} colab_type="code" id="Yzdn6CypgTas"
# don't change the names of these variables
# The MAF must contain Tumor_Sample_Barcode, Hugo_Symbol, and Variant_Classification columns.
maf_path = 'tutorial_data/melanoma_example/melanoma.maf' # change this to the path of your MAF, e.g. 'mutation.maf'
genes = ['CDKN2A', 'TP53', 'NF1', 'NRAS', 'BRAF'] # change this list to the genes you want in the comut, from bottom to top
# + [markdown] colab_type="text" id="lO0wv7t0f3OE"
# # Optional parameters
#
# These parameters change the appearance of the comut. Change nothing to use the defaults. For more information on these parameters, visit the [documentation](https://github.com/vanallenlab/comut/blob/master/examples/documentation.ipynb) of CoMut.
# + colab={} colab_type="code" id="aBGK9YkAjHMh"
# Appearance parameters for the comut; leave as-is to use the defaults.
figsize = (10,3) # the size of the figure - changes the shape of the squares in the comut
dpi = 300 # change the output resolution
extension = '.svg' # extension for saving - can otherwise be .pdf, .png, .jpg, etc
# You can provide a list of samples to order your comut (from left to right). If none is provided, it will be calculated from your MAF.
samples = None
x_padding = 0.04 # the x distance between patches in comut
y_padding = 0.04 # the y distance between patches in comut
tri_padding = 0.03 # the distance between triangles in comut
# mapping of mutation type to color. Only these mutation types are shown. Can be any valid matplotlib color, e.g. 'blue', #ffa500, or (1,1,1).
vivid_10 = palettable.cartocolors.qualitative.Vivid_10.mpl_colors
mut_mapping = {'Missense': vivid_10[5], 'Nonsense': vivid_10[0], 'In frame indel': vivid_10[1],
               'Frameshift indel': vivid_10[4], 'Splice site': vivid_10[9]}
# + [markdown] colab_type="text" id="bUsF36SJpq1S"
# # Creating the basic comut
# + [markdown] colab_type="text" id="CQGnRGuzpdg9"
# First we create the CoMut object and define the samples (if samples weren't defined, it will take them from the MAF).
# + colab={} colab_type="code" id="sOoKG72Aj3mq"
# Create the CoMut container; data layers are added onto it below.
example_comut = comut.CoMut()
example_comut.samples = samples  # None -> sample order will be calculated from the MAF
# + [markdown] colab_type="text" id="h6C-3q7apgFm"
# Then we read in the MAF and subset it to nonsilent mutations
# + colab={"base_uri": "https://localhost:8080/", "height": 460} colab_type="code" id="7_SpteymddSY" outputId="1e527d27-5f7a-4c6d-8b07-173f67b7d384"
# read in the maf
mutation_data = pd.read_csv(maf_path, sep = '\t')
# Use a helper function to parse the maf. This requires a Tumor_Sample_Barcode, Hugo_Symbol, and Variant_Classification column. It will
# rename variant types to be more visually appealing (eg Missense_Mutation -> Missense). Only certain coding variants are shown.
mutation_df = fileparsers.parse_maf(mutation_data)
mutation_df.head(2)  # preview the first two parsed rows
# + [markdown] colab_type="text" id="5odqqHj-pnCH"
# Then we add the mutation data to the CoMut object and plot it, saving the figure
# + colab={} colab_type="code" id="CArL-sFHpvca"
# add data to the CoMut object
example_comut.add_categorical_data(mutation_df, name = 'Mutation type', category_order = genes, mapping = mut_mapping, tick_style = 'italic')
# plot comut and add unified legend
example_comut.plot_comut(x_padding = x_padding, y_padding = y_padding, tri_padding = tri_padding, figsize = figsize)
example_comut.add_unified_legend()
# + [markdown] colab_type="text" id="zJW4IK6Vs2XB"
# Then the comut can be saved. An svg is often desirable because it is infinitely scaleable (as you zoom in, resolution does not decrease).
# + colab={} colab_type="code" id="fKd29rZgqiRY"
# Save the figure using the extension/dpi chosen in the parameters cell.
example_comut.figure.savefig('mutation_comut' + extension, dpi = dpi, bbox_inches = 'tight')
# + [markdown] colab_type="text" id="mOViVVLwOfbQ"
# # How to add clinical data
#
# The example below adds two rows of clinical data to the comut - purity (a continuous variable) and primary type (a categorical variable). Should you wish to add your own clinical data, simply upload your data and mirror the current code. 1) Read in any uploaded files with pandas, 2) specify a color mapping, and 3) add data to the comut with `comut.add_continuous_data` or `comut.add_categorical_data`
#
# First we read in the data as dataframes:
# + colab={} colab_type="code" id="s8vhottTOfws"
# Load clinical annotations: tumor purity (continuous) and primary type (categorical).
purity_data = pd.read_csv('tutorial_data/melanoma_example/purity.tsv', sep = '\t') # the path to the purity data
purity_data.head(2)
# + colab={} colab_type="code" id="jzmqXZr5Oozm"
primary_type_data = pd.read_csv('tutorial_data/melanoma_example/primary_type.tsv', sep = '\t') # path to primary type data
primary_type_data.head(2)
# + [markdown] colab_type="text" id="uhN10unMO8PX"
# Then we define color mappings for purity and primary type that define how they appear in the comut:
# + colab={} colab_type="code" id="91BhWehPOvR8"
# color map for continuous values. Could alternatively be a matplotlib colormap eg 'viridis'
purp_7 = palettable.cartocolors.sequential.Purp_7.mpl_colormap
# categorical mapping: one color per primary tumor type
primary_type_mapping = {'Skin':vivid_10[6], 'Acral': vivid_10[2], 'Occult': vivid_10[-1], 'Mucosal': vivid_10[4]}
# + [markdown] colab_type="text" id="p0cAnMkOO-gn"
# Then we add the clinical data to the CoMut object and plot it
# + colab={} colab_type="code" id="nYvgEl5kO5-K"
# Add the two clinical rows (continuous purity, categorical primary type).
example_comut.add_continuous_data(purity_data, name = 'Purity', mapping = purp_7)
example_comut.add_categorical_data(primary_type_data, name='Primary type', mapping = primary_type_mapping)
# plot CoMut and save
example_comut.plot_comut(x_padding = x_padding, y_padding = y_padding, tri_padding = tri_padding, hspace = 0.03, figsize = figsize)
example_comut.add_unified_legend()
example_comut.figure.savefig('mutation_comut_clinical' + extension, bbox_inches = 'tight', dpi = dpi)
# + [markdown] colab_type="text" id="aLm7n1R6PE_C"
# # How to Add Bar Graphs
#
# The example below continues and adds a bar graph on top of the comut. The process for adding your own bar data is the same as below - 1) upload and read in the file with pandas, 2) define a mapping, and 3) add it to the comut object and plot.
#
# First we read in the data as a dataframe:
# + colab={} colab_type="code" id="tYZvB1B-PAmR"
# Load the per-sample mutational burden used for the stacked top bar graph.
mut_burden_data = pd.read_csv('tutorial_data/melanoma_example/mutational_burden.tsv', sep = '\t')
mut_burden_data.head(2)
# + [markdown] colab_type="text" id="gFUh4ZetPLOA"
# Then we define a mapping for how the categories should appear in the comut:
# + colab={} colab_type="code" id="CDVJfUYxPJIZ"
# map non-sample columns in dataframe to colors
burden_mapping = {'Clonal':vivid_10[8], 'Subclonal':purp_7(0.5)}
# + [markdown] colab_type="text" id="x-VmuxPCPPP6"
# Then we add the data to the CoMut object and plot it:
# + colab={} colab_type="code" id="yv6J0gEnPNXR"
example_comut.add_bar_data(mut_burden_data, name = 'Mutation clonality', mapping = burden_mapping, stacked = True,
                           ylabel = 'Nonsyn.\nMutations')
# plot CoMut
example_comut.plot_comut(x_padding = x_padding, y_padding = y_padding, tri_padding = tri_padding, hspace = 0.03, figsize = figsize)
example_comut.add_unified_legend(axis_name = 'Mutation clonality', ncol = 2)
example_comut.figure.savefig('mutation_comut_clinical_bar' + extension, bbox_inches = 'tight', dpi = dpi)
# + [markdown] colab_type="text" id="QFy73DgGPSuw"
# # How to Add a Side Bar Graph
#
# We continue the example by adding a side bar indicating the number of samples mutated in a gene. The process remains the same - 1) upload and read in the file with pandas, 2) define a mapping, and 3) add it to the comut and plot.
#
# First we read in the data:
# + colab={} colab_type="code" id="IgWCCkz-PQYZ"
# Load per-gene mutation frequencies for the left-hand side bar graph.
mut_freq = pd.read_csv('tutorial_data/melanoma_example/mutation_frequency.tsv', sep = '\t')
mut_freq.head(2)
# + [markdown] colab_type="text" id="iYUUYLWNPW-l"
# Then we define a mapping for how the side bar graph should appear in the comut:
# + colab={} colab_type="code" id="pCqJz2yjPT2N"
side_mapping = {'Mutated samples': 'darkgrey'}
# + [markdown] colab_type="text" id="2hgTJsT2Pabj"
# Then we add the data to the comut and plot it
# + colab={} colab_type="code" id="1PvwIOGBPZm1"
example_comut.add_side_bar_data(mut_freq, paired_name = 'Mutation type', name = 'Mutated samples', position = 'left',
                                mapping = side_mapping, xlabel = 'Mutated samples')
# plot comut and save; widths sets the relative width of the side bar vs the comut
example_comut.plot_comut(x_padding = x_padding, y_padding = y_padding, tri_padding = tri_padding, hspace = 0.03, figsize = figsize, widths = [0.5, 5])
example_comut.add_unified_legend(axis_name = 'Mutation clonality', ncol = 2)
example_comut.figure.savefig('mutation_comut_clinical_bar_side' + extension, bbox_inches = 'tight', dpi = dpi)
| examples/quickstart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ## A three-state master equation
#
# Consider the following kinetic model:
#
# <img src="three-state.pdf">
#
# This model is represented by the following non-zero rates: $k_{21}$ = 100 s$^{-1}$, $k_{12}$ = 200 s$^{-1}$, $k_{32}$ = 10 s$^{-1}$, $k_{23}$ = 1 s$^{-1}$, and a resulting rate matrix of
#
# $$
# \mathbf{K} =
# \begin{pmatrix}
# -\sum_{i \neq 1} k_{i1} & k_{12} & k_{13} \\
# k_{21} & -\sum_{i \neq 2}k_{i2} & k_{23} \\
# k_{31} & k_{32} & -\sum_{i \neq 3}k_{i3}
# \end{pmatrix} = \begin{pmatrix}
# -100 & 200 & 0 \\
# 100 & -210 & 1 \\
# 0 & 10 & -1
# \end{pmatrix}
# $$
#
#
#
#
#
import numpy as np
# Rate matrix K for the three-state model: off-diagonal K[i][j] = k_{ij}
# (rate from state j to state i), diagonals are minus the column sums,
# matching the matrix written out in the markdown above.
K = np.array( [[-100., 200., 0.], [100., -210., 1.], [0., 10., -1.]])
# +
from scipy.linalg import eig
### Get the right eigenvectors ###
evals, evecs = eig(K)
# sort evals and evecs from |\lambda_i| smallest (0) to biggest
I = np.argsort(np.abs(evals))
print I
evals = np.real(evals[I])
evecs = evecs[:,I]
print "evals", evals
print "evecs", evecs
### Get the left eigenvectors ###
# (left eigenvectors of K are right eigenvectors of K^T)
left_evals, left_evecs = eig(np.transpose(K))
# sort evals and evecs from |\lambda_i| smallest (0) to biggest
I = np.argsort(np.abs(left_evals))
print I
left_evals = np.real(left_evals[I])
left_evecs = left_evecs[:,I]
print "evals", left_evals
print "left_evecs", left_evecs
# modify the equilibrium evecs to reflect equilibrium populations
# (normalize the lambda=0 right eigenvector to sum to 1; its left partner is all ones)
evecs[:,0] = evecs[:,0]/evecs[:,0].sum()
left_evecs[:,0] = np.ones( evecs[:,0].shape )
print 'evecs[:,0]', evecs[:,0], 'left_evecs[:,0]', left_evecs[:,0]
# flip signs if the amplitudes are negative
p0 = np.array((1.,0.,0.))
amplitudes = []
for i in range(3):
    amp = np.dot(left_evecs[:,i],p0)
    print 'amp', i, ':', amp
    if amp < 0.0:
        evecs[:,i] = - evecs[:,i]
        left_evecs[:,i] = -left_evecs[:,i]
# Normalize the non-equilibrium left eigenvectors
for i in range(1,3):
    product = np.dot(left_evecs[:,i],evecs[:,i])
    print 'product', i, ':', product
    left_evecs[:,i] = left_evecs[:,i]/product
## Check that left and right products are \delta_ij
print np.dot(left_evecs[:,0], evecs[:,0])
print np.dot(left_evecs[:,1], evecs[:,1])
print np.dot(left_evecs[:,2], evecs[:,2])
# -
from matplotlib import pyplot as plt
# %matplotlib inline
# Plot the nonzero relaxation eigenvalues as horizontal levels (the
# equilibrium eigenvalue lambda_1 = 0 is omitted by starting at index 1).
plt.figure(figsize=(2,6))
for i in range(1,3):
    plt.plot([0,1],[evals[i],evals[i]],'k-')
plt.xticks([])
plt.ylabel('$\lambda_i$ (s$^{-1}$)', fontsize=24)
plt.tight_layout()
plt.savefig('spectrum.pdf')
# +
# Left column: each right eigenvector as a bar chart (positive lobes blue,
# negative lobes red). Right column: the corresponding relaxation
# amplitude amp * exp(lambda_i * t) starting from p0.
plt.figure(figsize=(6,6))
for i in range(3):
    # plot eigenmodes
    plt.subplot(3,2,2*i+1)
    pos_evec = np.maximum(evecs[:,i], np.zeros(evecs[:,i].shape))
    print 'pos_evec', pos_evec
    if sum(pos_evec) > 0:
        plt.bar([1,2,3], pos_evec, color='b')
    neg_evec = np.minimum(evecs[:,i], np.zeros(evecs[:,i].shape))
    print 'neg_evec', neg_evec
    if sum(neg_evec) < 0:
        plt.bar([1,2,3], neg_evec, color='r')
    #plt.xticklabels([1,2,3])
    plt.plot([1.0,4.0],[0,0],'k-')
    plt.xticks([1.5, 2.5, 3.5], ["1", "2", "3"], fontsize=14)
    plt.ylim(-1,1)
    plt.ylabel('$\\psi_%d^R$'%(i+1), fontsize=24)
    # plot relaxation with amplitude
    plt.subplot(3,2,2*i+2)
    amp = np.dot(left_evecs[:,i],p0)
    print amp
    t = np.arange(0,1.,0.001) # in seconds
    y = amp*np.exp(evals[i]*t)
    plt.plot(t,y,'k-', linewidth=2)
    if i == 2:
        # the fastest mode decays quickly, so zoom the time axis
        plt.xlabel('time (s)')
        plt.xlim(0,0.1)
    else:
        plt.xlim(0,1)
    plt.ylim(0,1.1)
    plt.yticks([0,1])
plt.tight_layout()
#plt.ylabel('$\lambda_i$ (s$^{-1}$)')
plt.savefig('evecs.pdf')
# -
# Scratch cells below: quick interactive checks, not part of the analysis.
f = np.array([1,2,3,4])
np.minimum(f, np.zeros(f.shape))
# +
# Left evecs
from scipy.linalg import eig
# -
# 1/sqrt(3) squared — sanity check on eigenvector normalization
0.57735027*0.57735027
| images_for_wiki/math_background/notes-on-chemical-master-eq/three-state-ME.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tabular Data Explanation Benchmarking: Xgboost Regression
# This notebook demonstrates how to use the benchmark utility to benchmark the performance of an explainer for tabular data. In this demo, we showcase explanation performance for tree explainer. The metrics used to evaluate are "keep positive" and "keep negative". The masker used is Independent Masker but can also generalize to other tabular maskers.
#
# The new benchmark utility uses the new API with MaskedModel as wrapper around user-imported model and evaluates masked values of inputs.
import numpy as np
import pandas as pd
import xgboost
import shap
import shap.benchmark as benchmark
from sklearn.model_selection import train_test_split
# ### Load Data and Model
# +
# create trained model for prediction function
untrained_model = xgboost.XGBRegressor(n_estimators=100, subsample=0.3)
# NOTE(review): shap.datasets.boston() was deprecated/removed in recent shap
# releases — confirm the installed shap version still provides it.
X,y = shap.datasets.boston()
X = X.values
test_size = 0.3
random_state = 0
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state)
model = untrained_model.fit(X_train, y_train)
# -
# ### Define Explainer Masker
# use Independent masker as default
masker = shap.maskers.Independent(X)
# ### Create Explainer Object
# tree explainer is used
explainer = shap.Explainer(model, masker)
# ### Run SHAP Explanation
shap_values = explainer(X)
# ### Define Metrics (Sort Order & Perturbation Method)
sort_order = 'positive'
perturbation = 'keep'
# ### Benchmark Explainer
# "keep positive": keep features with positive attribution, mask the rest
sp = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation)
xs, ys, auc = sp.model_score(shap_values, X)
sp.plot(xs, ys, auc)
# Repeat for the "keep negative" metric
sort_order = 'negative'
perturbation = 'keep'
sp = benchmark.perturbation.SequentialPerturbation(explainer.model, explainer.masker, sort_order, perturbation)
xs, ys, auc = sp.model_score(shap_values, X)
sp.plot(xs, ys, auc)
| notebooks/benchmark/tabular/Tabular Prediction Benchmark Demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Import modules
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# %matplotlib inline
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
import RESSPyLab
# -
# # 1 - Load an experiment
# Make a list of pandas dataframes with (clean) experimental data from a csv file. This is done with the pandas package from data in csv files. Two columns should be included in the csv file with true strain ("e_true") and true stress ("Sigma_true").
# +
# Load each experiment CSV (columns: e_true, Sigma_true) into a list of dataframes.
testFileNames=['example_1.csv']
listCleanTests=[]
for testFileName in testFileNames:
    test=pd.read_csv(testFileName)
    listCleanTests.append(test)
# -
# # 2 - Determine Voce and Chaboche material parameters with either VCopt_SVD or VCopt_J
# There are two arguments to VCopt: an initial starting point for the parameters ("x_0") and the list of tests previously assembled.
#
# The parameters are gathered in list in the following order:
#
# [E, sy0, Qinf, b, C_1, gamma_1, C_2, gamma_2, ..., ..., C_k, gamma_k]
#
# A recommended initial point is an elastic, perfectly plastic model with the nominal values of the elastic modulus and the yield stress. All other values are, therefore, set to zero. For numerical purposes a minimum value of 1e-1 is used instead of exact zero.
#
# The examples herein are from an S355J2 steel. Nominal values are therefore: E=200e3MPa sy0=355MPa
# +
# Initial point: elastic perfectly-plastic (nominal E and sy0 for S355J2;
# all hardening parameters at the 1e-1 numerical minimum).
x_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]
sol=RESSPyLab.VCopt_SVD(x_0,listCleanTests)
print(sol)
# +
# Same calibration with the alternative VCopt_J solver.
x_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]
sol=RESSPyLab.VCopt_J(x_0,listCleanTests)
print(sol)
# -
# # 3 - Use the solution point to plot experiment vs simulation
simCurve=RESSPyLab.VCsimCurve(sol,test)
plt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')
plt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')
plt.legend(loc='best')
plt.xlabel('True strain')
plt.ylabel('True stress')
# # 4 - Example with multiple tests
# +
# Calibrate against two experiments simultaneously.
testFileNames=['example_1.csv','example_2.csv']
listCleanTests=[]
for testFileName in testFileNames:
    test=pd.read_csv(testFileName)
    listCleanTests.append(test)
# +
x_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]
sol=RESSPyLab.VCopt_SVD(x_0,listCleanTests)
print(sol)
# +
x_0=[200e3,355,1e-1,1e-1,1e-1,1e-1]
sol=RESSPyLab.VCopt_J(x_0,listCleanTests)
print(sol)
# +
# Compare the joint calibration against each experiment separately.
test=pd.read_csv('example_1.csv')
simCurve=RESSPyLab.VCsimCurve(sol,test)
plt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')
plt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')
plt.legend(loc='best')
plt.xlabel('True strain')
plt.ylabel('True stress')
# +
test=pd.read_csv('example_2.csv')
simCurve=RESSPyLab.VCsimCurve(sol,test)
plt.plot(test['e_true'],test['Sigma_true'],c='r',label='Test')
plt.plot(simCurve['e_true'],simCurve['Sigma_true'],c='k',label='RESSPyLab')
plt.legend(loc='best')
plt.xlabel('True strain')
plt.ylabel('True stress')
# -
| examples/Old_RESSPyLab_Parameter_Calibration_Orientation_Notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + executionInfo={"elapsed": 320, "status": "ok", "timestamp": 1634022117213, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}, "user_tz": -630} id="fzCydXdn7G3B"
import sys
sys.path.append("../")
import os
import numpy as np
import pandas as pd
import scipy.optimize as optim
from sklearn.preprocessing import StandardScaler
# + [markdown] id="d0NjeOPx75zO"
# # HELPER FUNCTIONS
# + executionInfo={"elapsed": 317, "status": "ok", "timestamp": 1634022119775, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}, "user_tz": -630} id="G0Yk1odW7956"
import numpy as np
from scipy.special import softmax
import pandas as pd
np.random.seed(509)  # fix the global RNG so notebook runs are reproducible
def loss_x(x_new, x_initial):
    """
    Reconstruction loss L_x: keeps the mapping to Z a faithful description
    of the raw inputs X, measured as the total squared difference between
    the prototypes' reconstruction and the original data.

    ARGS:
        x_new - reconstructed examples (prototype mapping applied)
        x_initial - raw input data
    Returns:
        scalar reconstruction loss
    """
    squared_diff = np.square(x_new - x_initial)
    # np.sum with no axis collapses everything to a scalar, so the outer
    # np.mean is a no-op kept for parity with the original formulation.
    return np.mean(np.sum(squared_diff))
def loss_y(y_true, y_predicted):
    """
    Prediction loss L_y: binary cross-entropy between labels and
    predictions, averaged over examples.

    Both inputs are clipped away from 0 and 1 because log is undefined at 0.

    ARGS:
        y_true - (num_examples, )
        y_predicted - (num_examples, )
    Returns:
        scalar negative mean log-likelihood
    """
    truth = np.clip(y_true, 1e-6, 0.999)
    preds = np.clip(y_predicted, 1e-6, 0.999)
    per_example = truth * np.log(preds) + (1. - truth) * np.log(1. - preds)
    return -np.sum(per_example) / len(truth)
def loss_z(M_k_sensitive, M_k_non_sensitive):
    """
    Fairness loss L_z: encourages statistical parity by penalizing the L1
    distance between the mean prototype occupations of the sensitive and
    non-sensitive groups.

    Args:
        M_k_sensitive - (num_prototypes, )
        M_k_non_sensitive - (num_prototypes, )
    Returns:
        scalar L1 distance
    """
    gap = M_k_sensitive - M_k_non_sensitive
    return np.abs(gap).sum()
def distances(X, v, alpha):
    """
    Compute the weighted squared Euclidean distance between every example
    and every prototype.

    For example x and prototype v_k the entry is
    sum_f alpha[f] * (x[f] - v_k[f])**2.

    ARGS:
        X - raw data, array-like of shape (num_examples, num_features);
            a pandas DataFrame is accepted as well (converted via np.asarray)
        v - prototypes, (num_prototypes, num_features)
        alpha - per-feature weights; must broadcast against the feature
            axis, i.e. shape (num_features,)
    returns:
        dists - (num_examples, num_prototypes)
    """
    X = np.asarray(X)  # also accepts a DataFrame (replaces the old commented-out .values)
    # Broadcast to (num_examples, num_prototypes, num_features) and reduce over
    # the feature axis. This replaces the per-example Python loop with one
    # vectorized NumPy expression — identical values, much faster.
    diff = X[:, None, :] - v[None, :, :]
    return np.sum(np.square(diff) * alpha, axis=2)
def M_nk(dists):
    """
    Turn distances into soft prototype-assignment probabilities.

    Each example's probability of mapping to prototype k is the softmax of
    the negated distances over the prototype axis: closer prototypes get
    higher probability.

    ARGS:
        dists - (num_examples, num_prototypes)
    Return :
        mappings - (num_examples, num_prototypes); each row sums to 1
    """
    # Negate: a smaller distance means a better (more likely) mapping.
    negated = -dists
    return softmax(negated, axis=1)
def M_k(M_nk):
    """
    Average soft assignment per prototype.

    ARGS:
        M_nk - (num_examples, num_prototypes)
    Returns:
        M_k - (num_prototypes, ) column means of the mapping matrix
    """
    column_means = np.mean(M_nk, axis=0)
    return column_means
def x_n_hat(M_nk, v):
    """
    Reconstruct the examples from their soft prototype assignments.

    Each row is a combination of the prototypes weighted by the mapping
    probabilities (matrix product M_nk · v).

    ARGS:
        M_nk - (num_examples, num_prototypes)
        v - (num_prototypes, num_features)
    Returns:
        x_n_hat - (num_examples, num_features)
    """
    return np.matmul(M_nk, v)
def y_hat(M_nk, w):
    """
    Predict labels in the representation space as the probability-weighted
    combination of per-prototype label weights (matrix product M_nk · w).

    ARGS:
        M_nk - (num_examples, num_prototypes)
        w - (num_prototypes, )
    returns:
        y_hat - (num_examples, )
    """
    return np.matmul(M_nk, w)
def optim_objective(params, data_sensitive, data_non_sensitive, y_sensitive,
y_non_sensitive, inference=False, NUM_PROTOTYPES=10, A_x=0.01, A_y=0.1, A_z=0.5,
print_every=100):
    """
    Combine all helper functions into the overall LFR loss; this callable is
    passed to the l-bfgs-b optimizer and doubles as the inference routine via
    the `inference` flag.

    Relies on the function attribute `optim_objective.iters` (initialised to 0
    at module level) as a call counter for periodic loss printing.

    ARGS:
        params - vector of length (2 * num_features + NUM_PROTOTYPES + NUM_PROTOTYPES * num_features),
                 laid out as [alpha_non_sensitive | alpha_sensitive | w | v.flatten()]
        data_sensitive - instances belonging to sensitive group (num_sensitive_examples, num_features)
        data_non_sensitive - similar to data_sensitive (num_non_sensitive_examples, num_features)
        y_sensitive - labels for sensitive group (num_sensitive_examples, )
        y_non_sensitive - similar to y_sensitive
        inference - (optional) if True, return the transformed dataset instead of the loss
        NUM_PROTOTYPES - (optional) number of prototypes, default 10
        A_x - (optional) weight of reconstruction loss L_x, default 0.01
        A_y - (optional) weight of prediction loss L_y, default 0.1
        A_z - (optional) weight of statistical-parity loss L_z, default 0.5
        print_every - (optional) how often to print the loss, default 100
    returns:
        if inference is False:
            float - A_x * L_x + A_y * L_y + A_z * L_z
        if inference is True:
            x_hat_sensitive, x_hat_non_sensitive, y_hat_sensitive, y_hat_non_sensitive
    """
    optim_objective.iters += 1
    num_features = data_sensitive.shape[1]
    # extract values for each variable from params vector
    alpha_non_sensitive = params[:num_features]
    alpha_sensitive = params[num_features:2 * num_features]
    w = params[2 * num_features:2 * num_features + NUM_PROTOTYPES]
    v = params[2 * num_features + NUM_PROTOTYPES:].reshape(NUM_PROTOTYPES, num_features)
    # per-group weighted distances to every prototype
    dists_sensitive = distances(data_sensitive, v, alpha_sensitive)
    dists_non_sensitive = distances(data_non_sensitive, v, alpha_non_sensitive)
    # get probabilities of mappings (softmax over prototypes)
    M_nk_sensitive = M_nk(dists_sensitive)
    M_nk_non_sensitive = M_nk(dists_non_sensitive)
    # M_k only used for calculating loss_z (statistical parity)
    M_k_sensitive = M_k(M_nk_sensitive)
    M_k_non_sensitive = M_k(M_nk_non_sensitive)
    L_z = loss_z(M_k_sensitive, M_k_non_sensitive) # stat parity
    # get new representation of data
    x_hat_sensitive = x_n_hat(M_nk_sensitive, v)
    x_hat_non_sensitive = x_n_hat(M_nk_non_sensitive, v)
    # calculates how close new representation is to original data
    L_x_sensitive = loss_x(data_sensitive, x_hat_sensitive)
    L_x_non_sensitive = loss_x(data_non_sensitive, x_hat_non_sensitive)
    # get new values for labels
    y_hat_sensitive = y_hat(M_nk_sensitive, w)
    y_hat_non_sensitive = y_hat(M_nk_non_sensitive, w)
    # ensure how good new predictions are (log_loss)
    L_y_sensitive = loss_y(y_sensitive, y_hat_sensitive)
    L_y_non_sensitive = loss_y(y_non_sensitive, y_hat_non_sensitive)
    L_x = L_x_sensitive + L_x_non_sensitive
    L_y = L_y_sensitive + L_y_non_sensitive
    loss = A_x * L_x + A_y * L_y + A_z * L_z
    if optim_objective.iters % print_every == 0:
        print(f'loss on iteration {optim_objective.iters} : {loss}, L_x - {L_x * A_x} L_y - {L_y * A_y} L_z - {L_z * A_z}')
    if not inference:
        return loss
    if inference:
        return x_hat_sensitive, x_hat_non_sensitive, y_hat_sensitive, y_hat_non_sensitive
optim_objective.iters = 0
# + executionInfo={"elapsed": 355, "status": "ok", "timestamp": 1634022123785, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}, "user_tz": -630} id="xO2k-oJ49Oes"
# + executionInfo={"elapsed": 405, "status": "ok", "timestamp": 1634022124578, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}, "user_tz": -630} id="HYX_khar9SAO"
# + [markdown] id="h1CCt7Tq8RQ-"
# # REPAIRER
# + colab={"base_uri": "https://localhost:8080/"} executionInfo={"elapsed": 1387, "status": "ok", "timestamp": 1634022163420, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GhE0cXJBikzHY7xUavBkbPRseKZ_N-obrw0xIoLhQ=s64", "userId": "04127040763952829247"}, "user_tz": -630} id="1u3DTYPw9X6w" outputId="8b4685ef-de65-476f-b606-ed4c3d5bd272"
from google.colab import drive
# Mount Google Drive so the Train/Test CSV splits and the output folders used
# below are reachable under /content/gdrive. Colab-only; prompts for auth.
drive.mount('/content/gdrive', force_remount=True)
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="Gakq0-Z78PYe" outputId="5510b2c7-86ab-4853-eea3-3073ef627499"
# Separation into sensitive and non-sensitive groups: for each train split,
# learn a fair representation (LFR) via l-bfgs-b and write the transformed
# dataset back to Drive.
for count in range(42, 51, 1):
    train_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Compas/Train'
    train_path = os.path.join(train_url, "Train" + str(count) + ".csv")
    data = pd.read_csv(train_path)
    # Move the label column to the front so it lines up with y_hat after hstack.
    first_column = data.pop('two_year_recid')
    data.insert(0, 'two_year_recid', first_column)
    # race == 0 is treated as the protected (sensitive) group.
    data_sensitive = data.loc[data.race == 0]
    data_non_sensitive = data[data.race == 1]
    y_sensitive = data_sensitive.two_year_recid
    y_non_sensitive = data_non_sensitive.two_year_recid
    print(f'Dataset contains {data.shape[0]} examples and {data.shape[1]} features')
    print(f'From which {data_sensitive.shape[0]} belong to sensitive group and {data_non_sensitive.shape[0]} to non sensitive group ')
    del data_sensitive['two_year_recid']
    del data_non_sensitive['two_year_recid']
    # Standard scaling, fitted separately per group.
    data_sensitive = StandardScaler().fit_transform(data_sensitive)
    data_non_sensitive = StandardScaler().fit_transform(data_non_sensitive)
    NUM_PROTOTYPES = 10
    num_features = data_sensitive.shape[1]
    # Parameter vector layout (see optim_objective):
    # [alpha_non_sensitive | alpha_sensitive | w | v.flatten()]
    params = np.random.uniform(size=(num_features * 2 + NUM_PROTOTYPES + NUM_PROTOTYPES * num_features))
    # Bound only the prototype label weights w to [0, 1]; the alphas and the
    # prototypes v stay unbounded.
    bnd = []
    for i, _ in enumerate(params):
        if i < num_features * 2 or i >= num_features * 2 + NUM_PROTOTYPES:
            bnd.append((None, None))
        else:
            bnd.append((0, 1))
    new_params = optim.fmin_l_bfgs_b(optim_objective, x0=params, epsilon=1e-5,
                                     args=(data_sensitive, data_non_sensitive,
                                           y_sensitive, y_non_sensitive),
                                     bounds=bnd, approx_grad=True, maxfun=1_000,
                                     maxiter=1_000)[0]
    x_hat_sensitive, x_hat_nons, y_hat_sens, y_hat_nons = optim_objective(
        new_params, data_sensitive, data_non_sensitive,
        y_sensitive, y_non_sensitive, inference=True)
    # Re-attach the predicted label as the first column, mirroring the input layout.
    fair_protected = np.hstack([y_hat_sens[:, None], x_hat_sensitive])
    fair_unprotected = np.hstack([y_hat_nons[:, None], x_hat_nons])
    fair_protected_df = pd.DataFrame(fair_protected)
    fair_unprotected_df = pd.DataFrame(fair_unprotected)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat is the supported equivalent.
    FairData = pd.concat([fair_protected_df, fair_unprotected_df], ignore_index=True)
    FairData.columns = list(data.columns)
    # Binarise the reconstructed race / label columns around their means.
    FairData['race'] = (FairData['race'] >= FairData['race'].mean()).astype(int)
    FairData['two_year_recid'] = (FairData['two_year_recid'] >= FairData['two_year_recid'].mean()).astype(int)
    path = r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Compas/Train"
    train_output = os.path.join(path, "Train" + str(count) + ".csv")
    FairData.to_csv(train_output, index=False)
print('Done')
# + colab={"background_save": true, "base_uri": "https://localhost:8080/"} id="FhC_0CNsAuiI"
# Separation into sensitive and non-sensitive groups: same LFR pipeline as the
# train loop above, applied to each test split.
for count in range(16, 51, 1):
    test_url = r'/content/gdrive/MyDrive/Datasets/SurveyData/DATASET/Compas/Test'
    test_path = os.path.join(test_url, "Test" + str(count) + ".csv")
    data = pd.read_csv(test_path)
    # Move the label column to the front so it lines up with y_hat after hstack.
    first_column = data.pop('two_year_recid')
    data.insert(0, 'two_year_recid', first_column)
    # race == 0 is treated as the protected (sensitive) group.
    data_sensitive = data.loc[data.race == 0]
    data_non_sensitive = data[data.race == 1]
    y_sensitive = data_sensitive.two_year_recid
    y_non_sensitive = data_non_sensitive.two_year_recid
    print(f'Dataset contains {data.shape[0]} examples and {data.shape[1]} features')
    print(f'From which {data_sensitive.shape[0]} belong to sensitive group and {data_non_sensitive.shape[0]} to non sensitive group ')
    del data_sensitive['two_year_recid']
    del data_non_sensitive['two_year_recid']
    # Standard scaling, fitted separately per group.
    data_sensitive = StandardScaler().fit_transform(data_sensitive)
    data_non_sensitive = StandardScaler().fit_transform(data_non_sensitive)
    NUM_PROTOTYPES = 10
    num_features = data_sensitive.shape[1]
    # Parameter vector layout (see optim_objective):
    # [alpha_non_sensitive | alpha_sensitive | w | v.flatten()]
    params = np.random.uniform(size=(num_features * 2 + NUM_PROTOTYPES + NUM_PROTOTYPES * num_features))
    # Bound only the prototype label weights w to [0, 1].
    bnd = []
    for i, _ in enumerate(params):
        if i < num_features * 2 or i >= num_features * 2 + NUM_PROTOTYPES:
            bnd.append((None, None))
        else:
            bnd.append((0, 1))
    new_params = optim.fmin_l_bfgs_b(optim_objective, x0=params, epsilon=1e-5,
                                     args=(data_sensitive, data_non_sensitive,
                                           y_sensitive, y_non_sensitive),
                                     bounds=bnd, approx_grad=True, maxfun=1_000,
                                     maxiter=1_000)[0]
    x_hat_sensitive, x_hat_nons, y_hat_sens, y_hat_nons = optim_objective(
        new_params, data_sensitive, data_non_sensitive,
        y_sensitive, y_non_sensitive, inference=True)
    # Re-attach the predicted label as the first column, mirroring the input layout.
    fair_protected = np.hstack([y_hat_sens[:, None], x_hat_sensitive])
    fair_unprotected = np.hstack([y_hat_nons[:, None], x_hat_nons])
    fair_protected_df = pd.DataFrame(fair_protected)
    fair_unprotected_df = pd.DataFrame(fair_unprotected)
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # 2.0; pd.concat is the supported equivalent.
    FairData = pd.concat([fair_protected_df, fair_unprotected_df], ignore_index=True)
    FairData.columns = list(data.columns)
    # Binarise the reconstructed race / label columns around their means.
    FairData['race'] = (FairData['race'] >= FairData['race'].mean()).astype(int)
    FairData['two_year_recid'] = (FairData['two_year_recid'] >= FairData['two_year_recid'].mean()).astype(int)
    path = r"/content/gdrive/MyDrive/Datasets/SurveyData/RESULTS/LFR/FairData/Compas/Test"
    test_output = os.path.join(path, "Test" + str(count) + ".csv")
    FairData.to_csv(test_output, index=False)
print('Done')
| Fairness_Survey/ALGORITHMS/LFR/Fair Maker/CompasLFR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root] *
# language: python
# name: conda-root-py
# ---
# ## Observations and Insights
# +
# Observations from this homework:
# 1. Capomulin and Ramicane produce smaller tumor volumes at the end timepoints than Infubinol and Ceftamin.
# 2. Mouse g316 (on Capomulin) had a significant decrease in tumor volume at the last timepoint.
# 3. There is a correlation between weight and average tumor volume in mice.
# -
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import scipy.stats as st
import numpy as np
# Study data files
mouse_metadata_path = "data/Mouse_metadata.csv"
study_results_path = "data/Study_results.csv"
# Read the mouse data and the study results
mouse_metadata = pd.read_csv(mouse_metadata_path)
study_results = pd.read_csv(study_results_path)
mouse_metadata.head()
# -
study_results.head()
# +
# Combine the data into a single dataset
# Display the data table for previews
# Outer merge on "Mouse ID" keeps every mouse even if it appears in only one file.
merge_df = pd.merge(mouse_metadata, study_results, on="Mouse ID", how="outer")
merge_df
# -
# Checking the number of mice.
#merge_df.count()
mouse_count = merge_df["Mouse ID"].unique()
len(mouse_count)
# +
# Getting the duplicate mice by ID number that shows up for Mouse ID and Timepoint.
# A duplicated (Mouse ID, Timepoint) pair means the same observation was recorded twice.
duplicate_mice_ID = merge_df[merge_df.duplicated(['Mouse ID', 'Timepoint'])]
duplicate_mice_ID
# -
# Optional: Get all the data for the duplicate mouse ID.
print(merge_df[merge_df.duplicated(subset=['Mouse ID', 'Timepoint'], keep=False)])
# Create a clean DataFrame by dropping the duplicate mouse by its ID.
# Mouse g989 is the duplicated subject found above; drop all of its rows.
cleaned_df = merge_df.loc[merge_df["Mouse ID"] !="g989"]
cleaned_df
# +
# Checking the number of mice in the clean DataFrame.
number_of_mice =len(cleaned_df["Mouse ID"].unique())
number_of_mice
# -
# ## Summary Statistics
# +
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
# Use groupby and summary statistical methods to calculate the following properties of each drug regimen:
# mean, median, variance, standard deviation, and SEM of the tumor volume.
# Assemble the resulting series into a single summary dataframe.
drug_regimen_df = cleaned_df.groupby("Drug Regimen")
drug_regimen_df.head()
#For each drug:
#drug_regimen_df.describe()
# One aggregate Series per statistic, each indexed by drug regimen.
mean_tv = drug_regimen_df["Tumor Volume (mm3)"].mean()
median_tv = drug_regimen_df["Tumor Volume (mm3)"].median()
variance_tv = drug_regimen_df["Tumor Volume (mm3)"].var()
stdev_tv = drug_regimen_df["Tumor Volume (mm3)"].std()
sem_tv = drug_regimen_df["Tumor Volume (mm3)"].sem()
# Combine the Series into one table; keys become column names.
drug_summary = ({"Mean Tumor Volume": mean_tv,
"Median Tumor Volume": median_tv,
"Variance of Tumor Volume": variance_tv,
"Standard Deviation of Tumor Volume": stdev_tv,
"SEM of Tumor Volume": sem_tv})
drug_regimen_summary = pd.DataFrame(drug_summary)
drug_regimen_summary.head()
# -
# Generate a summary statistics table of mean, median, variance, standard deviation, and SEM of the tumor volume for each regimen
drug_regimen_df = cleaned_df.groupby("Drug Regimen")
drug_regimen_df.head()
# Using the aggregation method, produce the same summary statistics in a single line
drug_regimen_df.agg({"Tumor Volume (mm3)":['mean','median','var','std','sem']})
# ## Bar and Pie Charts
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pandas.
# nunique() counts each mouse once per regimen (a mouse appears at many timepoints).
drug_regimen_summary["Number of Mice"]= drug_regimen_df["Mouse ID"].nunique()
drug_regimen_summary
mice_and_drug = drug_regimen_df["Mouse ID"].nunique()
mice_and_drug
mice_and_drug.plot(kind="bar", facecolor="red")
plt.title("Number of Mice per Drug Regimen")
plt.ylabel("Number of Mice")
plt.xlabel("Drug")
plt.tight_layout()
# +
# Generate a bar plot showing the total number of unique mice tested on each drug regimen using pyplot.
# NOTE(review): count() here counts rows (all timepoints) while the bar heights
# below use mice_and_drug (unique mice); only the regimen names from count_mice
# are actually used — confirm this is intended.
count_mice = pd.DataFrame(cleaned_df.groupby("Drug Regimen")["Mouse ID"].count()).reset_index()
count_mice
# NOTE(review): `plt.figure` without parentheses is a no-op attribute reference —
# probably meant plt.figure().
plt.figure
plt.bar(count_mice['Drug Regimen'],mice_and_drug,color='c',alpha=0.5, align="center")
tick_locations = [value for value in count_mice['Drug Regimen']]
plt.xticks(tick_locations, rotation='vertical')
plt.title("Mice per Drug Regimen")
plt.xlabel("Drug Regimen")
plt.ylabel("Number of Mice")
# +
# Generate a pie plot showing the distribution of female versus male mice using pandas
plot = cleaned_df['Sex'].value_counts().plot.pie(y='Sex', autopct='%1.1f%%')
plt.title("Mouse Distribution")
# Blank out the default axis labels; the wedge labels carry the information.
plt.xlabel("")
plt.ylabel("")
# -
cleaned_df["Sex"].value_counts()
# +
# Generate a pie plot showing the distribution of female versus male mice using pyplot
# BUG FIX: compute the counts from the data instead of hard-coding them (the
# original used sizes = [958, 922], which silently goes stale if the data
# changes). value_counts() returns counts in descending order, so the larger
# group comes first, matching the original Male/Female ordering.
sex_counts = cleaned_df["Sex"].value_counts()
labels = list(sex_counts.index)
sizes = list(sex_counts.values)
explode = (0, 0.1)  # offset the second (smaller) wedge for emphasis
fig1, ax1 = plt.subplots()
ax1.pie(sizes, explode=explode, labels=labels,
        shadow=True, startangle=45, autopct='%1.1f%%')
ax1.axis('equal')  # equal aspect ratio so the pie renders as a circle
plt.show()
# -
# ## Quartiles, Outliers and Boxplots
drug_regimen_summary
# +
# Calculate the final tumor volume of each mouse across four of the treatment regimens:
# Capomulin, Ramicane, Infubinol, and Ceftamin
# Start by getting the last (greatest) timepoint for each mouse
# Sorting by Timepoint descending and keeping the first duplicate per mouse
# leaves exactly one row per mouse: its final observation (tumor volume included).
last_timepoint_df = cleaned_df.sort_values("Timepoint", ascending = False)
last_timepoint_df = last_timepoint_df.drop_duplicates("Mouse ID", keep='first')
last_timepoint_df
# Merge this group df with the original dataframe to get the tumor volume at the last timepoint
# One final-timepoint subset per regimen of interest.
capomulin_df = last_timepoint_df[last_timepoint_df["Drug Regimen"]== "Capomulin"]
ramicane_df = last_timepoint_df[last_timepoint_df["Drug Regimen"]== "Ramicane"]
infubinol_df = last_timepoint_df[last_timepoint_df["Drug Regimen"]== "Infubinol"]
ceftamin_df = last_timepoint_df[last_timepoint_df["Drug Regimen"]== "Ceftamin"]
# +
# Put treatments into a list for the for loop (and later for plot labels)
treatments = ["Capomulin", "Ramicane", "Infubinol", "Ceftamin"]

# Final tumor volumes per regimen. These names are reused by the boxplot cell
# below, so they are kept as module-level variables.
cap_vol_data = capomulin_df["Tumor Volume (mm3)"]
ram_vol_data = ramicane_df["Tumor Volume (mm3)"]
inf_vol_data = infubinol_df["Tumor Volume (mm3)"]
ceft_vol_data = ceftamin_df["Tumor Volume (mm3)"]

def report_iqr_outliers(name, volumes):
    """Print quartiles, IQR, median and the 1.5*IQR outlier bounds for one regimen.

    name    - lowercase regimen name used in the printed messages
    volumes - pandas Series of final tumor volumes for that regimen
    """
    quartiles = volumes.quantile([.25, .5, .75])
    lowerq = quartiles[0.25]
    upperq = quartiles[0.75]
    iqr = upperq - lowerq
    print(f"The lower quartile of {name} is: {lowerq}")
    print(f"The upper quartile of {name} is: {upperq}")
    print(f"The interquartile range of {name} is: {iqr}")
    print(f"The median of {name} is: {quartiles[0.5]} ")
    # Standard Tukey fences: points beyond 1.5*IQR from the quartiles are
    # potential outliers.
    lower_bound = lowerq - (1.5 * iqr)
    upper_bound = upperq + (1.5 * iqr)
    print(f"Values below {lower_bound} could be outliers.")
    print(f"Values above {upper_bound} could be outliers.")

# One loop replaces the four copy-pasted cells of the original; the printed
# output is identical, in the same order.
for name, volumes in zip(["capomulin", "ramicane", "infubinol", "ceftamin"],
                         [cap_vol_data, ram_vol_data, inf_vol_data, ceft_vol_data]):
    report_iqr_outliers(name, volumes)
# +
# Generate a box plot of the final tumor volume of each mouse across four regimens of interest
# Order matches `treatments`, so each box is labelled with its regimen name.
tumor_vol_data = [cap_vol_data, ram_vol_data, inf_vol_data, ceft_vol_data]
fig1, ax1 = plt.subplots()
ax1.set_title("Tumor Volume")
ax1.set_xlabel("Drug Name")
ax1.set_ylabel("Volume in mm3")
ax1.boxplot(tumor_vol_data, labels = treatments)
# Save before show(): show() clears the current figure in some backends.
plt.savefig("boxplot")
plt.show()
# -
# ## Line and Scatter Plots
# +
# Generate a line plot of tumor volume vs. time point for a mouse treated with Capomulin
mouse_g316 = cleaned_df[cleaned_df["Mouse ID"]=="g316"]
g316_time = mouse_g316["Timepoint"]
g316_tumor = mouse_g316["Tumor Volume (mm3)"]
# Draw the series first, then decorate and display.
plt.plot(g316_time, g316_tumor, color="green", label="Mouse g316")
plt.title("Mouse g316 on Capomulin")
plt.xlabel("Timepoint")
plt.ylabel("Tumor volume in mm3")
# BUG FIX: the original referenced `plt.show` without parentheses (a no-op)
# and placed it before the plot call; show() must be invoked after plotting.
plt.show()
# +
# Generate a scatter plot of average tumor volume vs. mouse weight for the Capomulin regimen
capomulin_df2 = cleaned_df[cleaned_df["Drug Regimen"]== "Capomulin"]
capomulin_df2.head()
# Per-mouse means across timepoints (weight is constant per mouse).
# NOTE(review): in pandas >= 2.0 mean() on a groupby containing non-numeric
# columns raises; may need .mean(numeric_only=True) — confirm pandas version.
avg_group_cap = capomulin_df2.groupby("Mouse ID").mean()
avg_group_cap
mouse_weight = avg_group_cap["Weight (g)"]
avg_vol_cap = avg_group_cap["Tumor Volume (mm3)"]
plt.scatter(mouse_weight, avg_vol_cap)
plt.xlabel("Weight (g)")
plt.ylabel("Tumor Volume (mm3)")
plt.title("Mice on Capomulin")
# -
# ## Correlation and Regression
# +
# Calculate the correlation coefficient and linear regression model
# for mouse weight and average tumor volume for the Capomulin regimen
# pearsonr returns (r, p-value); keep r rounded to 2 decimals.
corr=round(st.pearsonr(avg_group_cap['Weight (g)'],avg_group_cap['Tumor Volume (mm3)'])[0],2)
print(f"The correlation between mouse weight and average tumor volume is {corr}")
# +
# Least-squares fit; the result exposes slope/intercept/rvalue/pvalue/stderr.
lr=st.linregress(avg_group_cap['Weight (g)'],avg_group_cap['Tumor Volume (mm3)'])
lr
# +
# Overlay the fitted regression line on the weight vs. tumor-volume scatter.
# BUG FIX: take slope/intercept from the linregress result `lr` computed above
# instead of hard-coding copied values that go stale if the data changes.
slope = lr.slope
intercept = lr.intercept
y_values = avg_group_cap['Weight (g)'] * slope + intercept
plt.scatter(avg_group_cap['Weight (g)'], avg_group_cap['Tumor Volume (mm3)'])
plt.plot(avg_group_cap['Weight (g)'], y_values, color="red")
plt.xlabel('Weight(g)')
plt.ylabel('Average Tumor Volume (mm3)')
plt.title("The Relationship Between Mouse Weight and Tumor Volume with Capomulin")
line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
# BUG FIX: the original annotated at (6, 10), which lies outside the plotted
# data range and so never appeared; place it inside the axes instead.
plt.annotate(line_eq, (16, 36), fontsize=15, color="red")
plt.savefig('linregression')
plt.show()
# -
| Instructions/Pymaceuticals/pymaceuticals_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python [conda env:PythonData] *
# language: python
# name: conda-env-PythonData-py
# ---
# %matplotlib inline
from matplotlib import style
style.use('fivethirtyeight')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import datetime as dt
# # Reflect Tables into SQLAlchemy ORM
# Python SQL toolkit and Object Relational Mapper
import sqlalchemy
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
# create engine to hawaii.sqlite
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
# NOTE(review): prepare(engine, reflect=True) is deprecated in SQLAlchemy 1.4+
# in favour of Base.prepare(autoload_with=engine) — confirm installed version.
Base.prepare(engine, reflect=True)
# View all of the classes that automap found
Base.classes.keys()
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
# Create our session (link) from Python to the DB
session = Session(engine)
from sqlalchemy import create_engine, inspect
# Inspect the raw schema: table names and column name/type pairs.
inspector = inspect(engine)
inspector.get_table_names()
columns=inspector.get_columns('measurement')
for column in columns:
    print(column['name'], column['type'])
columns=inspector.get_columns('station')
for column in columns:
    print(column['name'], column['type'])
# # Exploratory Precipitation Analysis
# Find the most recent date in the data set.
recent_date = session.query(Measurement.date).order_by(Measurement.date.desc()).first()
print(recent_date)
# +
# Design a query to retrieve the last 12 months of precipitation data and plot the results.
# Starting from the most recent data point in the database (2017-08-23),
# calculate the date one year earlier.
last_year = dt.date(2017, 8, 23) - dt.timedelta(days=365)
print(last_year)
# Retrieve precipitation scores with their dates, already ordered by date.
scores = session.query(Measurement.prcp, Measurement.date).\
    filter(Measurement.date > last_year).\
    order_by(Measurement.date).all()
# Save the query results as a Pandas DataFrame indexed by the date column.
scores_df = pd.DataFrame(scores).set_index('date')
# (Already sorted by date via order_by in the query above.)
# BUG FIX: dropna() returns a new DataFrame; the original discarded the
# result, so rows with NaN precipitation were never actually removed.
scores_df = scores_df.dropna()
scores_df
# Use Pandas plotting with Matplotlib to plot the data.
scores_df.plot()
plt.xlabel("Date")
plt.ylabel("Precipitation in Inches")
plt.title("Precipitation (8/24/16 to 8/23/17)")
plt.legend(["Precipitation"])
plt.xticks(rotation=90)
plt.show()
# -
# Use Pandas to calculate the summary statistics for the precipitation data
scores_df.describe()
# # Exploratory Station Analysis
# Design a query to calculate the total number stations in the dataset
stations = session.query(Station.id).count()
print(stations)
# Design a query to find the most active stations (i.e. what stations have the most rows?)
# List the stations and the counts in descending order.
active_station = session.query(Measurement.station, func.count(Measurement.id)).group_by(Measurement.station).\
    order_by(func.count(Measurement.id).desc()).all()
active_station
# Using the most active station id from the previous query, calculate the lowest, highest, and average temperature.
station_number = 'USC00519281'
session.query(func.min(Measurement.tobs),
              func.max(Measurement.tobs),
              func.avg(Measurement.tobs)).filter(Measurement.station == station_number).all()
# +
# Using the most active station id
# Query the last 12 months of temperature observation data for this station and plot the results as a histogram
observation_data= session.query(Measurement.date, Measurement.tobs).filter(Measurement.station=='USC00519281'). filter(Measurement.date > last_year)
# NOTE(review): the rename by integer keys {0, 1} is likely a no-op — the query
# result should already carry 'date'/'tobs' column labels; confirm.
observation_df=pd.DataFrame(observation_data).rename(columns={ 0:"date",
                                                               1: "tobs"})
plt.hist(observation_df["tobs"], bins=12)
plt.xlabel("Temperature")
plt.ylabel("Observation")
plt.title("Station analysis for station USC00519281")
plt.show()
# -
# # Close session
# Close Session
session.close()
| climate_starter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
import time
import pandas as pd
import numpy as np
from sklearn import cluster
from sklearn.cluster import AgglomerativeClustering
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
# Delivery locations: one row per point with x/y columns, ';'-separated.
data = pd.read_csv("drone_delivery_v1.csv", sep=";",header = 0, index_col = None,)
df = pd.DataFrame(data)
df.head(5)
# -
df.plot.scatter(x='x', y='y', s=3);
# Build an (n, 2) coordinate array for the clustering algorithms.
f1 = data['x'].values
f2 = data['y'].values
X = np.array(list(zip(f1, f2)))
# Number of clusters
n_clusters = 10
# Time the k-means fit so the two algorithms can be compared below.
start = time.time()
kmeans = KMeans(n_clusters)
kmeans = kmeans.fit(X)
end = time.time()
centroids = kmeans.cluster_centers_
print(centroids)
print(end-start)
# <h3>Increasing number of depots greatly increases time it takes to calculate centroids</h3>
# NOTE: df2 = df is an alias, not a copy — the 'group' column below is added
# to the original df as well.
df2 = df
df2['group'] = kmeans.labels_
df2.head(10)
df2.plot.scatter(x='x', y='y', c='group', s=3, colormap='viridis')
# Time the agglomerative (Ward-linkage) clustering on the same data.
# NOTE(review): the `affinity=` keyword was renamed to `metric=` in newer
# scikit-learn releases — confirm the installed version still accepts it.
start = time.time()
clustering = AgglomerativeClustering(n_clusters, affinity='euclidean', linkage='ward')
clustering.fit_predict(X)
end = time.time()
print(clustering.labels_)
plt.scatter(X[:,0],X[:,1], c=clustering.labels_, cmap='rainbow')
print(end-start)
# <h3>Changing number of depots doesn't change running time that much.</h3>
# Because depots are based through adding up smaller centroids borders between each cluster is much more dynamic than using kmeans approach.
| labs/exc2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import torch
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import torch.optim as optim
import numpy as np
import pandas as pd
# Hyperparameters
input_size = 28*28   # flattened 28x28 grayscale image
num_classes = 10
num_epochs = 5
# NOTE(review): batch_size is defined but the DataLoaders below hard-code
# batch_size=4 — confirm which value is intended.
batch_size = 100
lr = 0.01
# +
# F-MNIST dataset
# Normalizer
# ToTensor scales to [0, 1]; Normalize((0.5,), (0.5,)) then maps to [-1, 1].
transform = transforms.Compose([transforms.ToTensor(),
                                transforms.Normalize((0.5,), (0.5,))
                               ])
# download=False assumes the dataset already exists under ./data.
trainset = torchvision.datasets.FashionMNIST(root='./data', train=True,
                                             download=False, transform=transform)
testset = torchvision.datasets.FashionMNIST(root='./data', train=False,
                                            download=False, transform=transform)
# Data loader
train_loader_fmnist = torch.utils.data.DataLoader(trainset, batch_size=4,
                                                  shuffle=True, num_workers=2)
test_loader_fmnist = torch.utils.data.DataLoader(testset, batch_size=4,
                                                 shuffle=False, num_workers=2)
# -
# Logistic regression model.
# Flatten -> single linear layer -> log-probabilities (pairs with NLLLoss below).
model = torch.nn.Sequential(
    torch.nn.Flatten(),
    torch.nn.Linear(input_size, num_classes),
    torch.nn.LogSoftmax(dim=1)
)
# Use NLL since we include softmax as part of model.
criterion = nn.NLLLoss()
# +
# Train and test functions.
def train(model, train_loader, optimizer, num_epochs, criterion, input_size, log_interval):
    """Run a standard supervised training loop over `train_loader`.

    Trains `model` in place for `num_epochs` epochs, printing the loss of
    every `log_interval`-th batch. `input_size` is unused but kept for
    interface compatibility with existing callers.
    """
    model.train()  # enable training-mode behaviour (dropout, batchnorm updates)
    for epoch_idx in range(num_epochs):
        print(f'Epoch {epoch_idx + 1}')
        for batch_idx, (batch_images, batch_labels) in enumerate(train_loader):
            optimizer.zero_grad()                            # clear old gradients
            predictions = model(batch_images)                # forward pass
            batch_loss = criterion(predictions, batch_labels)
            batch_loss.backward()                            # backprop
            optimizer.step()                                 # parameter update
            # Periodic progress logging.
            if batch_idx % log_interval == 0:
                print(f'Current loss: {batch_loss}')
def test(model, test_loader,criterion):
model.eval()
test_acc = 0
total_data = 0
loss = 0
with torch.no_grad():
for _, (images, labels) in enumerate(test_loader):
output = model(images)
pred = output.argmax(dim=1, keepdim=True)
test_acc += pred.eq(labels.view_as(pred)).sum().item()
total_data += len(images)
loss = criterion(output, labels)
print('Loss: {}'.format(loss))
test_acc /= total_data
print('Test accuracy over {} data points: {}%'.format(total_data, test_acc * 100))
return loss.item()
# -
# Collect one final test loss per optimizer, in section order.
# NOTE(review): the same `model` instance is trained by every optimizer section
# below, so each section starts from the previous section's weights rather than
# a fresh initialisation — confirm the comparison is meant to be cumulative.
test_losses = []
# # SGD
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# # SGD Momentum
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=0.9)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# # SGD Nesterov
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# # Adagrad
optimizer = optim.Adagrad(model.parameters(), lr=lr)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# # RMSProp
# RMSProp uses a smaller hard-coded learning rate (0.001) than the shared `lr`.
optimizer = optim.RMSprop(model.parameters(), lr=0.001)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# # Adam
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
train(model, train_loader_fmnist, optimizer, num_epochs, criterion, input_size, 100)
test_loss = test(model, test_loader_fmnist, criterion)
test_losses.append(test_loss)
# One-row table of final test losses, one column per optimizer.
col = ['SGD','Momentum','Nesterov','Adagrad','RMSProp','Adam']
df = pd.DataFrame(data=[test_losses], columns=col)
df
df.to_csv('logistic_regression_fmnist_loss.csv')
# # Normalize loss
# +
# Centre the losses and scale to unit norm so runs are comparable across tasks.
test_losses = np.asarray(test_losses)
normalized_test_losses = []
mean = np.mean(test_losses)
minus_mean = test_losses - mean
normalized_test_losses.append((minus_mean)/np.linalg.norm(minus_mean))
print(normalized_test_losses)
# -
col = ['SGD','Momentum','Nesterov','Adagrad','RMSProp','Adam']
df = pd.DataFrame(data=normalized_test_losses, columns=col, index = ['Logistic regression FMNIST'])
df
df.to_csv('logistic_regression_fmnist_normalized_loss.csv')
| logistic_regression/indiv_task_notebooks/logistic_regression_fmnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2020년 11월 11일 수요일
# ### leetCode - Find Lucky Integer in an Array (Python)
# ### 문제 : https://leetcode.com/problems/find-lucky-integer-in-an-array/
# ### 블로그 : https://somjang.tistory.com/entry/leetCode-1394-Find-Lucky-Integer-in-an-Array-Python
# ### 첫번째 시도
# +
from collections import Counter
from typing import List
class Solution:
    def findLucky(self, arr: List[int]) -> int:
        """Return the largest "lucky" integer in arr, or -1 if none exists.

        A lucky integer is one whose value equals its frequency in the list.

        :param arr: list of positive integers
        :returns: largest lucky integer, or -1 when there is none
        """
        counts = Counter(arr)
        # max(..., default=-1) removes the need for an explicit empty check.
        return max((num for num, freq in counts.items() if num == freq),
                   default=-1)
| DAY 201 ~ 300/DAY257_[leetCode] Find Lucky Integer in an Array (Python).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: JuliaPro_v1.4.2-1 1.4.2
# language: julia
# name: juliapro_v1.4.2-1-1.4
# ---
# # Hangover Cure
#
# People looking for hangover cure on google trends: 90 days
#
# @date: 30-Aug-2020 | @author: katnoria | @lang: julia
using CSV
using DataFrames
using Plots
using Dates
using Measures
# +
# import Pkg;
# Pkg.add("Measures")
# -
# Google Trends export: one row per day with a search-volume index column.
fname = "../data/us-90days.csv"
df = DataFrame(CSV.File(fname))
describe(df)
typeof(df.Day)
# NOTE(review): `head(df)` was removed from DataFrames.jl — modern versions
# use `first(df, 6)`; confirm the pinned DataFrames.jl version supports it.
head(df)
transform!(df, :Day => x -> Dates.dayofweek.(x))
df.dayname = Dates.dayname.(df.Day)
df.dayofweek = Dates.dayofweek.(df.Day)
describe(df)
Dates.dayname(Date(2010,1,1))
# NOTE(review): `:Day_function` is the auto-generated column name produced by
# the anonymous function in `transform!` above; it duplicates `df.dayofweek`.
# Keep rows where dayofweek >= 6, i.e. Saturday (6) and Sunday (7).
weekends_df = filter(row -> row[:Day_function] >= 6, df)
# # Plot
gr(size = (1000, 500), legend = false)
# One tick per week, labelled like "30-Aug".
xticks = collect(weekends_df.Day[1]:Dates.Day(7):weekends_df.Day[lastindex(weekends_df.Day)])
xlabels = Dates.format.(xticks, "d-u")
# vspan(weekends_df.Day, color="#e7e6eb", xticks=xticks, xrotation=25, legend=false, size=(1000,400))
# vspan(weekends_df.Day, color="#e7e6eb", xticks=xticks, xrotation=25, legend=false, size=(1000,400))
vspan(weekends_df.Day, color="#e7e6eb")
plot!(weekends_df[:, 1], weekends_df[:, 2], color="red", xticks=(xticks, xlabels), xrotation=25, legend=false, size=(1000,400))
# This fixes the issue of vspan overriding the ylabels
vline!(weekends_df.Day, color="#e7e6eb",alpha=0.0, legend=true)
plot!(title="Data is beautiful: \n Hangovers are not", margin=10mm, ylabel="Google Search Volume (Indexed, 100 = Maximum)", yguidefontsize=8)
# NOTE(review): `2020-06-6` is integer arithmetic (evaluates to 2008), not a
# date — this was presumably meant to be `Date(2020, 6, 6)`; confirm.
annotate!(2020-06-6, 60, text("Saturday & Sunday", :blue, "right", 3))
# +
# plot!(weekends_df.Day, weekends_df.maxY, line=:stem, legend=false)
# -
# xticks = (Dates.value.(tickdays), Date.(tickdays))
# NOTE(review): `tickdays` is never defined in this notebook — the next line
# will raise UndefVarError as written.
(Dates.value.(tickdays), Date.(tickdays))
| hangover-cure/julia/hangover-julia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Open Issue Age
# This is the reference implementation for [Open Issue Age](https://github.com/chaoss/wg-evolution/blob/master/metrics/issues-open-age.md),
# a metric specified by the
# [Evolution Working Group](https://github.com/chaoss/wg-evolution) of the
# [CHAOSS project](https://chaoss.community).
#
# Have a look at [README.md](../README.md) to find out how to run this notebook (and others in this directory) as well as to get a better understanding of the purpose of the implementations.
#
# The implementation is described in two parts (see below):
#
# * Class for computing Open Issue Age
# * An explanatory analysis of the class' functionality
#
# Some more auxiliary information in this notebook:
#
# * Examples of the use of the implementation
# As discussed in the [README](../README.md) file, the scripts required to analyze the data fetched by Perceval are located in the `code_df` package. Due to python's import system, to import modules from a package which is not in the current directory, we have to either add the package to `PYTHONPATH` or simply append a `..` to `sys.path`, so that `code_df` can be successfully imported.
from datetime import datetime
import matplotlib.pyplot as plt
import sys
sys.path.append('..')
from code_df import utils
from code_df.issue import Issue
# %matplotlib inline
class OpenIssueAge(Issue):
    """
    Class for Age of Open Issues
    """

    def _flatten(self, item):
        """
        Flatten a raw issue fetched by Perceval into a flat dictionary.

        A list with a single flat dictionary will be returned.
        That dictionary will have the elements we need for computing metrics.
        The list may be empty, if for some reason the issue should not
        be considered.

        :param item: raw item fetched by Perceval (dictionary)
        :returns: list of a single flat dictionary
        """
        data = item['data']
        creation_date = utils.str_to_date(data['created_at'])

        # Discard issues created outside the [since, until] window.
        outside_window = ((self.since and self.since > creation_date)
                          or (self.until and self.until < creation_date))
        if outside_window:
            return []

        # Only currently-open issues contribute to this metric.
        if data['state'] != 'open':
            return []

        flat = {
            'repo': item['origin'],
            'hash': data['id'],
            'category': "issue",
            'author': data['user']['login'],
            'created_date': creation_date,
            'current_status': data['state'],
            # Age in whole days from creation until "now".
            'open_issue_age': (datetime.now() - creation_date).days
        }
        return [flat]

    def compute(self):
        """
        Compute the average open issue age for all issues in the Perceval data.

        :returns: the mean of the 'open_issue_age' column
        """
        return self.df['open_issue_age'].mean()

    def _agg(self, df, period):
        """
        Perform an aggregation operation on a DataFrame to find
        the average age of open issues created in every interval of
        the period specified in the time_series method ('M', 'W', 'D').

        :param df: a pandas DataFrame on which the aggregation is applied
        :param period: a pandas time-series rule string:
                       'W' week, 'M' month, 'D' day
        :returns: the aggregated DataFrame with a mean 'open_issue_age'
                  column
        """
        aggregated = df.resample(period).agg({'open_issue_age': 'mean'})
        # Intervals with no open issues produce NaN; report them as 0 instead.
        aggregated['open_issue_age'] = aggregated['open_issue_age'].fillna(0)
        return aggregated
# ## Performing the Analysis
# We'll perform two kinds of analysis here:
# - Counting the average age of open issues
# - Change of average open issue age over time
# ### Counting the average age of open issues
# First, we read the JSON file `issues.json`, present in the `implementations` directory, one level up. We make use of the `read_json_file` utility function. Notice the filter being used here on `items`. The GitHub API considers all pull requests to be issues. Any pull request represented as an issue has a 'pull_request' attribute, which is used to filter them out from the issue data.
# +
items = utils.read_json_file('../issues.json')
# The GitHub API returns pull requests as issues too; drop anything that
# carries a 'pull_request' attribute so only real issues remain.
items = [item for item in items if 'pull_request' not in item['data']]
# -
# Let's use the `compute` method to count the total number of valid issues made. First, we will do it without passing any since and until dates.
# Next, we can pass in the start and end dates as a tuple. The format would be `%Y-%m-%d`.
#
# Lets calculate the average age for all open issues first. Then, we can do it by passing a start date. Here, only those issues will be considered that were created after the start date we passed via the variable `date_since`.
#
# While printing the output, we will keep the precision to only two decimals.
# +
date_since = datetime.strptime("2018-09-07", "%Y-%m-%d")
open_issue_age = OpenIssueAge(items)
print("The average age of all open issues is {:.2f} days."
      .format(open_issue_age.compute()))
# Restrict the computation to issues created on/after date_since.
open_issue_age_interval = OpenIssueAge(items, (date_since, None))
print("The average age of open issues created after 2018-09-07 is {:.2f} days."
      .format(open_issue_age_interval.compute()))
# -
# ## Average age of open issues over time
# Using the `timeseries` method, we can see how the average age of open issues changes on every periodic interval of time. This interval can be a week, a month or any other valid pandas timeseries period. This kind of analysis is useful in finding trends over time, as we will see in the cell below.
#
# Lets use the `open_issue_age_interval` object we created above, where we passed an opening date to the object via `date_since`.
weekly_df = open_issue_age_interval.time_series('W')
# Lets see what the dataframe returned by `timeseries` looks like. As you will notice, the dataframe has rows corresponding to each and every week between the start and end dates. To do this, we simply set the `created_date` column of the DataFrame `open_issue_age_interval.df`, as its index and then `resample` it to whatever time period we need. In this case, we have used `W`.
weekly_df
# Lets plot the dataframe `weekly_df` using matplotlib.pyplot. We use the `seaborn` theme and plot a simple line plot --- average open issue age vs time interval. Using the `plt.fill_between` method allows us to "fill up" the area between the line plot and the x axis.
# NOTE(review): the bare 'seaborn' style name was removed in matplotlib >= 3.6
# (renamed to 'seaborn-v0_8') — confirm the pinned matplotlib version.
plt.style.use('seaborn')
weekly_df.plot(y='open_issue_age', use_index=True, marker='.')
plt.fill_between(y1=weekly_df['open_issue_age'], y2=0, x=weekly_df.index)
plt.title("Open Issue Age Timeseries");
# The lows in the graph above mean that either no issues were created that week, or that all created issues were closed.
# Lets try and do the same thing, but this time, we'll do it on a monthy basis.
monthly_df = open_issue_age_interval.time_series('M')
monthly_df
# As predicted, the `monthly_df` dataframe has significantly fewer rows than the dataframe we just observed --- `weekly_df`. Each row corresponds to a month between the `since` and `until` finish dates we passed to while instantiating the `OpenIssueAge` class.
#
# Lets do one last thing: plot the graph for `monthly_df`.
plt.style.use('seaborn')
monthly_df.plot(y='open_issue_age', use_index=True, marker='.')
plt.title("Average Open Issue Age");
| implementations/notebooks_df/open_issue_age.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Clean Slate: Estimating offenses eligible for expungement under varying conditions
# > Prepared by [<NAME>](https://github.com/laurafeeney) for Code for Boston's [Clean Slate project](https://github.com/codeforboston/clean-slate).
#
# ## Purpose & Notes
# Clean and merge Middlesex DA data with expungement eligibility data.
# This data was sourced from the Middlesex DA website: https://www.middlesexda.com/public-information/pages/prosecution-data-and-statistics
#
# Description from website: "The following is data from our Damion Case Management System pertaining to prosecution statistics for the time period from January 1, 2014, through January 1, 2020."
#
# The download is available as an Excel file. Opening excel in Python was too slow, so I manually converted it to csv, and imported via csv.
#
# Note: This did not have age or DOB.
#
# The Middlesex DA site says this should be prosecutions for 2014 - 2019. However, not all offense dates nor all disposition dates are within this timeline.
#
# ### Merging
# The Charges have a slightly different format, character set, and phrasing of charges than the Suffolk and NW data, which we obtained first. Will need some manual updating to get expungment info for all charges.
#
# ### Still to do
# * clean up sex and murder columns -- needs to be filled in for the unmatched from PCD / MCL / extra info.
#
# -----
# +
import pandas as pd
pd.set_option("display.max_rows", 200)
import numpy as np
import regex as re
import glob, os
import datetime
from datetime import date
#print(os.getcwd())
os.chdir("../../data/raw")
#print(os.getcwd())
# -
#ms_raw = pd.read_excel('damion_database_2014-2019_6.30.xlsx') # too slow to do this way
ms_raw = pd.read_csv('damion_database_2014-2019_6.30.csv')
columns = ['Case Number', 'Offense Date', 'Date of Filing', 'Court Location',
           'Charge/Crime Code', 'Charge/Crime Description', 'Charge/Crime Type',
           'Disposition Description', 'Disposition Date']
# NOTE(review): `.replace()` with no arguments is a no-op that returns a copy —
# this was presumably meant to be `.copy()`; confirm intent.
ms = ms_raw[columns].replace()
ms.head()
# ### Cleaning and variable prep
# +
ms.rename(columns={"Charge/Crime Description":"Charge"}, inplace=True)
# Label CMR offenses (Code of Mass Regulations)
ms['CMRoffense'] = False
ms.loc[ms['Charge'].str.contains("CMR"), 'CMRoffense'] = True
#Extract Chapter, Section, and Paragraph (I think the third one would be paragraph? It isn't always populated)
chsec = ms['Charge/Crime Code'].str.split("/", n = 2, expand = True)
ms['Chapter'] = chsec[0]
ms['Section'] = chsec[1]
ms['Paragraph'] = chsec[2]
# Remove weird A character, and create a version with no spaces and no extra characters. This file has different spacing
# than NW and Suffolk or Master Crime List descriptions.
ms['Charge_noA'] = ms['Charge'].map(lambda x: x.replace('Â',""))
# NOTE(review): these patterns are regexes; pandas >= 2.0 defaults
# Series.str.replace to regex=False, so they would need regex=True there —
# confirm the pinned pandas version.
ms['Charge_alnum'] = ms['Charge_noA'].str.replace(r'\W+', '', )
ms['Charge_alnum2'] = ms['Charge_alnum'].str.replace("[^a-zA-Z0-9 -]","",)
# Proxy for age -- using a juvenile court
ms['JuvenileC'] = False
ms.loc[ms['Court Location'].str.contains("JU"), 'JuvenileC'] = True
# -
# ### dates. Supposed to be 2014-2019
# +
reference_date = datetime.date(2020, 9, 1) # fixed date so results are reproducible (date.today() would not be stable)
ms['Offense Date'] = pd.to_datetime(ms['Offense Date'], errors='coerce').dt.date
# NOTE(review): `math` appears unused in this cell — candidate for removal.
import math
# Fractional years between the offense and the fixed reference date.
ms['years_since_offense'] = (reference_date - ms['Offense Date'])/pd.Timedelta(1, 'D')/365
# 999 is a sentinel for missing/unparseable offense dates.
ms['years_since_offense'] = ms['years_since_offense'].fillna(999).astype(int)
ms['years_since_offense'].loc[ms['years_since_offense']<999].describe()
# +
print("The earliest offense date is: ", min(ms['Offense Date']))
print("The max offense date is: ", max(ms['Offense Date']), "\n")
print("Distribution of years since offense:", "\n", ms['years_since_offense'].describe(), "\n")
before_2013 = ms['Case Number'][ms['Offense Date']<datetime.date(2013,1,1)].nunique()
before_2014 = ms['Case Number'][ms['Offense Date']<datetime.date(2014,1,1)].nunique()
after_2014 = ms['Case Number'][ms['Offense Date']>=datetime.date(2014,1,1)].nunique()
after_2013 = ms['Case Number'][ms['Offense Date']>=datetime.date(2013,1,1)].nunique()
print("There are", before_2014, "cases with offense date prior to Jan 1, 2014",
      "and", before_2013, "cases before 2013")
print("Percent of cases before 2014:",round(before_2014*100/after_2014,2), "\n"
      "Percent before 2013:", round(before_2013*100/after_2013,2))
# +
#ms.to_csv('../../data/raw/ms.csv', index=False)
# msc -- Charges with value counts
msc = ms['Charge'].value_counts().rename_axis('Charge').reset_index(name='Nvalues')
msc['Charge_noA'] = msc['Charge'].map(lambda x: x.replace('Â',""))
msc['Charge_alnum'] = msc['Charge_noA'].str.replace(r'\W+', '', )
# -
# ### Datasets with expungement info
# These were created from the Suffolk and NW data, in for example [MA_Data-1_Raw.ipynb](https://github.com/codeforboston/clean-slate/blob/master/analyses/notebooks/MA_Data-1_Raw.ipynb).
# This follows a similar process as [MA_Data-2_MergeCharges_alt.ipynb](https://github.com/codeforboston/clean-slate/blob/master/analyses/notebooks/MA_Data-2_MergeCharges_alt.ipynb).
#
# Prosecution charges detailed is the result of matching the NW and Suffolk charges with each other, and then matching that with the [Master Crime List](https://drive.google.com/file/d/11iD3ilejUW28NE6DdUaUkkp3PoPauhCj/view?usp=sharing), then manually filling in missing expungeabiliyt information, and rationalizing duplicate rows. Duplicate rows occurred because within a chapter and section, some charges may be expungeable while others may not.
#
# +
#read in prosecution charges detailed file
PCD = pd.read_csv('../../data/processed/prosecution_charges_detailed.csv', encoding='cp1252')
PCD.rename(columns={"Expungeable.":"Expungeable"}, inplace=True)
columns = ['Charge', 'Chapter', 'Section', 'Expungeable', 'sex', 'murder', 'extra_criteria']
PCD = PCD[columns]
# Same character cleanup as applied to the Middlesex data, so charges can be
# joined on the alphanumeric-only form.
PCD['Charge_noA'] = PCD['Charge'].map(lambda x: x.replace('Â',""))
PCD['Charge_alnum'] = PCD['Charge_noA'].str.replace(r'\W+', '', )
#PCD.info()
#read in additional info that fills in blanks from PCD. This was manually done by cross-referencing with MCL
# Then we confirmed with Sana for things that weren't obvious.
addtl_exp = pd.read_csv('../../data/raw/missing_expungeability_08-02.csv', encoding='cp1252')
addtl_exp.rename(columns={"Expungeable.":"Expungeable"}, inplace=True)
columns = ['Charge', 'Expungeable',
           'Reason not expungeable', 'Analysis notes']
addtl_exp = addtl_exp[columns]
# Normalize hand-entered labels to the canonical Yes/No/NA values.
addtl_exp['Expungeable'].replace({'yes': 'Yes', 'no': 'No', 'na': 'NA--CMR'}, inplace=True)
# merge these two expungement info datasets
exp = PCD.merge(addtl_exp, on='Charge', how='left')
def replace_values_merge(df, new, x, y):
    """Coalesce the two suffixed columns left by a pandas merge into one.

    Creates ``df[new]`` from ``df[x]``, filling missing entries from
    ``df[y]``, then drops both source columns. Mutates ``df`` in place.

    :param df: DataFrame holding a merge result
    :param new: name of the coalesced output column
    :param x: preferred column (typically the '_x' merge suffix)
    :param y: fallback column (typically the '_y' merge suffix)
    """
    # Assign the filled Series directly: Series.fillna(..., inplace=True) on a
    # column selection is unreliable and deprecated in pandas 2.x.
    df[new] = df[x].fillna(df[y])
    df.drop([x, y], axis=1, inplace=True)
# Coalesce the merge-suffixed Expungeable columns, then hand-correct a few
# specific charges.
replace_values_merge(exp, "Expungeable", "Expungeable_x", "Expungeable_y")
exp['CMRoffense'] = False
exp.loc[exp['Charge'].str.contains("CMR"), 'CMRoffense'] = True
# CMR (regulatory) offenses are outside the expungement framework entirely.
exp.loc[(exp['Expungeable'] == "Yes") & (exp['CMRoffense'] == True), ['Expungeable']] = "NotApplicable"
exp.loc[(exp['Charge'] == "BURGLARY, UNARMED & ASSAULT c266 §14"),['Expungeable']] = "No" # not expungeable
exp.loc[(exp['Charge'] == "ATTEMPT TO COMMIT CRIME c274 §6"),['Expungeable']] = "Attempt" # need more info
exp.loc[(exp['Charge'] == "ATTEMPT TO COMMIT CRIME, HABITUAL c274 §6"),['Expungeable']] = "Attempt" # need more info
exp.info()
# +
# Merge data on expungement. Unique offenses.
ms_merged = msc.merge(exp, on='Charge_alnum', how='left', indicator = True)
print(ms_merged._merge.value_counts())
# 'left_only' rows are Middlesex charges with no expungement info yet.
ms_merged['unmatched'] = ms_merged['_merge']=="left_only"
replace_values_merge(ms_merged, "Charge", "Charge_x", "Charge_y")
ms_merged = ms_merged.drop(columns = ['Charge_noA_x', 'Charge_noA_y',
                                      '_merge'])
ms_merged.info()
# +
#indicate CMRs
ms_merged.loc[ms_merged['Charge'].str.contains("CMR"), 'CMRoffense'] = 'yes'
ms_merged.CMRoffense.fillna("no", inplace=True)
ms_merged.loc[ms_merged['CMRoffense'] == "yes", ['Expungeable']] = "NA - CMR"
#drop unneeded columns and save list of charges without expungement info
columns = ['Charge', 'Charge_alnum', 'Nvalues', 'Chapter', 'Section',
           'Expungeable', 'CMRoffense', 'unmatched']
ms_unmatched = ms_merged[columns].loc[ (ms_merged['unmatched']==True) &
                                       ms_merged['Expungeable'].isnull()]
#ms_unmatched.to_csv('../../data/raw/unmatched_middlesex_to_clean.csv', index=True)
ms_merged.info()
# -
# Data exported to csv, saved in the google drive, and farmed out to the team to compare against the Master Crime List to fill in missing expungement information.
ms_new_data = pd.read_csv('../../data/raw/unmatched_middlesex_clean.csv')
# +
# NOTE(review): these character-class patterns are regexes; pandas >= 2.0
# defaults Series.str.replace to regex=False — confirm the pinned version.
ms_new_data['Charge_alnum2'] = ms_new_data['Charge_alnum'].str.replace("[^a-zA-Z0-9 -]","",)
ms_new_data = ms_new_data.drop(columns = ['Charge', "Charge_alnum", 'Nvalues', 'Group #', 'Name of updater', 'Index'])
ms_merged['Charge_alnum2'] = ms_merged['Charge_alnum'].str.replace("[^a-zA-Z0-9 -]","",)
# Fold the manually-cleaned expungement info back into the merged charges.
ms_merged_full = ms_merged.merge(ms_new_data, on='Charge_alnum2', how='outer', indicator=True)
print(ms_merged_full._merge.value_counts(dropna=False))
ms_merged_full.drop(['_merge'], axis=1, inplace=True)
# +
replace_values_merge(ms_merged_full, "Expungeable", "Expungeable_x", "Expungeable_y")
replace_values_merge(ms_merged_full, "Reason not expungeable", "Reason not expungeable_x", "Reason not expungeable_y")
ms_merged_full = ms_merged_full.drop(columns = ['Charge', "Charge_alnum", 'Chapter', 'Section', 'Nvalues', 'CMRoffense'])
# -
# Attach the per-charge expungement info to every individual charge record.
ms_merged_charges = ms.merge(ms_merged_full, on='Charge_alnum2', how='outer', indicator=True)
print(ms_merged_charges['_merge'].value_counts())
ms_merged_charges.drop(['_merge'], axis=1, inplace=True)
columns = ['Case Number', 'Offense Date', 'Court Location', 'Charge', 'Charge/Crime Type',
           'Disposition Description', 'CMRoffense', 'Chapter', 'Section', 'Paragraph', 'JuvenileC',
           'years_since_offense', 'sex', 'murder', 'Expungeable']
ms_merged_charges = ms_merged_charges[columns]
ms_merged_charges.info()
ms_merged_charges.to_csv('../../data/processed/merged_ms.csv', index=False)
| notebooks/Middlesex_Clean.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # for Loops
#
# A <code>for</code> loop acts as an iterator in Python; it goes through items that are in a *sequence* or any other iterable item. Objects that we've learned about that we can iterate over include strings, lists, tuples, and even built-in iterables for dictionaries, such as keys or values.
#
# We've already seen the <code>for</code> statement a little bit in past lectures but now let's formalize our understanding.
#
# Here's the general format for a <code>for</code> loop in Python:
#
# for item in object:
# statements to do stuff
#
# The variable name used for the item is completely up to the coder, so use your best judgment for choosing a name that makes sense and you will be able to understand when revisiting your code. This item name can then be referenced inside your loop, for example if you wanted to use <code>if</code> statements to perform checks.
#
# Let's go ahead and work through several examples of <code>for</code> loops using a variety of data object types. We'll start simple and build more complexity later on.
#
# ## Example 1
# Iterating through a list
# We'll learn how to automate this sort of list in the next lecture
list1 = [1,2,3,4,5,6,7,8,9,10]
for num in list1:
    print(num)
# Great! Hopefully this makes sense. Now let's add an <code>if</code> statement to check for even numbers. We'll first introduce a new concept here--the modulo.
# ### Modulo
# The modulo allows us to get the remainder in a division and uses the % symbol. For example:
17 % 5
# This makes sense since 17 divided by 5 is 3 remainder 2. Let's see a few more quick examples:
# 3 Remainder 1
10 % 3
# 2 Remainder 4
18 % 7
# 2 no remainder
4 % 2
# Notice that if a number is fully divisible with no remainder, the result of the modulo call is 0. We can use this to test for even numbers, since if a number modulo 2 is equal to 0, that means it is an even number!
#
# Back to the <code>for</code> loops!
#
# ## Example 2
# Let's print only the even numbers from that list!
for num in list1:
    if num % 2 == 0:
        print(num)
# We could have also put an <code>else</code> statement in there:
for num in list1:
    if num % 2 == 0:
        print(num)
    else:
        print('Odd number')
# ## Example 3
# Another common idea during a <code>for</code> loop is keeping some sort of running tally during multiple loops. For example, let's create a <code>for</code> loop that sums up the list:
# +
# Start sum at zero
list_sum = 0
for num in list1:
    list_sum = list_sum + num
print(list_sum)
# -
# Great! Read over the above cell and make sure you understand fully what is going on. Also we could have implemented a <code>+=</code> to perform the addition towards the sum. For example:
# +
# Start sum at zero
list_sum = 0
for num in list1:
    list_sum += num
print(list_sum)
# -
# ## Example 4
# We've used <code>for</code> loops with lists, how about with strings? Remember strings are a sequence so when we iterate through them we will be accessing each item in that string.
for letter in 'This is a string.':
    print(letter)
# ## Example 5
# Let's now look at how a <code>for</code> loop can be used with a tuple:
# +
tup = (1,2,3,4,5)
for t in tup:
    print(t)
# -
# ## Example 6
# Tuples have a special quality when it comes to <code>for</code> loops. If you are iterating through a sequence that contains tuples, the item can actually be the tuple itself, this is an example of *tuple unpacking*. During the <code>for</code> loop we will be unpacking the tuple inside of a sequence and we can access the individual items inside that tuple!
list2 = [(2,4),(6,8),(10,12)]
for tup in list2:
    print(tup)
# Now with unpacking!
for (t1,t2) in list2:
    print(t1)
# Cool! With tuples in a sequence we can access the items inside of them through unpacking! The reason this is important is because many objects will deliver their iterables through tuples. Let's start exploring iterating through Dictionaries to explore this further!
# ## Example 7
d = {'k1':1,'k2':2,'k3':3}
for item in d:
    print(item)
# Notice how this produces only the keys. So how can we get the values? Or both the keys and the values?
#
# We're going to introduce three new Dictionary methods: **.keys()**, **.values()** and **.items()**
#
# In Python each of these methods return a *dictionary view object*. It supports operations like membership test and iteration, but its contents are not independent of the original dictionary – it is only a view. Let's see it in action:
# Create a dictionary view object
d.items()
# Since the .items() method supports iteration, we can perform *dictionary unpacking* to separate keys and values just as we did in the previous examples.
# Dictionary unpacking
for k,v in d.items():
    print(k)
    print(v)
# If you want to obtain a true list of keys, values, or key/value tuples, you can *cast* the view as a list:
list(d.keys())
# Since Python 3.7, dictionaries preserve insertion order, so keys and values come back in the order the items were added. You can obtain a sorted list using sorted():
sorted(d.values())
# ## Conclusion
#
# We've learned how to use for loops to iterate through tuples, lists, strings, and dictionaries. It will be an important tool for us, so make sure you know it well and understood the above examples.
#
# [More resources](http://www.tutorialspoint.com/python/python_for_loop.htm)
| Exercise py/03-for Loops.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7.3 64-bit
# language: python
# name: python37364bit31dfbc0e68ab47c2be432e09052b515f
# ---
# +
import pickle
import numpy as np
import pandas as pd

# history.pkl holds a list of per-ticker OHLCV DataFrames — presumably one
# per index constituent; TODO confirm against the producer of this file.
with open('history.pkl', 'rb') as f:
    df_list = pickle.load(f)
print(f'Stage 0: {len(df_list)} items')
# Assemble one wide frame: column close_i = adjusted close of ticker i.
df_adj_close = pd.DataFrame()
i = 0
for df in df_list:
    df_adj_close[f'close_{i}'] = df['Adj Close']
    i = i + 1
# df_adj_close
# let's diff it.
# NOTE(review): filling NaN with 0 *before* diff() creates artificial jumps
# equal to the full price wherever data is missing — confirm this is intended.
df_adj_close = df_adj_close.fillna(0)
df2 = df_adj_close.diff()
# drop first row
df2 = df2[1:]
# +1 advancing day, -1 declining day, 0 unchanged.
df2 = np.sign(df2)
# +
import yfinance as yf

start_date = '2018-12-22'
end_date = '2021-12-24'
# NOTE(review): this `i = 0` appears to be a leftover — it is never used below.
i = 0
dax = yf.download('^GDAXI',
                  start=start_date,
                  end=end_date,
                  progress=False)
# -
# Advance/decline ratio per day: (advances - declines) / (advances + declines).
ad_ratio = df2.sum(axis=1) /df2.abs().sum(axis=1)
ad_ratio[700:]
dax['ad_ratio'] = ad_ratio
x1 = dax[['Adj Close','ad_ratio']]
# NOTE(review): `plt` conventionally aliases matplotlib.pyplot, not matplotlib;
# `plt.pyplot.figure()` below only works because pandas plotting imported
# pyplot as a side effect — consider `import matplotlib.pyplot as plt`.
import matplotlib as plt
plt.rcParams['figure.figsize'] = [25, 10]
x1['Adj Close'].plot()
plt.pyplot.figure()
x1['ad_ratio'].plot()
| advance_decline_ratio.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/diegoalmanzaxd/daa_2021_1/blob/master/26_Octubre.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="47ARhT22mkGW" outputId="66a96425-d264-4ccf-c59c-c97884044575" colab={"base_uri": "https://localhost:8080/", "height": 34}
def ejemplo11( n ):
    """Count how many integer halvings bring n down to 1 or below,
    i.e. floor(log2(n)) for n >= 1 (0 for n <= 1)."""
    halvings = 0
    value = n
    while value > 1:
        value //= 2
        halvings += 1
    return halvings

print(ejemplo11(16))
# T(n) = 2 + (2 Log 2 n)
# + id="1RXi6wrvmp1x" outputId="83062095-40eb-4c43-b678-161bda009fd3" colab={"base_uri": "https://localhost:8080/", "height": 34}
def ejemplo12( n ):
    """Sum ejemplo11(x) over every x in 0..n-1."""
    return sum(ejemplo11(x) for x in range(n))

ejemplo12(10)
# + id="xWg1-lY0mqO9" outputId="054a5a4b-e3b4-4bbc-ecd6-621bd0da50dc" colab={"base_uri": "https://localhost:8080/", "height": 34}
def ejemplo12_bis( n=5 ):
    """Manually-unrolled version of ejemplo12 for the first five inputs.

    NOTE(review): the parameter n is never used — the five calls are fixed
    regardless of the argument, yielding 0 + 0 + 1 + 1 + 2 = 4.
    """
    partial_counts = [ejemplo11(valor) for valor in range(5)]
    return sum(partial_counts)

ejemplo12_bis( 5 )
# + id="xg2WIaFxmqme" outputId="36d11a53-beb6-4f41-cfc5-6de42eb2a3a8" colab={"base_uri": "https://localhost:8080/", "height": 85}
def ejemplo13( x ):
    """Repeatedly divide x by 10, printing each value that is still >= 10,
    then print how many divisions were performed (~ floor(log10(x)))."""
    bandera = x
    contador = 0
    # Each pass strips one decimal digit (note: float division).
    while bandera >= 10:
        print(f" x = { bandera } ")
        contador += 1
        bandera /= 10
    print(contador)

# T(x) = log10 x +1
ejemplo13( 1000 )
# + id="WVybKQAomrbW" outputId="f2621952-11a5-4bc7-f54f-b380d01a9529" colab={"base_uri": "https://localhost:8080/", "height": 34}
def ejemplo14( n ):
    """Count base-3 (float) divisions needed to drop n below 3, twice —
    the two original loops both start from n, so the result is doubled."""

    def _divisions(valor):
        # Number of /3 steps until the value is below 3.
        pasos = 0
        while valor >= 3:
            valor /= 3
            pasos += 1
        return pasos

    return _divisions(n) + _divisions(n)

print(ejemplo14( 27 ))
# + id="HqS9vGWZo12N" outputId="cf36f155-8bcf-43ae-f5da-334146b61a65" colab={"base_uri": "https://localhost:8080/", "height": 34}
def ejemplo15( n ):
    """Count n*n nested-loop iterations plus the number of float halvings
    of n until it is <= 1: T(n) ~ n^2 + log2(n)."""
    # Quadratic part: one count per (i, j) pair.
    contador = sum(1 for _ in range(n) for _ in range(n))
    # Logarithmic part: halve (float division) until no longer greater than 1.
    while n > 1:
        n /= 2
        contador += 1
    return contador

print(ejemplo15(10))
| 26_Octubre.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### Model Interpretation for Pretrained ImageNet Model using RISE
#
# This notebook demonstrates how to apply the RISE algorithm on a pretrained ImageNet model using an example image (a bee) and visualizes the attributions for each pixel/super-pixel by displaying them on the image.<br>
#
# RISE is short for Randomized Input Sampling for Explanation of Black-box Models. It estimates importance empirically by probing the model with randomly masked versions of the input image and obtaining the corresponding outputs.<br>
#
# More details about this method can be found in the paper https://arxiv.org/abs/1806.07421.<br>
#
import warnings
warnings.filterwarnings('ignore') # disable warnings relateds to versions of tf
import numpy as np
# keras model and preprocessing tools
from tensorflow.keras.applications.resnet50 import ResNet50, preprocess_input, decode_predictions
from keras import backend as K
from keras.preprocessing import image
# dianna library for explanation
import dianna
from dianna import visualization
# for plotting
# %matplotlib inline
from matplotlib import pyplot as plt
# #### 1 - Loading the model and the dataset
# Loads pretrained ImageNet model and the image to be explained.
# Initialize the pretrained model.
class Model():
    """Thin wrapper around a pretrained ResNet50 classifier.

    Exposes the fixed input size expected by the network and a helper to
    run prediction on a batch of preprocessed images.
    """
    def __init__(self):
        # Put the backend in inference mode before building the network.
        # NOTE(review): K.set_learning_phase was removed in TF 2.x — confirm
        # the pinned Keras/TensorFlow version still provides it.
        K.set_learning_phase(0)
        self.model = ResNet50()
        # (height, width) expected by ResNet50.
        self.input_size = (224, 224)
    def run_on_batch(self, x):
        # x: batch of preprocessed images — presumably channels-last
        # (N, 224, 224, 3); verify against the preprocess_input usage.
        return self.model.predict(x)
model = Model()
# Load and preprocess image.
def load_img(path):
    """Read the image at *path*, resized to the model's input size.

    :param path: filesystem path of the image
    :returns: (PIL image, preprocessed array) tuple
    """
    img = image.load_img(path, target_size=model.input_size)
    preprocessed = preprocess_input(image.img_to_array(img))
    return img, preprocessed
# Call the function to load an image of a single instance in the test data from the `img` folder.
img, x = load_img('./img/bee.jpg')
plt.imshow(img)
# #### 2 - Compute attributions and visualize the saliency
# Compute attributions using RISE and visualize the saliency on the image.
# RISE masks random portions of the input image and passes this image through the model — the mask that damages accuracy the most is the most “important” portion.<br>
# To call the explainer and generate saliency map, the user need to specified the number of masks being randomly generated (`n_masks`), the resolution of features in masks (`feature_res`) and for each mask and each feature in the image, the probability of being kept unmasked (`p_keep`).
# One saliency map per ImageNet class label (1000 in total).
heatmaps = dianna.explain_image(model.run_on_batch, x, method="RISE",
                                labels=[i for i in range(1000)],
                                n_masks=1000, feature_res=6, p_keep=.1,
                                axis_labels={2: 'channels'})
# Make predictions and select the top prediction.
#
# +
def class_name(idx):
    """Return the human-readable ImageNet label for class index ``idx``."""
    # np.eye(1, 1000, idx) builds a one-hot "prediction" vector so that
    # decode_predictions reports the label for exactly this class index.
    return decode_predictions(np.eye(1, 1000, idx))[0][0][1]
# print the name of predicted class, taking care of adding a batch axis to the model input
class_name(np.argmax(model.model.predict(x[None, ...])))
# -
# Visualize the saliency map on the image for the predicted class.
# + tags=[]
class_idx = np.argmax(model.model.predict(x[None, ...]))
print(f'Explanation for `{class_name(class_idx)}`')
visualization.plot_image(heatmaps[class_idx], image.img_to_array(img)/255., heatmap_cmap='jet')
# -
# #### 3 - Conclusions
# These saliency maps are generated by passing multiple randomly masked input to the black-box model and averaging their importance scores. The idea behind this is that whenever a mask preserves important parts of the image it gets higher score. <br>
#
# The example here shows that the RISE method evaluates the importance of each pixel/super pixel to the classification. Pixels depicting the bee are highlighted by the XAI approach, which implies how the model classifies the item in the image. The results are reasonable based on human visual perception of the testing image.
| tutorials/rise_imagenet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import keras.backend as K
K.set_image_dim_ordering('th')
import numpy as np
np.random.seed(123)
# Next, we'll import the Sequential model type from Keras. This is simply a linear stack of neural network layers, and it's perfect for the type of feed-forward CNN we're building in this tutorial.
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.datasets import mnist
#load pre-shuffled MNIST data into train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
print(X_train.shape)
from matplotlib import pyplot as plt
plt.imshow(X_train[0])
X_train = X_train.reshape(X_train.shape[0],1,28,28)
X_test = X_test.reshape(X_test.shape[0], 1, 28, 28)
print(X_train.shape)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print(y_train.shape)
print(y_train[:10])
Y_train=np_utils.to_categorical(y_train, 10)
Y_test = np_utils.to_categorical(y_test, 10)
print(Y_train.shape)
# Step 7: Define model architecture.
# Now we're ready to define our model architecture. In actual R&D work, researchers will spend a considerable amount of time studying model architectures.
#
# To keep this tutorial moving along, we're not going to discuss the theory or math here. This alone is a rich and meaty field, and we recommend the CS231n class mentioned earlier for those who want to learn more.
#
# Plus, when you're just starting out, you can just replicate proven architectures from academic papers or use existing examples. Here's a list of example implementations in Keras.
#
# Let's start by declaring a sequential model format:
# First (exploratory) model definition. Note that this model is rebuilt from
# scratch in the next cell; the hidden Dense layer there is sized 128, so the
# same width is used here for consistency (it was Dense(10), an accidental
# bottleneck before the 10-way softmax).
model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))
# Confirm the convolution output shape before stacking further layers.
print(model.output_shape)
model.add(Convolution2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
# Hidden fully-connected layer feeding the 10-class softmax output.
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# +
model = Sequential()
model.add(Convolution2D(32, 3, 3, activation='relu', input_shape=(1,28,28)))
model.add(Convolution2D(32, 3, 3, activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# -
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
model.fit(X_train, Y_train,
batch_size=32, nb_epoch=10, verbose=1)
score = model.evaluate(X_test, Y_test, verbose=0)
| kerasTutorial/FirstMNISTProgram.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="EKOTlwcmxmej"
# # DeBERTa Fine-Tuning on CoLA with SMART and SiFT
#
# This notebook was originally created by <NAME> and <NAME>. We made changes for SiFT and SMART, as well as our custom BERT/DeBERTa class.
# + [markdown] id="jJKaoairpdRa"
# ##Data and Importing Modules
# + id="DEfSbAA4QHas" colab={"base_uri": "https://localhost:8080/"} outputId="8b6beb69-f4a7-4aa1-efee-d88a5b7bd8c7"
import tensorflow as tf
# Get the GPU device name.
device_name = tf.test.gpu_device_name()
# The device name should look like the following:
if device_name == '/device:GPU:0':
print('Found GPU at: {}'.format(device_name))
else:
raise SystemError('GPU device not found')
# + id="oYsV4H8fCpZ-" colab={"base_uri": "https://localhost:8080/"} outputId="48040675-d522-46a3-c07d-7d26d2059f37"
import torch
# If there's a GPU available...
if torch.cuda.is_available():
# Tell PyTorch to use the GPU.
device = torch.device("cuda")
print('There are %d GPU(s) available.' % torch.cuda.device_count())
print('We will use the GPU:', torch.cuda.get_device_name(0))
# If not...
else:
print('No GPU available, using the CPU instead.')
device = torch.device("cpu")
# + id="0NmMdkZO8R6q" colab={"base_uri": "https://localhost:8080/"} outputId="feed803f-b098-4780-c7ed-b64307526f26"
# !pip install transformers
# + id="5m6AnuFv0QXQ" colab={"base_uri": "https://localhost:8080/"} outputId="316f7b60-3091-4b7b-b9e9-234dc74c4218"
# !pip install wget
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="FBXpjYJ0iHcp" outputId="a0a3aed1-3d8d-4b78-9659-47806d2a1473"
import pandas as pd
import requests
import io
url = "https://raw.githubusercontent.com/wangluheng328/SiFT-Project/main/Data/fourth.csv"
download = requests.get(url).content
df = pd.read_csv(io.StringIO(download.decode('utf-8')),index_col=0)
df.head()
# + id="B5K7JlQ-BPts"
df1 = df[df['label']==1]
df0 = df[df['label']==0]
df0 = df0[:2200]
df_ori = pd.concat([df0, df1], axis = 0, join = 'inner')
df_ori = df_ori.sample(frac = 1).reset_index(drop = True)
df = df_ori[:2000]
# + colab={"base_uri": "https://localhost:8080/", "height": 0} id="MvfL3MLjBsqb" outputId="082a163c-bd59-4d8b-8565-e52ebd552e5d"
df.head()
# + id="tOPpgw4Gi6hc"
sentences = df.tweet.values
labels = df.label.values
# + colab={"base_uri": "https://localhost:8080/"} id="0RAaRTCcDCOA" outputId="e74e3ed5-f1a4-4257-dcbb-d04445a0a497"
labels.sum()
# + [markdown] id="EFSJzwI5pujc"
# ## Tokenization and DataLoader
# + id="Z474sSC6oe7A" colab={"base_uri": "https://localhost:8080/"} outputId="8445c84b-80d2-43be-ff04-20ee1c558d87"
from transformers import DebertaTokenizer
print('Loading DeBERTa tokenizer...')
tokenizer = DebertaTokenizer.from_pretrained('microsoft/deberta-base', do_lower_case=True)
# + id="dLIbudgfh6F0" colab={"base_uri": "https://localhost:8080/"} outputId="018ac02c-8ca2-4c72-855c-b1e831f9bcdb"
print(' Original: ', sentences[0])
print('Tokenized: ', tokenizer.tokenize(sentences[0]))
print('Token IDs: ', tokenizer.convert_tokens_to_ids(tokenizer.tokenize(sentences[0])))
# + id="cKsH2sU0OCQA" colab={"base_uri": "https://localhost:8080/"} outputId="d250a792-baaa-46e0-c15c-022ffacb1b88"
max_len = 0
for sent in sentences:
input_ids = tokenizer.encode(sent, add_special_tokens=True)
max_len = max(max_len, len(input_ids))
print('Max sentence length: ', max_len)
# + id="2bBdb3pt8LuQ" colab={"base_uri": "https://localhost:8080/"} outputId="aa53772a-0444-449f-f89a-1313426aa85c"
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 256,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
print('Original: ', sentences[0])
print('Token IDs:', input_ids[0])
# + id="GEgLpFVlo1Z-" colab={"base_uri": "https://localhost:8080/"} outputId="c482dc8d-a632-45b5-fd3b-dc815aed62d1"
from torch.utils.data import TensorDataset, random_split
dataset = TensorDataset(input_ids, attention_masks, labels)
train_size = int(0.9 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
print('{:>5,} training samples'.format(train_size))
print('{:>5,} validation samples'.format(val_size))
# + id="XGUqOCtgqGhP"
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
batch_size = 5
train_dataloader = DataLoader(
train_dataset,
sampler = RandomSampler(train_dataset),
batch_size = batch_size
)
validation_dataloader = DataLoader(
val_dataset,
sampler = SequentialSampler(val_dataset),
batch_size = batch_size
)
# + [markdown] id="73S4P4SMp6hX"
# ## Custom Deberta Class and Initialization
# + id="UOteWAT-Adqx"
from transformers import DebertaForSequenceClassification, AdamW, DebertaConfig, DebertaPreTrainedModel, DebertaModel
from transformers.models.deberta.modeling_deberta import *
#from transformers.modeling_outputs import BaseModelOutputWithPoolingAndCrossAttentions
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
class CustomDebertaForClassification(DebertaForSequenceClassification):
    """DeBERTa classifier with the forward pass split into ``embed`` and ``predict``.

    Splitting the pass lets SMART/SiFT perturb the embedding output directly
    before it enters the transformer encoder.
    """
    def __init__(self, config):
        super().__init__(config)
        #self.bert = BertForSequenceClassification(config).from_pretrained("bert-base-uncased",num_labels = 2,output_attentions = False, output_hidden_states = False)
        # Alias the inner DebertaModel's submodules so embed()/predict()
        # can invoke them directly.
        self.embeddings = self.deberta.embeddings
        self.encoder = self.deberta.encoder
        self.z_steps = 0 #copied from DebertaModel source code
    def embed(self, input_ids=None,
        mask=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None
        ):
        """Run only the embedding layer and return its output tensor."""
        # See: BERTModel.forward
        return self.embeddings(
            input_ids=input_ids,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            mask=mask,
            inputs_embeds=inputs_embeds
        )
    def predict(self,embedding_output,
        attention_mask=None,
        head_mask=None,
        encoder_hidden_states=None,
        encoder_extended_attention_mask=None,
        past_key_values=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=True):
        """Run encoder + pooler + classifier on a (possibly perturbed)
        embedding output and return the classification logits."""
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask,
            output_hidden_states=True,
            output_attentions=output_attentions,
            return_dict=return_dict
        )
        # With output_hidden_states=True, index 1 holds the per-layer
        # hidden states.
        encoded_layers = encoder_outputs[1]
        # Optional extra decoding steps that re-run the last encoder layer
        # (copied from the DebertaModel source). z_steps is 0 in this
        # notebook, so this branch is normally skipped.
        if self.z_steps > 1:
            hidden_states = encoded_layers[-2]
            layers = [self.encoder.layer[-1] for _ in range(self.z_steps)]
            query_states = encoded_layers[-1]
            rel_embeddings = self.encoder.get_rel_embedding()
            attention_mask = self.encoder.get_attention_mask(attention_mask)
            rel_pos = self.encoder.get_rel_pos(embedding_output)
            for layer in layers[1:]:
                query_states = layer(
                    hidden_states,
                    attention_mask,
                    return_att=False,
                    query_states=query_states,
                    relative_pos=rel_pos,
                    rel_embeddings=rel_embeddings,
                )
                encoded_layers.append(query_states)
        sequence_output = encoded_layers[-1]
        # if not return_dict:
        #     return (sequence_output,) + encoder_outputs[(1 if output_hidden_states else 2) :]
        outputs = BaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states if output_hidden_states else None,
            attentions=encoder_outputs.attentions,
        )
        # Classification head inherited from DebertaForSequenceClassification.
        pooled_output = self.pooler(outputs[0])
        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        return logits
# + colab={"base_uri": "https://localhost:8080/"} id="IdNBO5qk2-i_" outputId="e0de005c-0e97-4ca1-e7a8-49079bfa7674"
#@title
model = CustomDebertaForClassification.from_pretrained(
"microsoft/deberta-base",
num_labels = 2,
output_attentions = False,
output_hidden_states = False,
)
model.cuda()
# + [markdown] id="hmSpMRD5qaqE"
# ##Noise Function
# + id="pG5DszcpDAjw"
from torch.nn import LayerNorm
import torch.nn.functional as F
def normalize_embed(embed):
    """Standardize each sample of a (batch, seq, dim) embedding tensor.

    Returns ``(normalized, means, stds)`` where ``normalized`` is a new
    tensor with per-sample zero mean and unit std, and ``means``/``stds``
    are the per-sample statistics needed by ``denormalize_embed`` to undo
    the transform.
    """
    embed_mean = torch.mean(embed, dim=(1, 2))
    embed_std = torch.std(embed, dim=(1, 2))
    # Broadcast the per-sample statistics over (seq, dim) instead of the
    # previous Python loop over the batch — same values, one tensor op.
    embed_clone = (embed - embed_mean[:, None, None]) / embed_std[:, None, None]
    return embed_clone, embed_mean, embed_std
def denormalize_embed(embed, embed_mean, embed_std):
    """Undo ``normalize_embed`` in place: embed[i] <- embed[i] * std[i] + mean[i].

    Mutates ``embed`` sample by sample and returns it (callers rely on the
    in-place update).
    """
    for idx, (sample_mean, sample_std) in enumerate(zip(embed_mean, embed_std)):
        embed[idx] = embed[idx] * sample_std + sample_mean
    return embed
def stable_kl(logit, target, epsilon=1e-6, reduce=True):
    """Numerically stabilized KL-style divergence between two batches of logits.

    Used as the SMART adversarial regularizer. Both inputs are flattened to
    (batch, n_classes). Returns the summed divergence, divided by the batch
    size when ``reduce`` is True.
    """
    probs_a = F.log_softmax(logit.view(-1, logit.size(-1)).float(), 1).exp()
    probs_b = F.log_softmax(target.view(-1, target.size(-1)).float(), 1).exp()
    batch = probs_a.size(0)
    # log-odds terms, shifted by epsilon away from 0/1 for stability; the
    # detach blocks gradients through the log factors (as in MT-DNN).
    log_odds_a = -(1.0 / (probs_a + epsilon) - 1 + epsilon).detach().log()
    log_odds_b = -(1.0 / (probs_b + epsilon) - 1 + epsilon).detach().log()
    total = (probs_a * (log_odds_a - log_odds_b) * 2).sum()
    return total / batch if reduce else total
def _norm_grad(grad, epsilon = 1e-6, eff_grad=None, sentence_level=False):
if sentence_level:
direction = grad / (grad.abs().max((-2, -1), keepdim=True)[0] + epsilon)
else:
direction = grad / (grad.abs().max(-1, keepdim=True)[0] + epsilon)
eff_direction = eff_grad / (grad.abs().max(-1, keepdim=True)[0] + epsilon)
return direction, eff_direction
def noise(embed, model, attention_mask, step_size, normalize=False, k=1, mean=0, std=0.01):
    """Generate an adversarial perturbation of ``embed`` (SMART / SiFT).

    Adds small Gaussian noise to the embeddings, takes one ascent step on the
    KL divergence between clean and noised predictions w.r.t. the noise,
    L-inf-normalizes the result via ``_norm_grad``, and returns the perturbed
    embeddings. With ``normalize=True`` (SiFT) the perturbation is applied in
    a per-sample standardized space and then mapped back.

    NOTE(review): the ``k``, ``mean`` and ``std`` parameters are currently
    unused — the noise std is hard-coded to 0.01 below. Relies on the
    module-level ``device``.
    """
    if normalize == True:
        # SiFT variant: perturb the normalized embeddings, then denormalize.
        # LNorm = LayerNorm(embed.size(),elementwise_affine=False)
        # normalized_embed = LNorm(embed)
        normalized_embed, embed_mean, embed_std = normalize_embed(embed)
        logits = model.predict(normalized_embed,attention_mask)
        noise = torch.normal(mean=0, std=0.01,size=(normalized_embed.size()[0],normalized_embed.size()[1],normalized_embed.size()[2]))
        noise = noise.to(device)
        noise.requires_grad_()
        noised_normalized_embeddings = normalized_embed+noise
        adv_logits = model.predict(noised_normalized_embeddings, attention_mask)
        # KL between noised and clean predictions; gradient flows into `noise` only.
        adv_loss = stable_kl(adv_logits, logits.detach(), reduce=False)
        delta_grad, = torch.autograd.grad(adv_loss, noise, only_inputs=True, retain_graph=False)
        norm = delta_grad.norm()
        # if (torch.isnan(norm) or torch.isinf(norm)):
        #     return 0
        # One ascent step on the noise, followed by L-inf normalization.
        eff_delta_grad = delta_grad * step_size
        delta_grad = noise + delta_grad * step_size
        noise, eff_noise = _norm_grad(delta_grad, eff_grad=eff_delta_grad, sentence_level=0)
        noise = noise.detach()
        noised_normalized_embeddings = normalized_embed+noise
        denormalize_noised_embed = denormalize_embed(noised_normalized_embeddings,embed_mean, embed_std)
        return denormalize_noised_embed
    else:
        # SMART variant: perturb the raw embeddings directly.
        logits = model.predict(embed,attention_mask)
        noise = torch.normal(mean=0, std=0.01,size=(embed.size()[0],embed.size()[1],embed.size()[2]))
        noise = noise.to(device)
        noise.requires_grad_()
        noised_embeddings = embed+noise
        adv_logits = model.predict(noised_embeddings, attention_mask)
        # KL between noised and clean predictions; gradient flows into `noise` only.
        adv_loss = stable_kl(adv_logits, logits.detach(), reduce=False)
        delta_grad, = torch.autograd.grad(adv_loss, noise, only_inputs=True, retain_graph=False)
        norm = delta_grad.norm()
        # if (torch.isnan(norm) or torch.isinf(norm)):
        #     return 0
        # One ascent step on the noise, followed by L-inf normalization.
        eff_delta_grad = delta_grad * step_size
        delta_grad = noise + delta_grad * step_size
        noise, eff_noise = _norm_grad(delta_grad, eff_grad=eff_delta_grad, sentence_level=0)
        noise = noise.detach()
        noised_embeddings = embed+noise
        return noised_embeddings
# + [markdown] id="bunW4qF4qSyZ"
# ## Optimizer, Scheduler, and Some Other Training Prep
# + id="GLs72DuMODJO"
#@title
optimizer = AdamW(model.parameters(),
lr = 2e-5,
eps = 1e-8
)
# + id="-p0upAhhRiIx"
#@title
from transformers import get_linear_schedule_with_warmup
epochs = 3
total_steps = len(train_dataloader) * epochs
scheduler = get_linear_schedule_with_warmup(optimizer,
num_warmup_steps = 0,
num_training_steps = total_steps
)
# + id="9cQNvaZ9bnyy"
#@title
import numpy as np
def flat_accuracy(preds, labels):
    """Return the fraction of rows in ``preds`` whose argmax matches ``labels``."""
    predicted = preds.argmax(axis=1).flatten()
    actual = labels.flatten()
    return (predicted == actual).mean()
# + id="gpt6tR83keZD"
#@title
import time
import datetime
def format_time(elapsed):
    """Format ``elapsed`` (seconds, possibly fractional) as an h:mm:ss string."""
    whole_seconds = int(round(elapsed))
    return str(datetime.timedelta(seconds=whole_seconds))
# + id="ScjvBSBfHtBc"
MODE = "SMART-adv-only"
# + [markdown] id="mCSpuOXLqor-"
# ##Training Loop with Validation
# + id="6J-FYdx6nFE_" colab={"base_uri": "https://localhost:8080/"} outputId="72befe43-17ea-47c1-cec2-d18ea305f108"
import random
import numpy as np
seed_val = 42
random.seed(seed_val)
np.random.seed(seed_val)
torch.manual_seed(seed_val)
torch.cuda.manual_seed_all(seed_val)
training_stats = []
total_t0 = time.time()
# For each epoch...
for epoch_i in range(0, epochs):
# ========================================
# Training
# ========================================
# Perform one full pass over the training set.
print("")
print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
print('Training...')
# Measure how long the training epoch takes.
t0 = time.time()
total_train_loss = 0
model.train()
# For each batch of training data...
for step, batch in enumerate(train_dataloader):
# Progress update every 40 batches.
if step % 40 == 0 and not step == 0:
elapsed = format_time(time.time() - t0)
print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
model.zero_grad()
embed = model.embed(input_ids = b_input_ids,mask = b_input_mask)
preds = model.predict(embedding_output = embed,attention_mask = b_input_mask)
loss_fct = CrossEntropyLoss()
regular_loss = loss_fct(preds.view(-1,2), b_labels.view(-1))
loss_list = [regular_loss]
if MODE in ["SMART-adv-only", "SIFT"]:
normalise = True if MODE == "SIFT" else False
noised_embeddings = noise(embed, model, b_input_mask, 1e-3, normalize=normalise, k=1)
adv_logits = model.predict(noised_embeddings, b_input_mask)
adv_loss = stable_kl(preds.view(-1,2), adv_logits.view(-1,2))
loss_list.append(adv_loss)
loss = sum(loss_list)
# END MODEL
total_train_loss += loss.item()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
optimizer.step()
scheduler.step()
avg_train_loss = total_train_loss / len(train_dataloader)
training_time = format_time(time.time() - t0)
print("")
print(" Average training loss: {0:.2f}".format(avg_train_loss))
print(" Training epcoh took: {:}".format(training_time))
# ========================================
# Validation
# ========================================
# After the completion of each training epoch, measure our performance on
# our validation set.
print("")
print("Running Validation...")
t0 = time.time()
model.eval()
total_eval_accuracy = 0
total_eval_loss = 0
nb_eval_steps = 0
# Evaluate data for one epoch
for batch in validation_dataloader:
b_input_ids = batch[0].to(device)
b_input_mask = batch[1].to(device)
b_labels = batch[2].to(device)
with torch.no_grad():
result = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
labels=b_labels,
return_dict=True)
loss = result.loss
logits = result.logits
total_eval_loss += loss.item()
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
total_eval_accuracy += flat_accuracy(logits, label_ids)
avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
avg_val_loss = total_eval_loss / len(validation_dataloader)
validation_time = format_time(time.time() - t0)
print(" Validation Loss: {0:.2f}".format(avg_val_loss))
print(" Validation took: {:}".format(validation_time))
training_stats.append(
{
'epoch': epoch_i + 1,
'Training Loss': avg_train_loss,
'Valid. Loss': avg_val_loss,
'Valid. Accur.': avg_val_accuracy,
'Training Time': training_time,
'Validation Time': validation_time
}
)
print("")
print("Training complete!")
print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
# + [markdown] id="VQTvJ1vRP7u4"
# Let's view the summary of the training process.
# + id="6O_NbXFGMukX" colab={"base_uri": "https://localhost:8080/", "height": 168} outputId="c0f9c29c-ac04-45fa-f493-5595ef8e6a94"
import pandas as pd
pd.set_option('precision', 2)
df_stats = pd.DataFrame(data=training_stats)
df_stats = df_stats.set_index('epoch')
df_stats
# + id="68xreA9JAmG5" colab={"base_uri": "https://localhost:8080/", "height": 427} outputId="e75a8141-f69e-4335-8000-02c42fbf2619"
import matplotlib.pyplot as plt
% matplotlib inline
import seaborn as sns
# Use plot styling from seaborn.
sns.set(style='darkgrid')
# Increase the plot size and font size.
sns.set(font_scale=1.5)
plt.rcParams["figure.figsize"] = (12,6)
# Plot the learning curve.
plt.plot(df_stats['Training Loss'], 'b-o', label="Training")
plt.plot(df_stats['Valid. Loss'], 'g-o', label="Validation")
# Label the plot.
plt.title("Training & Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.xticks([1, 2, 3, 4])
plt.show()
# + [markdown] id="mkyubuJSOzg3"
# ##Performance On Test Set
# + [markdown] id="Tg42jJqqM68F"
# ### Data Preparation
#
# + [markdown] id="xWe0_JW21MyV"
#
# We'll need to apply all of the same steps that we did for the training data to prepare our test data set.
# + id="mAN0LZBOOPVh" colab={"base_uri": "https://localhost:8080/"} outputId="c7ea6007-c6d8-4f35-96a1-59bea7641081"
import pandas as pd
df = df_ori[2000:]
# Report the number of sentences.
print('Number of test sentences: {:,}\n'.format(df.shape[0]))
# Create sentence and label lists
sentences = df.tweet.values
labels = df.label.values
# Tokenize all of the sentences and map the tokens to their word IDs.
input_ids = []
attention_masks = []
for sent in sentences:
encoded_dict = tokenizer.encode_plus(
sent,
add_special_tokens = True,
max_length = 256,
pad_to_max_length = True,
return_attention_mask = True,
return_tensors = 'pt',
)
input_ids.append(encoded_dict['input_ids'])
attention_masks.append(encoded_dict['attention_mask'])
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
batch_size = 32
prediction_data = TensorDataset(input_ids, attention_masks, labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
# + [markdown] id="16lctEOyNFik"
# ###Evaluate on Test Set
#
# + [markdown] id="rhR99IISNMg9"
#
# With the test set prepared, we can apply our fine-tuned model to generate predictions on the test set.
# + id="Hba10sXR7Xi6" colab={"base_uri": "https://localhost:8080/"} outputId="91a19988-b62c-410e-d319-26a7d7e5e561"
# Prediction on test set
print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
model.eval()
predictions , true_labels = [], []
for batch in prediction_dataloader:
batch = tuple(t.to(device) for t in batch)
b_input_ids, b_input_mask, b_labels = batch
with torch.no_grad():
result = model(b_input_ids,
token_type_ids=None,
attention_mask=b_input_mask,
return_dict=True)
logits = result.logits
logits = logits.detach().cpu().numpy()
label_ids = b_labels.to('cpu').numpy()
predictions.append(logits)
true_labels.append(label_ids)
print(' DONE.')
# + id="hWcy0X1hirdx" colab={"base_uri": "https://localhost:8080/"} outputId="26e9c93c-e878-410c-e84d-a489d7c3b5e5"
print('Positive samples: %d of %d (%.2f%%)' % (df.label.sum(), len(df.label), (df.label.sum() / len(df.label) * 100.0)))
# + id="oCYZa1lQ8Jn8" colab={"base_uri": "https://localhost:8080/"} outputId="d2ff509f-1100-47f3-d4bf-55206ba52ce3"
# Combine the results across all batches.
flat_predictions = np.concatenate(predictions, axis=0)
# For each sample, pick the label (0 or 1) with the higher score.
flat_predictions = np.argmax(flat_predictions, axis=1).flatten()
# Combine the correct labels for each batch into a single list.
flat_true_labels = np.concatenate(true_labels, axis=0)
print(sum(flat_predictions == flat_true_labels)/len(flat_predictions))
# + [markdown] id="03n-Q5lipMan"
# ## Test on UCI with/without Further Training
# + [markdown] id="HMVge1-YOww5"
# ### Data
# + id="0RxdwXhIpTAQ" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="ee4aef86-79a5-41e8-a263-c1a76dbb501d"
url = "https://raw.githubusercontent.com/wangluheng328/SiFT-Project/main/Data/amazon_cells_labelled.txt"
download = requests.get(url).content
df_amazon = pd.read_csv(io.StringIO(download.decode('utf-8')),delimiter = '\t', delim_whitespace= False, names = ('Sentence', 'Label'))
df_amazon.head()
# + id="d7Zyw07QpwNN" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="4f5adbe8-0b5e-418e-965c-2f356219e199"
url = "https://raw.githubusercontent.com/wangluheng328/SiFT-Project/main/Data/yelp_labelled.txt"
download = requests.get(url).content
df_yelp = pd.read_csv(io.StringIO(download.decode('utf-8')),delimiter = '\t', delim_whitespace= False, names = ('Sentence', 'Label'))
df_yelp.head()
# + id="hc8moBqOp2QL" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="dfc900d3-460a-422b-fe85-157d2a54cb4d"
url = "https://raw.githubusercontent.com/wangluheng328/SiFT-Project/main/Data/imdb_labelled.txt"
download = requests.get(url).content
df_imdb = pd.read_csv(io.StringIO(download.decode('utf-8')),delimiter = '\t', delim_whitespace= False, names = ('Sentence', 'Label'))
df_imdb.head()
# + id="jO1_TVbfqH4w" colab={"base_uri": "https://localhost:8080/", "height": 0} outputId="61dd1403-036d-45d5-c30c-39ad05958550"
uci = pd.concat([df_imdb, df_amazon, df_yelp], axis = 0, join = 'inner')
uci = uci.sample(frac = 1).reset_index(drop = True)
uci.head()
# + [markdown] id="3RjAHhEqO1O7"
# ### Further Training
# + id="PQFbHqwEF9IL"
# uci_further = uci[:270]
# uci_test = uci[270:]
# + id="9yhsX0zGGWA2"
# optimizer = AdamW(model.parameters(),
# lr = 2e-5,
# eps = 1e-8
# )
# from transformers import get_linear_schedule_with_warmup
# epochs = 3
# total_steps = len(train_dataloader) * epochs
# scheduler = get_linear_schedule_with_warmup(optimizer,
# num_warmup_steps = 0,
# num_training_steps = total_steps
# )
# sentences = uci_further.Sentence.values
# labels = uci_further.Label.values
# input_ids = []
# attention_masks = []
# for sent in sentences:
# encoded_dict = tokenizer.encode_plus(
# sent,
# add_special_tokens = True,
# max_length = 256,
# pad_to_max_length = True,
# return_attention_mask = True,
# return_tensors = 'pt',
# )
# input_ids.append(encoded_dict['input_ids'])
# attention_masks.append(encoded_dict['attention_mask'])
# input_ids = torch.cat(input_ids, dim=0)
# attention_masks = torch.cat(attention_masks, dim=0)
# labels = torch.tensor(labels)
# from torch.utils.data import TensorDataset, random_split
# dataset = TensorDataset(input_ids, attention_masks, labels)
# train_size = int(0.9 * len(dataset))
# val_size = len(dataset) - train_size
# train_dataset, val_dataset = random_split(dataset, [train_size, val_size])
# print('{:>5,} training samples'.format(train_size))
# print('{:>5,} validation samples'.format(val_size))
# from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
# batch_size = 5
# train_dataloader = DataLoader(
# train_dataset,
# sampler = RandomSampler(train_dataset),
# batch_size = batch_size
# )
# validation_dataloader = DataLoader(
# val_dataset,
# sampler = SequentialSampler(val_dataset),
# batch_size = batch_size
# )
# import random
# import numpy as np
# seed_val = 42
# random.seed(seed_val)
# np.random.seed(seed_val)
# torch.manual_seed(seed_val)
# torch.cuda.manual_seed_all(seed_val)
# training_stats = []
# total_t0 = time.time()
# # For each epoch...
# for epoch_i in range(0, epochs):
# # ========================================
# # Training
# # ========================================
# # Perform one full pass over the training set.
# print("")
# print('======== Epoch {:} / {:} ========'.format(epoch_i + 1, epochs))
# print('Training...')
# # Measure how long the training epoch takes.
# t0 = time.time()
# total_train_loss = 0
# model.train()
# # For each batch of training data...
# for step, batch in enumerate(train_dataloader):
# # Progress update every 40 batches.
# if step % 40 == 0 and not step == 0:
# elapsed = format_time(time.time() - t0)
# print(' Batch {:>5,} of {:>5,}. Elapsed: {:}.'.format(step, len(train_dataloader), elapsed))
# b_input_ids = batch[0].to(device)
# b_input_mask = batch[1].to(device)
# b_labels = batch[2].to(device)
# model.zero_grad()
# embed = model.embed(input_ids = b_input_ids,mask = b_input_mask)
# preds = model.predict(embedding_output = embed,attention_mask = b_input_mask)
# loss_fct = CrossEntropyLoss()
# regular_loss = loss_fct(preds.view(-1,2), b_labels.view(-1))
# loss_list = [regular_loss]
# if MODE in ["SMART-adv-only", "SIFT"]:
# normalise = True if MODE == "SIFT" else False
# noised_embeddings = noise(embed, model, b_input_mask, 1e-3, normalize=normalise, k=1)
# adv_logits = model.predict(noised_embeddings, b_input_mask)
# adv_loss = stable_kl(preds.view(-1,2), adv_logits.view(-1,2))
# loss_list.append(adv_loss)
# loss = sum(loss_list)
# # END MODEL
# total_train_loss += loss.item()
# loss.backward()
# torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
# optimizer.step()
# scheduler.step()
# avg_train_loss = total_train_loss / len(train_dataloader)
# training_time = format_time(time.time() - t0)
# print("")
# print(" Average training loss: {0:.2f}".format(avg_train_loss))
# print(" Training epcoh took: {:}".format(training_time))
# # ========================================
# # Validation
# # ========================================
# # After the completion of each training epoch, measure our performance on
# # our validation set.
# print("")
# print("Running Validation...")
# t0 = time.time()
# model.eval()
# total_eval_accuracy = 0
# total_eval_loss = 0
# nb_eval_steps = 0
# # Evaluate data for one epoch
# for batch in validation_dataloader:
# b_input_ids = batch[0].to(device)
# b_input_mask = batch[1].to(device)
# b_labels = batch[2].to(device)
# with torch.no_grad():
# result = model(b_input_ids,
# token_type_ids=None,
# attention_mask=b_input_mask,
# labels=b_labels,
# return_dict=True)
# loss = result.loss
# logits = result.logits
# total_eval_loss += loss.item()
# logits = logits.detach().cpu().numpy()
# label_ids = b_labels.to('cpu').numpy()
# total_eval_accuracy += flat_accuracy(logits, label_ids)
# avg_val_accuracy = total_eval_accuracy / len(validation_dataloader)
# print(" Accuracy: {0:.2f}".format(avg_val_accuracy))
# avg_val_loss = total_eval_loss / len(validation_dataloader)
# validation_time = format_time(time.time() - t0)
# print(" Validation Loss: {0:.2f}".format(avg_val_loss))
# print(" Validation took: {:}".format(validation_time))
# training_stats.append(
# {
# 'epoch': epoch_i + 1,
# 'Training Loss': avg_train_loss,
# 'Valid. Loss': avg_val_loss,
# 'Valid. Accur.': avg_val_accuracy,
# 'Training Time': training_time,
# 'Validation Time': validation_time
# }
# )
# print("")
# print("Training complete!")
# print("Total training took {:} (h:mm:ss)".format(format_time(time.time()-total_t0)))
# + [markdown] id="vgzBiz15Uy37"
# ### Testing on The Remaining
# + id="I_e6ugkbqq0p" colab={"base_uri": "https://localhost:8080/"} outputId="7f668254-9a94-4f50-a8bd-ec1c1be4c0af"
import pandas as pd
# Report the number of sentences.
# NOTE(review): `uci`, `tokenizer`, `torch`, `TensorDataset`,
# `SequentialSampler` and `DataLoader` come from earlier notebook cells.
print('Number of test sentences: {:,}\n'.format(uci.shape[0]))
# Create sentence and label lists
sentences = uci.Sentence.values
labels = uci.Label.values
# Tokenize all of the sentences and map the tokens to their word IDs.
input_ids = []
attention_masks = []
for sent in sentences:
    # encode_plus adds the special [CLS]/[SEP] tokens, pads to 256 tokens
    # and returns PyTorch tensors. `pad_to_max_length` is deprecated in
    # newer transformers releases (use padding='max_length') -- kept as-is.
    encoded_dict = tokenizer.encode_plus(
                        sent,
                        add_special_tokens = True,
                        max_length = 256,
                        pad_to_max_length = True,
                        return_attention_mask = True,
                        return_tensors = 'pt',
                   )
    input_ids.append(encoded_dict['input_ids'])
    attention_masks.append(encoded_dict['attention_mask'])
# Stack the per-sentence tensors into single (N, 256) tensors.
input_ids = torch.cat(input_ids, dim=0)
attention_masks = torch.cat(attention_masks, dim=0)
labels = torch.tensor(labels)
batch_size = 32
# Sequential (non-shuffled) batching is appropriate for inference.
prediction_data = TensorDataset(input_ids, attention_masks, labels)
prediction_sampler = SequentialSampler(prediction_data)
prediction_dataloader = DataLoader(prediction_data, sampler=prediction_sampler, batch_size=batch_size)
# + id="RwLgyw49q5W7" colab={"base_uri": "https://localhost:8080/"} outputId="1b950d9d-7576-4475-ec29-e7501d24d3be"
print('Predicting labels for {:,} test sentences...'.format(len(input_ids)))
# Evaluation mode disables dropout so predictions are deterministic.
model.eval()
predictions , true_labels = [], []
for batch in prediction_dataloader:
    # Move the whole batch to the same device as the model.
    batch = tuple(t.to(device) for t in batch)
    b_input_ids, b_input_mask, b_labels = batch
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        result = model(b_input_ids,
                       token_type_ids=None,
                       attention_mask=b_input_mask,
                       return_dict=True)
    logits = result.logits
    # Detach to CPU numpy arrays so the batches can be concatenated later.
    logits = logits.detach().cpu().numpy()
    label_ids = b_labels.to('cpu').numpy()
    #print(logits.sum())
    predictions.append(logits)
    true_labels.append(label_ids)
print(' DONE.')
# + id="XXkdm29TrB0j" colab={"base_uri": "https://localhost:8080/"} outputId="12864ba5-6364-4d69-948a-42880b05fff4"
# Stack the per-batch logits and labels into single arrays.
all_logits = np.concatenate(predictions, axis=0)
flat_true_labels = np.concatenate(true_labels, axis=0)
# For each sample, keep the index (0 or 1) of the higher-scoring logit.
flat_predictions = all_logits.argmax(axis=1).flatten()
# Flat accuracy over the whole test set.
print(np.sum(flat_predictions == flat_true_labels) / len(flat_true_labels))
| DeBERTa Experiments/TwitterUCI_Deberta_SMART_0_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# load these packages
import pyspark
from pyspark.ml import feature, classification
from pyspark.ml import Pipeline, pipeline
from pyspark.sql import functions as fn
import numpy as np
from pyspark.sql import SparkSession
from pyspark.ml import feature, regression, evaluation, Pipeline
from pyspark.sql import functions as fn, Row
import matplotlib.pyplot as plt
# Create (or reuse) the Spark session and grab its context.
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
import pandas as pd
# We will analyze the Mid-atlantic wage dataset (https://rdrr.io/cran/ISLR/man/Wage.html).
# + deletable=false editable=false nbgrader={"checksum": "13b497143a441ab980a4d0df52131055", "grade": false, "grade_id": "cell-3969fb69476d2b82", "locked": true, "schema_version": 1, "solution": false}
# read-only
# Drop the CSV row index and columns excluded from this analysis, then make
# a 60/30/10 train/validation/test split with a fixed seed for
# reproducibility.
drop_cols = ['_c0', 'logwage', 'sex', 'region']
wage_df = spark.read.csv('/datasets/ISLR/Wage.csv', header=True, inferSchema=True).drop(*drop_cols)
training_df, validation_df, testing_df = wage_df.randomSplit([0.6, 0.3, 0.1], seed=0)
wage_df.printSchema()
# -
# explore the data
wage_df.limit(10).toPandas()
# # 1: Codify the data using transformers
#
# Create a fitted pipeline to the entire data `wage_df` and call it `pipe_feat`. This pipeline should codify the columns `maritl`, `race`, `education`, `jobclass`, `health`, and `health_ins`. The codification should be a combination of a `StringIndexer` and a `OneHotEncoder`. For example, for `maritl`, `StringIndexer` should create a column `maritl_index` and `OneHotEncoder` should create a column `maritl_feat`. Investigate the parameters of `StringIndexer` so that the labels are indexed alphabetically in ascending order so that, for example, the 1st index for `maritl_index` corresponds to `1. Never Married`, the 2nd index corresponds to `2. Married`, and so forth. Also, investigate the parameters of `OneHotEncoder` so that there are no columns dropped as it is usually done for dummy variables. This is, marital status should have one column for each of the classes.
#
# The pipeline should create a column `features` that combines `year`, `age`, and all codified columns.
# + deletable=false nbgrader={"checksum": "b6f4272b29b08b91879e9ceff83ffde9", "grade": false, "grade_id": "cell-06ca40bbc2363d61", "locked": false, "schema_version": 1, "solution": true}
from pyspark.ml import Pipeline
from pyspark.ml.feature import StringIndexer, OneHotEncoder, VectorAssembler

# Categorical columns that must be codified into one-hot features.
cols = ['maritl', 'race', 'education', 'jobclass', 'health', 'health_ins']

# Index each categorical column alphabetically (ascending), so e.g. the
# first `maritl_index` value corresponds to '1. Never Married'.
indexers = []
for c in cols:
    indexers.append(StringIndexer(inputCol=c,
                                  outputCol=f"{c}_index",
                                  stringOrderType="alphabetAsc",
                                  handleInvalid="error"))

# One-hot encode every index column; dropLast=False keeps a column for
# every category instead of dropping one as a dummy baseline.
encoders = []
for c in cols:
    encoders.append(OneHotEncoder(inputCol=f"{c}_index",
                                  outputCol=f"{c}_feat",
                                  dropLast=False))

# Combine the numeric columns with all one-hot columns into `features`.
numericCols = ['year', 'age']
assembler = VectorAssembler(inputCols=numericCols + [f"{c}_feat" for c in cols],
                            outputCol="features")

# Fit the whole codification pipeline on the entire dataset.
pipe_feat = Pipeline(stages=indexers + encoders + [assembler]).fit(wage_df)
# -
# investigate the results
pipe_feat.transform(wage_df).limit(5).toPandas().T
# + deletable=false editable=false nbgrader={"checksum": "c5fa22305ca72ca8f8009792f166f24d", "grade": true, "grade_id": "cell-f021fdae60597e9f", "locked": true, "points": 20, "schema_version": 1, "solution": false}
# (20 pts)
# Grader cell: the pipeline must contain exactly these stage types and
# produce a 22-dimensional feature vector (2 numeric + 20 one-hot columns).
assert set(type(pm) for pm in pipe_feat.stages) == {feature.OneHotEncoder, feature.StringIndexerModel, feature.VectorAssembler}
assert len(pipe_feat.transform(wage_df).first().features) == 22
# -
#
#
# Create three pipelines that contain three different random forest regressions that take in all features from the `wage_df` to predict `wage`. These pipelines should have as first stage the pipeline created in question 1 and should be fitted to the training data.
#
# - `pipe_rf1`: Random forest with `maxDepth=1` and `numTrees=60`
# - `pipe_rf2`: Random forest with `maxDepth=3` and `numTrees=40`
# - `pipe_rf3`: Random forest with `maxDepth=6`, `numTrees=20`
# + deletable=false nbgrader={"checksum": "937fda91b6f78f26ff1cefcb0f4766f3", "grade": false, "grade_id": "cell-81a05842530a4bf5", "locked": false, "schema_version": 1, "solution": true}
# create the fitted pipelines `pipe_rf1`, `pipe_rf2`, and `pipe_rf3` here
# YOUR CODE HERE
from pyspark.ml.regression import RandomForestRegressor

# Three forests trading depth against tree count; a fixed seed keeps the
# fits reproducible.
rf_1 = RandomForestRegressor(featuresCol="features", labelCol="wage",
                             maxDepth=1, numTrees=60, seed=1)
rf_2 = RandomForestRegressor(featuresCol="features", labelCol="wage",
                             maxDepth=3, numTrees=40, seed=1)
rf_3 = RandomForestRegressor(featuresCol="features", labelCol="wage",
                             maxDepth=6, numTrees=20, seed=1)

# Each pipeline reuses the fitted feature pipeline as its first stage and
# is trained on the training split only.
pipe_rf1 = Pipeline(stages=[pipe_feat, rf_1]).fit(training_df)
pipe_rf2 = Pipeline(stages=[pipe_feat, rf_2]).fit(training_df)
pipe_rf3 = Pipeline(stages=[pipe_feat, rf_3]).fit(training_df)
# + deletable=false editable=false nbgrader={"checksum": "918062cd396f3a9c3ebda974d405802c", "grade": true, "grade_id": "cell-af70c6d07ebf40ef", "locked": true, "points": 15, "schema_version": 1, "solution": false}
# tests for 15 pts
# Grader cell: each fitted pipeline must wrap the feature PipelineModel,
# end in a RandomForestRegressionModel, and transform to a DataFrame.
np.testing.assert_equal(type(pipe_rf1.stages[0]), pipeline.PipelineModel)
np.testing.assert_equal(type(pipe_rf2.stages[0]), pipeline.PipelineModel)
np.testing.assert_equal(type(pipe_rf3.stages[0]), pipeline.PipelineModel)
np.testing.assert_equal(type(pipe_rf1.stages[1]), regression.RandomForestRegressionModel)
np.testing.assert_equal(type(pipe_rf2.stages[1]), regression.RandomForestRegressionModel)
np.testing.assert_equal(type(pipe_rf3.stages[1]), regression.RandomForestRegressionModel)
np.testing.assert_equal(type(pipe_rf1.transform(training_df)), pyspark.sql.dataframe.DataFrame)
np.testing.assert_equal(type(pipe_rf2.transform(training_df)), pyspark.sql.dataframe.DataFrame)
np.testing.assert_equal(type(pipe_rf3.transform(training_df)), pyspark.sql.dataframe.DataFrame)
# -
#
#
# Use the following evaluator to compute the RMSE of the models on validation data. Print the RMSE of the three models and assign the best one (i.e., the best pipeline) to a variable `best_model`
# + deletable=false editable=false nbgrader={"checksum": "73cc7671f0711e342716e8875324b1ca", "grade": false, "grade_id": "cell-1f8bd4cfa96e326a", "locked": true, "schema_version": 1, "solution": false}
# RMSE evaluator against the `wage` label; lower is better.
evaluator = evaluation.RegressionEvaluator(labelCol='wage', metricName='rmse')
# use it as follows:
# evaluator.evaluate(fitted_pipeline.transform(df)) -> RMSE
# + deletable=false nbgrader={"checksum": "3c21368329ee32c72847a9261925e586", "grade": false, "grade_id": "cell-2e53b6ab6e82f38d", "locked": false, "schema_version": 1, "solution": true}
# print MSE of each model and define `best_model`
# YOUR CODE HERE
# Evaluate every fitted pipeline on the held-out validation split.
rmse1=evaluator.evaluate(pipe_rf1.transform(validation_df))
print('RMSE For Model1:',rmse1)
rmse2=evaluator.evaluate(pipe_rf2.transform(validation_df))
print('RMSE For Model2:',rmse2)
rmse3=evaluator.evaluate(pipe_rf3.transform(validation_df))
print('RMSE For Model3:',rmse3)
# Pick the winner by lowest validation RMSE instead of hard-coding
# `pipe_rf3`, so the cell stays correct if the data (and the scores)
# change. The key compares only the RMSE, never the pipeline objects.
best_model = min(zip([rmse1, rmse2, rmse3], [pipe_rf1, pipe_rf2, pipe_rf3]),
                 key=lambda pair: pair[0])[1]
#raise NotImplementedError()
# + deletable=false editable=false nbgrader={"checksum": "e626dc1e89241ad46546ac559e951ca7", "grade": true, "grade_id": "cell-c87098fdf26d5f77", "locked": true, "points": 10, "schema_version": 1, "solution": false}
# tests for 10 pts
# Grader cell: best_model must be a fitted pipeline (feature PipelineModel
# followed by a RandomForestRegressionModel) that can transform a DataFrame.
np.testing.assert_equal(type(best_model.stages[0]), pipeline.PipelineModel)
np.testing.assert_equal(type(best_model.stages[1]), regression.RandomForestRegressionModel)
np.testing.assert_equal(type(best_model.transform(training_df)), pyspark.sql.dataframe.DataFrame)
# -
#
#
# Compute the RMSE of the model on testing data, print it, and assign it to variable `RMSE_best`
# + deletable=false nbgrader={"checksum": "4f64d0e15006450b30465eba9c04809b", "grade": false, "grade_id": "cell-975307604e1c7a37", "locked": false, "schema_version": 1, "solution": true}
# create RMSE_best below
# YOUR CODE HERE
# Evaluate the selected best pipeline on the held-out test split. Using
# `best_model` (not a hard-coded `pipe_rf3`) keeps this cell consistent
# with the selection made in the previous question.
RMSE_best = evaluator.evaluate(best_model.transform(testing_df))
print('RMSE For best model on testing set:', RMSE_best)
#raise NotImplementedError()
# + deletable=false editable=false nbgrader={"checksum": "1352035aa1251f5f3dbdd1eaa97378d3", "grade": true, "grade_id": "cell-79c466e618817e90", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# tests for 5 pts
# Grader cell: the test RMSE is expected to land strictly inside (30, 40).
np.testing.assert_array_less(RMSE_best, 40)
np.testing.assert_array_less(30, RMSE_best)
# -
#
#
# Using the parameters of the best model, create a new pipeline called `final_model` and fit it to the entire data (`wage_df`)
# + deletable=false nbgrader={"checksum": "f1029e0fa09318af8175fcdcd931eca9", "grade": false, "grade_id": "cell-6a682b3fdbb9ff9c", "locked": false, "schema_version": 1, "solution": true}
# create final_model pipeline below
# YOUR CODE HERE
# Refit a pipeline with the best hyper-parameters (rf_3: maxDepth=6,
# numTrees=20) on the entire dataset to produce the final model.
final_model=Pipeline(stages=[pipe_feat,rf_3]).fit(wage_df)
#raise NotImplementedError()
# + deletable=false editable=false nbgrader={"checksum": "56d5b234a9fc45e110683dc01ae7dffa", "grade": true, "grade_id": "cell-803d8872aeb9cc8f", "locked": true, "points": 5, "schema_version": 1, "solution": false}
# tests for 5 pts
# Grader cell: final_model must mirror the structure of the earlier
# pipelines, but fitted on the full dataset.
np.testing.assert_equal(type(final_model.stages[0]), pipeline.PipelineModel)
np.testing.assert_equal(type(final_model.stages[1]), regression.RandomForestRegressionModel)
np.testing.assert_equal(type(final_model.transform(wage_df)), pyspark.sql.dataframe.DataFrame)
# -
#
#
# Create a pandas dataframe `feature_importance` with the columns `feature` and `importance` which contains the names of the features. Give appropriate column names such as `maritl_1._Never_Married`. You can build these feature names by using the labels from the fitted `StringIndexer` used in Question 1. Use as feature importance as determined by the random forest of the final model (`final_model`). Sort the pandas dataframe by `importance` in descending order and display.
# + deletable=false nbgrader={"checksum": "1f05a027040f4b4b04272173270dcc9b", "grade": false, "grade_id": "cell-0f7185f318626a45", "locked": false, "schema_version": 1, "solution": true}
# create feature_importance below
# YOUR CODE HERE
# Build human-readable feature names in the exact order the VectorAssembler
# assembled the vector: the two numeric columns first, then each
# categorical column's one-hot levels. pipe_feat.stages[i] is the fitted
# StringIndexer for cols[i] (the indexers are the first six pipeline
# stages) and its .labels attribute lists the category strings in index
# order, so the names line up with the importance vector without
# hand-writing all 22 of them as the original a1..f2 variables did.
feature_names = ['year', 'age']
for i, col_name in enumerate(cols):
    for label in pipe_feat.stages[i].labels:
        feature_names.append(col_name + '_' + label.replace(' ', '_'))

# Pair each name with the importance the final random forest assigned to
# it, then sort descending so the most influential features come first.
importances = final_model.stages[-1].featureImportances.toArray()
feature_importance = pd.DataFrame(list(zip(feature_names, importances)),
                                  columns=['feature', 'importance']
                                  ).sort_values('importance', ascending=False)
#raise NotImplementedError()
# -
# display your feature importances here
feature_importance
# + deletable=false editable=false nbgrader={"checksum": "65a7424b40deec2e714bd3764fb56667", "grade": true, "grade_id": "cell-dc3926e469167f5e", "locked": true, "points": 25, "schema_version": 1, "solution": false}
# tests for 25 pts
assert type(feature_importance) == pd.core.frame.DataFrame
np.testing.assert_array_equal(list(feature_importance.columns), ['feature', 'importance'])
# NOTE(review): the next line duplicates the previous check verbatim; the
# cell is grader-locked, so it is left untouched.
np.testing.assert_array_equal(list(feature_importance.columns), ['feature', 'importance'])
# -
# **(5 pts)** Comment below on the importance that random forest has given to each feature. Are they reasonable? Do they tell you anything valuable about the titanic dataset? Answer in the cell below
# + [markdown] deletable=false nbgrader={"checksum": "36878e68cb44923cdebab7fbd3f69660", "grade": true, "grade_id": "cell-21e30d00198bae80", "locked": false, "points": 5, "schema_version": 1, "solution": true}
# YOUR ANSWER HERE:
# Yes, the importance the random forest has given to each feature is reasonable. Race and marital status do not relate strongly to wage, and that is what the random forest shows. As in the Titanic dataset, where more educated people made up the majority of survivors, we see the same trend here: people with more education earn higher wages.
# -
#
#
# Pick any of the trees from the final model and assign its `toDebugString` property to a variable `example_tree`. Print this variable and add comments to the cell describing how you think this particular tree is fitting the data
# + deletable=false nbgrader={"checksum": "a60948658565a5116e4b3e8bb7e602b4", "grade": false, "grade_id": "cell-bf4e4b6323d9fcb5", "locked": false, "schema_version": 1, "solution": true}
# create a variable example_tree with the toDebugString property of a tree from final_model.
# print this string and comment in this same cell about the branches that this tree fit
# YOUR CODE HERE
# Grab the fitted random forest (last pipeline stage) and dump one of its
# trees via the human-readable debug string.
rf_model=final_model.stages[-1]
len(rf_model.trees)
example_tree=rf_model.trees[0].toDebugString
# The first (root) decision checks whether feature 20 is 0 or not; if true
# we descend to a node testing feature 3, and so on down the branch:
#   feature 20 (0 or not)
#   feature 3 (0 or not)
#   feature 11 (1 or not)
#   feature 1 (less than or equal 40.5)
#   feature 0 (less than or equal 2003.5)
#   feature 1 (less than or equal 22.5)
# and then a wage value is predicted at the leaf. Whenever a decision is
# False, the other branch is followed instead to predict the wage.
# +
# display the tree here
print(example_tree)
# + deletable=false editable=false nbgrader={"checksum": "639477f58c5b7ee916b3add4ad70c547", "grade": true, "grade_id": "cell-1c6f7a9628ad7949", "locked": true, "points": 10, "schema_version": 1, "solution": false}
# tests for 10 points
# Grader cell: example_tree must be the text dump of one regression tree.
assert type(example_tree) == str
assert 'DecisionTreeRegressionModel' in example_tree
assert 'feature 0' in example_tree
assert 'If' in example_tree
assert 'Else' in example_tree
assert 'Predict' in example_tree
# -
#
# Comment on the feature that is at the top of the tree. Does it make sense that that is the feature there?
# +
## The feature at the top of the tree is Health Insurance, and it does make sense
## as it is among the most important features.
# + deletable=false editable=false nbgrader={"checksum": "30700e9cc5ca65ee007db0f0a2a2c447", "grade": true, "grade_id": "cell-8240c5a7db1c22af", "locked": true, "points": 5, "schema_version": 1, "solution": false}
| random_forest.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="2B54uxLDndzf"
# # Business Problem
#
# A telecommunication company is facing dip in revenue due to customer attrition and was looking for ways to tackle the issue.
#
# - Analysis of the current churn data and look for patterns
# - Possible churn prediction system
#
# # Why solve using data science?
#
# Not all problems needs to be solved using ML/DL techniques!
#
# Justification in this use case:
#
# - Customer churn does not happen with specific set of factors. Factors may overlap or there many too many resons for the churn.
#
# - Scalability: As the organization gets more customers having ML solutions to handle them will be lot better than doing manual analysis.
#
# With these justifications, lets get our customers data and understand.
#
# # Advantages of having a churn prediction system
#
# Client can react in time and retain the customers by making a special offer according to the preference
#
# # Dataset Received
#
# Telecom users dataset
# https://www.kaggle.com/radmirzosimov/telecom-users-dataset
# + colab={"base_uri": "https://localhost:8080/"} id="h80AkerHz09Y" outputId="9b551674-1143-4958-d7c2-1d981236f89c"
# !pip install plotly==4.14.3
# + id="ySfkknDslgfk"
# Import Libraries
import pandas as pd
import numpy as np
import plotly.express as px
import seaborn as sns
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from sklearn.ensemble import AdaBoostClassifier
from sklearn import metrics
# + id="t-iuCMjW7gRt"
# Path to the dataset inside the mounted Google Drive.
path_to_file = '/content/drive/MyDrive/AIE/telecom_users.csv'
# + id="yqvmzLM58Qcp"
# Read the input data
df_data = pd.read_csv(path_to_file)
# + colab={"base_uri": "https://localhost:8080/", "height": 326} id="M_4O4_uE8pkU" outputId="0d0b0f6c-cc45-47c2-898e-209c4b0ebe46"
# Take first few rows
df_data.head()
# + [markdown] id="dVu3EDjk_4jt"
# # Data Glossary
#
# - customerID - customer id
# - gender - client gender (male / female)
# - SeniorCitizen - is the client retired (1, 0)
# - Partner - is the client married (Yes, No)
# - tenure - how many months a person has been a client of the company
# - PhoneService - is the telephone service connected (Yes, No)
# - MultipleLines - are multiple phone lines connected (Yes, No, No phone service)
# - InternetService - client's Internet service provider (DSL, Fiber optic, No)
# - OnlineSecurity - is the online security service connected (Yes, No, No internet service)
# - OnlineBackup - is the online backup service activated (Yes, No, No internet service)
# - DeviceProtection - does the client have equipment insurance (Yes, No, No internet service)
# - TechSupport - is the technical support service connected (Yes, No, No internet service)
# - StreamingTV - is the streaming TV service connected (Yes, No, No internet service)
# - StreamingMovies - is the streaming cinema service activated (Yes, No, No internet service)
# - Contract - type of customer contract (Month-to-month, One year, Two year)
# - PaperlessBilling - whether the client uses paperless billing (Yes, No)
# - PaymentMethod - payment method (Electronic check, Mailed check, Bank transfer (automatic), Credit card (automatic))
# - MonthlyCharges - current monthly payment
# - TotalCharges - the total amount that the client paid for the services for the entire time
# - Churn - whether there was a churn (Yes or No)
# + [markdown] id="t2UB6Fmy_q1K"
# # Exploratory Data Analysis
# + id="Y7oeIcCk8zcQ"
# Remove ids from the analysis
# 'Unnamed: 0' is the CSV row index and customerID is a unique identifier;
# neither carries predictive signal for churn.
df_data = df_data.drop(columns=['Unnamed: 0', 'customerID'])
# + colab={"base_uri": "https://localhost:8080/", "height": 309} id="ZE0aRxboBADK" outputId="fca56058-d92f-43ca-91a5-942dd62846c6"
df_data.head()
# + colab={"base_uri": "https://localhost:8080/"} id="L7vxm0_PBFRV" outputId="8bbf24b0-b0b1-4ad1-e884-a9f0d574ad3e"
# Let's see if we have any missing values
df_data.info()
# + [markdown] id="r1mdcGc7CYKY"
# 
# + [markdown] id="qQZ601g0EM52"
# #### From the data glossary, we can observe that `TotalCharges` is a number, but it is stored as `object` type. Let's analyze that.
# + colab={"base_uri": "https://localhost:8080/"} id="Kv31CtGbBXBQ" outputId="21bd0fe3-12e2-48c8-b0a2-2fc0112854f3"
df_data['TotalCharges'].value_counts()
# + [markdown] id="p4YepltCIItU"
# Looks like there are about 10 blank values in the `TotalCharges` field. Let's update the values.
# + colab={"base_uri": "https://localhost:8080/"} id="1VFxNwQTFMdR" outputId="477bd04b-8541-45f3-ad7b-619cbf428ace"
# Observe that TotalCharges have blank values
print('Before removing blank values')
print(df_data[df_data['TotalCharges'] == ' '].index)
# Replace whitespace-only entries with 0, then cast the column to float.
# NOTE(review): filling with 0 assumes a blank TotalCharges means "no
# charges yet" (e.g. brand-new customers) -- confirm against tenure.
df_data['TotalCharges'] = df_data['TotalCharges'].replace(r'^\s*$', 0, regex=True)
print('After removing blank values')
print(df_data[df_data['TotalCharges'] == ' '].index)
df_data['TotalCharges'] = df_data['TotalCharges'].astype(float)
# + colab={"base_uri": "https://localhost:8080/"} id="Y4tYMR6WMAvJ" outputId="294a4c73-be61-4be8-fa56-e81bc82c5bff"
# Let's review the data information
df_data.info()
# + [markdown] id="kztOfLKMMqqj"
# ## Business Problem to Data Science Questions
#
# Dolores, the client, was expecting to analyze the current data to understand what went wrong and correct it. Here are the questions the team came up with.
#
# _NOTE_: As we present to the stakeholders, questions will get updated.
#
# - How are gender, partner, and dependents related to churn?
# - Are we facing churn with customers with longer tenure?
# - Are we having issues with phone and internet services?
# - Does customers opted for tech support stayed for longer tenure with less churn?
# - Did customers monthly charge and total charge relate with churn?
# - Do customers opted for streaming face issue with the service?
# - Which contract do customers prefer in order to stay with the business?
#
# ## Let's visualize
#
# We will be using `Plotly` and `Seaborn` for the visualization pupose. `Pandas` used for analysis.
# + [markdown] id="4nCFfRDgV4oH"
# ### How are gender, partner, and dependents related to churn?
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="Mxj6hOUXMHa2" outputId="5a562a95-0aba-4d86-ef2e-f3f08a9d1a08"
# Treemap: nested churn counts by gender -> partner -> dependents.
fig = px.treemap(df_data.groupby(['gender', 'Partner', 'Dependents','Churn']).size().reset_index(name='count'),
                 path=['gender', 'Partner', 'Dependents','Churn'], values='count',
                 color='Churn', title='How gender, partner, and dependents are related to chrun?')
fig.show()
# + [markdown] id="8p3JsxVNV71C"
# ### Are we facing churn with customers with longer tenure?
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="g7ntBL-4RZUR" outputId="bdce16bf-b27c-4813-c61a-bc1379eec034"
# Churn counts per tenure month (red = churned, green = retained).
fig = px.histogram(df_data.groupby(['tenure', 'Churn']).size().reset_index(name='count'),
                   x="tenure", y='count', color="Churn", marginal="rug", color_discrete_map={"Yes": "#E45756", "No": "#1CBE4F"},
                   title='Are we facing churn with customers with longer tenure?')
fig.show()
# + [markdown] id="fWvEFMdnZj-R"
# ### Are we having issues with phone and internet services?
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="iDzj_-FBWgCY" outputId="bc9baaa1-7c86-4b79-8f47-6bd9ae7b449a"
# Sunburst: churn -> phone service -> internet service counts.
fig = px.sunburst(df_data.groupby(['Churn', 'PhoneService', 'InternetService']).size().reset_index(name='count'),
                  path=['Churn', 'PhoneService', 'InternetService'], values='count', title='Are we having issues with phone and internet services?')
fig.show()
# + [markdown] id="XU4L6-8oavt1"
# ### Does customers opted for tech support stayed for longer tenure with less churn?
# + id="9uxSdzoy6vQq"
# Split customers by whether they opted into tech support.
df_tech_yes = df_data[df_data['TechSupport'] == 'Yes']
df_tech_no = df_data[df_data['TechSupport'] == 'No']
# + [markdown] id="CZQjPjRI8mAT"
# ### Customers getting tech support
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="CYNBWMv5airj" outputId="22a42d34-89a4-4e38-8b76-58419ce3364d"
fig = px.histogram(df_tech_yes.groupby(['tenure', 'Churn']).size().reset_index(name='count'),
                   x="tenure", y='count', color="Churn", marginal="rug", color_discrete_map={"Yes": "#E45756", "No": "#1CBE4F"},
                   title='Statistics of customers opted for tech support')
fig.show()
# + [markdown] id="jzlGFBOj8sGt"
# ### Customers not getting tech support
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="36NeuejE5e1V" outputId="0cadae47-426c-4265-bcc5-d1824ffda0d3"
fig = px.histogram(df_tech_no.groupby(['tenure', 'Churn']).size().reset_index(name='count'),
                   x="tenure", y='count', color="Churn", marginal="rug", color_discrete_map={"Yes": "#E45756", "No": "#1CBE4F"},
                   title='Statistics of customers opted out of the tech support')
fig.show()
# + [markdown] id="MtEz9N7J9gJc"
# ## Did customers monthly charge and total charge relate with churn?
# + colab={"base_uri": "https://localhost:8080/", "height": 513} id="DdeL6k0a_GiV" outputId="04ed4b7a-2ac7-4b52-a71c-2f9142751b9a"
# Stacked density of monthly charges, split by churn.
sns.set(rc={'figure.figsize':(26,8.27)})
sns.kdeplot(data=df_data, x="MonthlyCharges", hue="Churn", multiple="stack").set(title='Did customers monthly charge and total charge relate with churn?')
# + colab={"base_uri": "https://localhost:8080/", "height": 509} id="T3UFEoTX9OI0" outputId="8eb92474-b3c5-489d-9994-087833958d60"
# Stacked density of total charges, split by churn.
sns.set(rc={'figure.figsize':(26,8.27)})
sns.kdeplot(data=df_data, x="TotalCharges", hue="Churn", multiple="stack").set(title='Did customers total charge and total charge relate with churn?')
# + [markdown] id="WcjJLvM7ASky"
# ## Do customers opted for streaming, face issue with the service?
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="L0VLDI0xA-8w" outputId="d2fbb7ef-1186-489c-8908-0d0da34ec975"
# Grouped bars: churn counts per StreamingTV category.
ax = sns.barplot(x="StreamingTV", y="count", hue='Churn',
                 data=df_data.groupby(['Churn', 'StreamingTV']).size().reset_index(name='count'), palette="Set2").set(title='Streaming TV vs Churn')
# + colab={"base_uri": "https://localhost:8080/", "height": 498} id="YACRLoNRE40H" outputId="96fd6b89-9496-4d21-c9b0-d7ec696fa026"
# Grouped bars: churn counts per StreamingMovies category.
ax = sns.barplot(x="StreamingMovies", y="count", hue='Churn',
                 data=df_data.groupby(['Churn', 'StreamingMovies']).size().reset_index(name='count'),
                 palette="Set2").set(title='Streaming Movies vs Churn')
# + [markdown] id="CrJWXKcWGF_U"
# ## Which contract do customers prefer in order to stay with the business?
# + colab={"base_uri": "https://localhost:8080/", "height": 542} id="PaYtBRaiGFk9" outputId="4fe35ab0-0350-4658-bd55-1c4d1d5cff9e"
# Sunburst: contract type -> churn counts.
fig = px.sunburst(df_data.groupby(['Contract', 'Churn']).size().reset_index(name='count'),
                  path=['Contract', 'Churn'], values='count', title='Which contract do customers prefer in order to stay with the business?')
fig.show()
# + [markdown] id="eWlpkSgWado8"
# # Data Pre-processing
# + id="Ld9KXF93RsYY"
# List of categorical columns
# (everything except the numeric columns and the Churn target; these are
# one-hot encoded below)
cat_columns = ['gender', 'SeniorCitizen', 'Partner', 'PhoneService',
               'MultipleLines', 'InternetService', 'OnlineSecurity',
               'OnlineBackup', 'DeviceProtection', 'TechSupport',
               'StreamingTV', 'StreamingMovies', 'Contract',
               'PaperlessBilling', 'PaymentMethod', 'Dependents']
# + [markdown] id="Q7MFfZ17bUjW"
# 
# + id="MAWWElGoFZt1"
# We can really quickly build dummy features with pandas by calling the get_dummies function.
# The "__" separator lets us later recover the original column name from a
# dummy column name.
df_processed = pd.get_dummies(df_data, prefix_sep="__",
                              columns=cat_columns)
# + colab={"base_uri": "https://localhost:8080/", "height": 249} id="ZKpUtz9vTvgh" outputId="384bca91-c308-4f8d-d6d3-88013c2dd49b"
df_processed.head()
# + [markdown] id="yNbCoOgJa22w"
# Now we got the data with one hot encoded feature.
# + colab={"base_uri": "https://localhost:8080/"} id="3ef9MNmATxbY" outputId="673ccafb-3bf5-4887-82e0-1443b8bc34b6"
# Encode target column
# First let's see unique values in the target column
print('Before encoding:', df_processed['Churn'].unique())
# Encode target columns: Assign `Yes` to 1 and `No` to 0
df_processed["Churn"] = np.where(df_processed["Churn"].str.contains("Yes"), 1, 0)
print('After encoding:', df_processed['Churn'].unique())
# + [markdown] id="TIoEM7LVbs91"
# Let's save the data transformation we did before so that we perform the same operation in the test dataset. If there is any drift in the data, we might have to re-train the model.
# + id="-2fHUWfxVMkJ"
# Persist the names of the generated dummy columns so inference-time data
# can be aligned to the same feature layout.
cat_dummies = [col for col in df_processed
               if "__" in col
               and col.split("__")[0] in cat_columns]
with open('cat_dummies.txt', 'w') as filehandle:
    for listitem in cat_dummies:
        filehandle.write('%s\n' % listitem)
# + id="7OzZY0r_VZMH"
# Also persist the full processed column order.
processed_columns = list(df_processed.columns[:])
with open('processed_columns.txt', 'w') as filehandle:
    for listitem in processed_columns:
        filehandle.write('%s\n' % listitem)
# + colab={"base_uri": "https://localhost:8080/"} id="nIiETE6vcTtR" outputId="ade9c680-88e8-4fa9-f293-44b8c6094457"
# Looks like the dataset is imbalanced
df_processed['Churn'].value_counts()
# + [markdown] id="rglq68KhcyhO"
# ### Choosing algorithms some tips!
#
# - Explainability
# - Memory: can you load your data fully? need incremental learning algorithms?
# - Number of features
# - Nonlinearity of the data
# - Training speed
# - Prediction speed
#
# #### How to deal with data imbalance?
#
# There are many ways to handle the dta imbalance.
#
# - Choose a learning algorithm that provide weights for every class.
# - Data-level approach: Under-sampling, Over-sampling, Cluster-based over sampling, Synthetic minority over-sampling technique (SMOTE)
# - Algorithmic ensemble techniques
# - Bagging techniques
# - Boosting: Ada boost, Gradient Tree boosting, XG Boost/
#
# - https://www.analyticsvidhya.com/blog/2017/03/imbalanced-data-classification/
# - https://machinelearningmastery.com/tactics-to-combat-imbalanced-classes-in-your-machine-learning-dataset/
#
# Here, we are using adaptive boosting technique in this example to deal with data imbalance.
#
# #### An AdaBoost classifier.
#
# Ada Boost is the first original boosting technique which creates a highly accurate prediction rule by combining many weak and inaccurate rules. Each classifier is serially trained with the goal of correctly classifying examples in every round that were incorrectly classified in the previous round.
# + colab={"base_uri": "https://localhost:8080/"} id="TPopv0ZKVdS6" outputId="e7b1dce1-9dec-4ac7-c784-281e76886f77"
# Get only features
feature_df = df_processed.drop(['Churn'], axis=1)
# Extract target column
target_df = df_processed[['Churn']]
# Split dataset into train and test (Best Practise is to split into train, validation, and test)
x_train,x_test,y_train,y_test = train_test_split(feature_df, target_df, test_size=0.2, random_state = 0)
# Initialize adaboost classifier
cls = AdaBoostClassifier(n_estimators=100)
# Fit the model; ravel() flattens the single-column target frame to the
# 1-D array shape scikit-learn expects (avoids a DataConversionWarning).
cls.fit(x_train, y_train.values.ravel())
# Predict once and reuse the result -- the original re-ran inference on
# the whole test set for every single metric below.
y_pred = cls.predict(x_test)
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
print('Recall Score:', metrics.recall_score(y_test, y_pred, average='weighted'))
print('Precision Score:', metrics.precision_score(y_test, y_pred, average='weighted'))
print('F1 Score:', metrics.f1_score(y_test, y_pred, average='weighted'))
print('Confusion matrix:', metrics.confusion_matrix(y_test, y_pred))
# + id="6D1pKnvgVxpN"
import pickle
# Persist the fitted AdaBoost model (`cls` from the cell above) so it can be
# reloaded later without retraining.
with open('classifier.pkl', 'wb') as fid:
    pickle.dump(cls, fid)
| Telecom_Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Data Structure
# ---
# ## List
# Demo: list basics — construction, indexing (incl. negative), iteration,
# and enumerate with a custom start.
days_of_the_week = ['Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday']
type(days_of_the_week)
days_of_the_week[0]   # first element
days_of_the_week[-1]  # last element
days_of_the_week[-2]  # second from the end
for day in days_of_the_week:
    print(day)
list(enumerate(days_of_the_week))
for position, day in enumerate(days_of_the_week, start=1):
    print(f"{position}. {day}")
# ## Set
# Demo: set algebra via the operator forms of union and intersection.
A = {1, 2, 3, 4}
B = {2, 12, 4, 5}
A | B  # same as A.union(B)
A & B  # same as A.intersection(B)
# ## Dictionary
# Demo: dictionary construction and key lookup (per-capita income, USD).
percapita_income = dict(
    Liechtenstein=139100,
    Macau=123965,
    Monaco=115700,
    Luxembourg=114482,
    Singapore=97341,
)
percapita_income
percapita_income['Liechtenstein']
# +
def power(first_number, second_number):
    """Return first_number raised to the second_number-th power."""
    base, exponent = first_number, second_number
    return base ** exponent


def root(first_number, second_number):
    """Return the second_number-th root of first_number (as a float)."""
    base, degree = first_number, second_number
    return base ** (1 / degree)
# -
# Demo: dispatching function calls by name via a lookup table of symbols.
first_number = 8
second_number = 3
# power_dict = {'power':16, 'root':2}
lookup_dict = {'power': '^', 'root':'~'}
{'add':'+','sub':'-'}
power_dict = {}
for key, value in lookup_dict.items():
    # Builds e.g. the string "power(8,3)" from the dict key and evaluates it.
    # NOTE(review): fine for a demo with hard-coded keys, but never pass
    # untrusted input to eval(); a dict of callables is the safe equivalent.
    power_dict[key]=eval(f"{key}({first_number},{second_number})")
print(power_dict)
for key, value in lookup_dict.items():
    print(f"{first_number} {value} {second_number} = {power_dict[key]}")
| Data Structure.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + language="javascript"
# IPython.OutputArea.prototype._should_scroll = function(lines) {
# return false;
# }
# -
import pandas as pd
import numpy as np
import scipy.stats as sp_stats
import requests
from datetime import timedelta
import matplotlib.pyplot as plt
import time
import math
# +
# BUG FIX: the original URL contained the mojibake '¤cyPair' — the HTML
# entity '&curren;' rendered in place of the literal '&curren...' prefix of
# '&currencyPair' — so every request was malformed.
POLONIEX_OHLCV_BASEURL = 'https://poloniex.com/public?command=returnChartData&currencyPair='
"""
Example query:
https://poloniex.com/public?command=returnChartData&currencyPair=BTC_POT&start=1435699200&end=9999999999&period=14400
"""

def get_ohlcv_poloniex(pair='USDT_BTC', start=1435699200, end=9999999999, period=900):
    """
    Return OHLCV data for a Poloniex pair as a pandas DataFrame.

    Convert to unix timestamp using https://coderstoolbox.net/unixtimestamp/

    :param pair: str pair on Poloniex
    :param start: int unix timestamp of beginning time
    :param end: int unix timestamp of ending time
    :param period: int candle width in seconds
    :return: pandas df of OHLCV data for the specified pair, times, and period
    :raises requests.HTTPError: on a non-200 response
    """
    query = POLONIEX_OHLCV_BASEURL + pair + '&start=' + str(start) + '&end=' + str(end) + '&period=' + str(period)
    resp = requests.get(query)
    if resp.status_code != 200:
        # BUG FIX: requests has no 'ApiError' attribute — the original line
        # raised AttributeError instead of reporting the failed request.
        raise requests.HTTPError('GET {} returned {}'.format(query, resp.status_code))
    return pd.DataFrame(resp.json())
# +
class EntropyEstimatorLz:
    """
    Kontoyiannis' LZ entropy estimate, 2013 version (centered window).

    The estimate is the inverse of the average length of the shortest
    non-redundant substring: if non-redundant substrings are short, the
    text is highly entropic.  Pass window=None for the expanding-window
    variant, in which case
        len(msg) % 2 == 0
    is required.  If the end of msg is more relevant, try
    estimate_entropy(msg[::-1]).
    """
    @classmethod
    def estimate_entropy(cls, *args, **kwargs):
        # Convenience wrapper: run konto() and return only the rate 'h'.
        return cls.konto(*args, **kwargs)['h']
    @classmethod
    def konto(cls, msg, window=None):
        """
        Estimate the entropy rate of *msg*.

        :param msg: str message (non-strings are coerced element-wise)
        :param window: int half-window for the centered estimate, or None
            for the expanding-window variant
        :return: dict with 'h' (entropy rate), 'r' (redundancy, 0 <= r <= 1),
            'num', 'sum', and 'sub_str' (the matched substrings)
        """
        out = {'num': 0, 'sum': 0, 'sub_str': []}
        if not isinstance(msg, str):
            msg = ''.join(map(str, msg))
        if window is None:
            # Expanding window: position i may look back over all of msg[:i].
            points = range(1, len(msg) // 2 + 1)
        else:
            # Centered window: clamp so i-window .. i+window stays in range.
            window = min(window, len(msg) // 2)
            points = range(window, len(msg) - window + 1)
        for i in points:
            if window is None:
                l, msg_ = cls.match_length(msg, i, i)
                out['sum'] += math.log2(i + 1) / l
            else:
                l, msg_ = cls.match_length(msg, i, window)
                out['sum'] += math.log2(window + 1) / l
            out['sub_str'].append(msg_)
            out['num'] += 1
        # Average, then rescale by 1/ln(2); the __main__ demo below multiplies
        # back by math.log(2) to recover the book's quoted values.
        out['h'] = (out['sum'] / out['num']) / math.log(2)
        out['r'] = 1 - out['h'] / math.log2(len(msg))  # redundancy, 0 <= r <= 1
        return out
    @staticmethod
    def match_length(msg, i, n):
        """
        Maximum matched length + 1, with overlap: the longest prefix of
        msg[i:] that also occurs starting somewhere in the n characters
        before position i.

        Requires i >= n and len(msg) >= i + n.

        :param msg: str message
        :param i: int current position
        :param n: int lookback window length
        :return: (matched length + 1, matched substring)
        """
        sub_str = ''
        for l in range(n):
            msg1 = msg[i:i + l + 1]
            for j in range(i - n, i):
                msg0 = msg[j:j + l + 1]
                if msg1 == msg0:
                    sub_str = msg1
                    break  # search for higher l.
        return len(sub_str) + 1, sub_str  # matched length + 1
if __name__ == '__main__':
    # Reproduce the entropies (0.97 and 0.84) highlighted in
    # "Advances in Financial Machine Learning", section 18.4.
    for message in ('11100001', '01100001'):
        entropy = EntropyEstimatorLz.estimate_entropy(message) * math.log(2)
        print('message: %s, entropy: %.2f' % (message, entropy))
# +
def plugIn(msg, w):
    """Plug-in (maximum-likelihood) entropy rate of msg over words of width w.

    Returns (entropy_rate, pmf) where pmf is the empirical word distribution
    produced by pmf1(msg, w).
    """
    pmf = pmf1(msg, w)
    entropy_rate = -sum(pmf[word] * np.log2(pmf[word]) for word in pmf) / w
    return entropy_rate, pmf
def pmf1(msg, w):
    """Empirical probability mass function of the w-character windows of msg.

    Scans the len(msg) - w overlapping occurrences msg[i-w:i] for
    i in [w, len(msg)) and returns {window: relative frequency}.

    :param msg: str (or iterable coerced to a string of its elements)
    :param w: int window width
    :return: dict mapping each observed w-character window to its frequency
    """
    lib = {}
    if not isinstance(msg, str): msg = ''.join(map(str, msg))
    for i in range(w, len(msg)):
        # PERF FIX: the original rebuilt the occurrence list on every hit
        # (`lib[k] = lib[k] + [i-w]`, O(len) per append); setdefault/append
        # is O(1) amortized and produces identical contents.
        lib.setdefault(msg[i - w:i], []).append(i - w)
    total = float(len(msg) - w)
    return {word: len(positions) / total for word, positions in lib.items()}
# -
def segment(total):
    """Yield consecutive, equal-length chunks of *total*.

    The chunk length is len(total) // 1000, so ~1000 chunks are produced;
    an incomplete trailing chunk (and everything when len(total) < 1000,
    where the chunk length is 0) is not yielded.

    BUG FIX: the original yielded *before* appending the current item, so
    the item at every chunk boundary was silently dropped, and the final
    full chunk was never yielded at all.
    """
    current_seg = []
    len_seg = len(total) // 1000
    for item in total:
        current_seg.append(item)
        if len(current_seg) == len_seg:
            yield current_seg
            current_seg = []
# Pull the full history of 15-minute USDT_ETH candles from Poloniex.
df = get_ohlcv_poloniex(pair='USDT_ETH', start=0, end=9999999999, period=900)
# Period-over-period return of the volume-weighted average price.
df['ret'] = df['weightedAverage'].pct_change()
# Drop flat periods, infinities (zero-price divisions) and the leading NaN.
df = df[df['ret'] != 0]
df = df[df['ret'] != np.inf]
df = df[df['ret'].notnull()]
# Encode each return as a binary symbol: ret*(ret-1) > 0 holds when ret < 0
# or ret > 1, so this yields 1 for negative (or >100%) returns, else 0.
# NOTE(review): presumably intended as a down/up indicator — confirm the
# ret > 1 edge case is meant to map to 1.
df['binary'] = df.apply(lambda row: int(row['ret']*(row['ret']-1) > 0), axis = 1)
# Partition the symbol stream into equal-length segments.
partitions = segment(df['binary'])
# NOTE(review): despite the name, this is the per-segment LENGTH
# (len // 1000, matching len_seg inside segment()), not the segment count.
num_segments = len(df['binary'])//1000
print(num_segments)
# +
print("Plug in entropies")
results = pd.DataFrame()
# One plug-in entropy estimate (word width 10) per segment of the stream.
results['plug-in'] = [plugIn(part, 10)[0] for part in partitions]
results['plug-in'].hist(figsize=(10,8))
# -
print("Kontoyiannis’ method entropies using window size of 100")
# segment() returns a generator, which the plug-in pass above exhausted —
# rebuild the partitions before the second pass.
partitions = segment(df['binary'])
# for part in partitions:
#     print(part)
#     print("binary: " + str(EntropyEstimatorLz.estimate_entropy(part, window=100)))
results['kontoyiannis'] = [EntropyEstimatorLz.estimate_entropy(part, window=100) for part in partitions]
# Cross-estimator correlation over the segments, then a scatter comparison.
print(results['kontoyiannis'].corr(results['plug-in']))
plt.figure(figsize=(10, 8))
plt.scatter(results['kontoyiannis'], results['plug-in'])
| Entropy_BTC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Central Limit Theorem - Part III
#
# You saw how the **Central Limit Theorem** worked for the sample mean in the earlier concept. However, let's consider another example to see a case where the **Central Limit Theorem** doesn't work...
#
# Work through the questions and use the created variables to answer the questions that follow below the notebook.
#
# Run the below cell to get started.
# +
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline

# Fixed seed so the quiz answers derived below are reproducible.
np.random.seed(42)
# Right-skewed population: 3000 draws from Gamma(shape=1, scale=100).
pop_data = np.random.gamma(1,100,3000)
plt.hist(pop_data);
# -
# `1.` In order to create the sampling distribution for the variance of 100 draws of this distribution, follow these steps:
#
# `a.` Use numpy's **random.choice** to simulate 100 draws from the `pop_data` array. <br><br>
# `b.` Compute the variance of these 100 draws. <br><br>
# `c.` Write a loop to simulate this process 10,000 times, and store each variance into an array called **var_size_100**.<br><br>
# `d.` Plot a histogram of your sample variances.<br><br>
# `e.` Use **var_size_100** and **pop_data** to answer the quiz questions below.
# One sample of 100 draws (with replacement) and its variance.
samp_100 = np.random.choice(pop_data, 100, replace = True)
samp_100.var()
# Sampling distribution of the variance: 10,000 repeated samples of size 100.
var_size_100 = []
for _ in range(10000):
    samp_100 = np.random.choice(pop_data, 100, replace = True)
    var_size_100.append(samp_100.var())
# Spread of the sampling distribution itself.
np.array(var_size_100).var()
plt.hist(var_size_100);
# Population variance vs. the mean of the sampled variances (quiz answers).
pop_data.var()
np.array(var_size_100).mean()
| Practical_statistics/sampling_distribution_&_Central_limit_theorem/Central Limit Theorem - Part III.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="x2FyP0EFw2i6"
# ## Import and Load datasets
# + id="OMtvBYYYqrEg"
import pandas as pd
import numpy as np
# + id="0kzWlcMVqrE7"
# Five fiscal years of HUD Fair Market Rent (FMR) county-level tables.
# NOTE(review): encoding='unicode_escape' suggests the raw files contain
# non-UTF-8 bytes — confirm area names round-trip correctly.
df2017 = pd.read_csv('/content/FY2017-4050-County-Level_Data.csv', encoding= 'unicode_escape', )
df2018 = pd.read_csv('/content/FY18_4050_FMRs_rev.csv', encoding= 'unicode_escape')
df2019 = pd.read_csv('/content/FY2019_4050_FMRs_rev2.csv', encoding= 'unicode_escape')
df2020 = pd.read_csv('/content/FY20_4050_FMRs_rev.csv', encoding= 'unicode_escape')
df2021 = pd.read_csv('/content/FY21_4050_FMRs_rev.csv', encoding= 'unicode_escape')
# + [markdown] id="n_UHpIp2wxVE"
# ## Clean datasets
# + id="JfKWWw8YqrE8"
def wrangle0(x0, year=2021):
    """Filter a HUD FMR table to metro rows, keeping the efficiency (fmr_0) rent.

    :param x0: raw FMR DataFrame for one fiscal year
    :param year: label for the renamed fmr_0 column.  The original hard-coded
        2021 for every input year, which makes the later multi-year merges
        collide on duplicate column names; the year is now a parameter and
        the default preserves the old behaviour for existing calls.
    :return: DataFrame with the id columns plus the rent column named *year*
    """
    x0 = x0.copy()
    x0 = x0.loc[x0['metro'] == 1]
    x0 = x0[['fips2010', 'metro_code', 'areaname', 'state_alpha', 'fmr_0']]
    return x0.rename(columns={'fmr_0': year})
# + id="KVviD1NyvR7Z"
# Per-year efficiency-unit (fmr_0) tables; all share the same column layout.
df2017_frm0 = wrangle0(df2017)
# + id="T7UxCYJjvStI"
df2018_frm0 = wrangle0(df2018)
# + id="QcUKL3h4vTRy"
df2019_frm0 = wrangle0(df2019)
# + id="LlHHShAMvTzD"
df2020_frm0 = wrangle0(df2020)
# + id="hAA6RdUdvUX7"
df2021_frm0 = wrangle0(df2021)
# + id="T8vQJEQ-sD9I"
def wrangle1(x1, year=2021):
    """Filter a HUD FMR table to metro rows, keeping the 1-bedroom (fmr_1) rent.

    :param x1: raw FMR DataFrame for one fiscal year
    :param year: label for the renamed fmr_1 column.  The original hard-coded
        2021 for every input year, which makes the later multi-year merges
        collide on duplicate column names; the year is now a parameter and
        the default preserves the old behaviour for existing calls.
    :return: DataFrame with the id columns plus the rent column named *year*
    """
    x1 = x1.copy()
    x1 = x1.loc[x1['metro'] == 1]
    x1 = x1[['fips2010', 'metro_code', 'areaname', 'state_alpha', 'fmr_1']]
    return x1.rename(columns={'fmr_1': year})
# + id="baUVgX0Rse3Z"
# Per-year 1-bedroom (fmr_1) tables.
# NOTE(review): naming is inconsistent — df2017_fmr1 vs *_frm1 for the rest.
df2017_fmr1 = wrangle1(df2017)
# + id="jBWI9JNDtqgU"
df2018_frm1 = wrangle1(df2018)
# + id="joMi8bPAsGG_"
df2019_frm1 = wrangle1(df2019)
# + id="k08_RbWfwM3P"
df2020_frm1 = wrangle1(df2020)
# + id="uN4i4a8twMm-"
df2021_frm1 = wrangle1(df2021)
# + [markdown] id="saetNu4Cw961"
# ## Merge fmr0s and fmr1s
# + id="LNtluY4Pw8d4"
# Chain-merge the five years on the id columns.
# NOTE(review): every input frame's rent column is literally named 2021
# (wrangle0 renames fmr_0 to 2021 regardless of year), so pandas has to
# disambiguate the duplicates with _x/_y suffixes and the per-year labels
# are lost — confirm this is intended before relying on column names.
df_fmr0 = df2017_frm0.merge(df2018_frm0, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr0 = df_fmr0.merge(df2019_frm0, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr0 = df_fmr0.merge(df2020_frm0, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr0 = df_fmr0.merge(df2021_frm0, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="ZHmyNrdIxi-7" outputId="d5e2a2ea-b349-400e-f9fa-017ad77dc819"
print(df_fmr0.shape)
df_fmr0.head()
# + id="m9FOV5Uazpkq"
# Chain-merge the five 1-bedroom years on the id columns.
# NOTE(review): same duplicate-2021-column issue as the fmr_0 merge above.
df_fmr1 = df2017_fmr1.merge(df2018_frm1, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr1 = df_fmr1.merge(df2019_frm1, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr1 = df_fmr1.merge(df2020_frm1, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
df_fmr1 = df_fmr1.merge(df2021_frm1, on=['fips2010', 'metro_code', 'areaname', 'state_alpha'])
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="rbhyuY4Oz5H9" outputId="ec193793-8eeb-4266-ce78-3f5f83e94ca4"
print(df_fmr1.shape)
df_fmr1.head()
# + [markdown] id="mYnJvTsD2uAZ"
# ## Clean Merged DataFrames
# - split area name to remove 'MSA', 'HUD Metro FMR Area'
# - explode cities to separate hyphenated cities
# + id="JwzlnIKj1_LF"
def explode_str(df, col='areaname', sep='-'):
    """Explode *col* on *sep*, repeating each source row once per fragment.

    E.g. a row with areaname 'Tacoma-Lakewood' becomes two rows, one per
    city, with all other columns duplicated.
    """
    values = df[col]
    # Repeat each row index once per sep-delimited fragment it contains.
    repeats = values.str.count(sep) + 1
    row_idx = np.arange(len(values)).repeat(repeats)
    # Joining then re-splitting yields the fragments in row order, aligned
    # with the repeated index.
    fragments = sep.join(values).split(sep)
    return df.iloc[row_idx].assign(**{col: fragments})
# + id="S5yttEZ90lD9"
def new_wrangle(x):
    """Tidy a merged FMR frame: strip the metro-area suffix from 'areaname',
    explode hyphenated city lists into one row per city, and drop 'fips2010'.
    """
    x = x.copy()
    # Keep only the text before the first comma (drops ', ST MSA' / 'HUD
    # Metro FMR Area' suffixes).  COMPAT FIX: n must be passed by keyword —
    # positional n was deprecated in pandas 1.4 and removed in pandas 2.0.
    x['areaname'] = x['areaname'].str.split(',', n=1, expand=True)[0]
    x['areaname'] = x['areaname'].str.strip()
    x = explode_str(x)
    x = x.drop(columns = ['fips2010'])
    return x
# + [markdown] id="AiQdd7I59TJ_"
# ### FMR0
# + id="IThLHSqv34jG"
df_fmr0 = new_wrangle(df_fmr0)
# + id="EkDEenez7jVh"
# Build a 'City, ST' key for joining/forecasting downstream.
df_fmr0['areaname'] = df_fmr0['areaname'].str.strip()
df_fmr0['state_alpha'] = df_fmr0['state_alpha'].str.strip()
df_fmr0['city, state'] = df_fmr0['areaname'] + ', ' + df_fmr0['state_alpha']
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="8r8WynXD8pei" outputId="dcaf4e79-8939-4f50-f758-939e77f46bff"
print(df_fmr0.shape)
df_fmr0.head()
# + id="Wtx9WIjt9AG-"
df_fmr0.to_csv('fmr0.csv', index=False)
# + [markdown] id="_XRPHJm19VtX"
# ### FMR1
# + id="A5ba4KAX9MB0"
df_fmr1 = new_wrangle(df_fmr1)
# + id="mpfe3bgk9SIO"
# Build a 'City, ST' key for joining/forecasting downstream.
df_fmr1['areaname'] = df_fmr1['areaname'].str.strip()
df_fmr1['state_alpha'] = df_fmr1['state_alpha'].str.strip()
df_fmr1['city, state'] = df_fmr1['areaname'] + ', ' + df_fmr1['state_alpha']
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="ds9WvPDE9kXF" outputId="dd18a2f1-1e9b-4822-c40d-358d0f33358f"
print(df_fmr1.shape)
df_fmr1.head()
# + id="zTRL6klD9oCW"
df_fmr1.to_csv('fmr1.csv', index=False)
# + [markdown] id="yv-cN_qn9xi5"
# ## Forecasting
# + colab={"base_uri": "https://localhost:8080/"} id="QYjCDQ-G95cZ" outputId="78b10c32-4d2d-4dfe-e6b8-d857c8780e6a"
# !pip install prophet
# + id="SXtDSsOK95X9"
from prophet import Prophet
# + id="hH_X-yf795gt"
# Reload the cleaned fmr0 table written by the cell above.
fmr0 = pd.read_csv('/content/fmr0.csv')
# + [markdown] id="ADUS5soZ5Ayq"
# ### Setting fmr0
# - select specific rows
# - melt dataframe and get columns necessary to run prophet
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="kfeM3NRg3FJ7" outputId="ad282bc5-a188-491a-a836-7c9b356ebb70"
print(fmr0.shape)
fmr0.head()
# + id="zB02sTFn3l2k"
fmr0_melt = fmr0.copy()
# + id="eCIjlhz53wtp"
fmr0_melt = fmr0_melt.drop(columns=['metro_code', 'areaname', 'state_alpha'])
# + id="54YXa8xM95Qy"
# to get in the form prophet needs
# Wide -> long: one row per (city, year) with Prophet's expected columns,
# 'ds' (the year label) and 'y' (the rent).
fmr0_melt = (fmr0_melt.melt(id_vars=['city, state'],
                            var_name='ds',
                            value_name='y')
             ).reset_index(drop=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="AXw2TGy195K4" outputId="a85e05a5-67cf-4138-ae5f-85564d629648"
fmr0_melt.head()
# + id="aYgseWng95FL"
# create city list
# Unique forecast keys — one Prophet model is fit per city below.
cities_list = list(fmr0_melt['city, state'].unique())
# + colab={"base_uri": "https://localhost:8080/"} id="66DTdNCm95A7" outputId="26e69359-ac89-4210-b394-bd56d57f503b"
len(cities_list)
# + id="HE35JqOk947W"
def rnd_series(city):
    """Return the rows of fmr0_melt belonging to *city* (a 'city, state' key).

    CLEANUP: the original also built an unused `dates` DataFrame from
    pd.to_datetime of the 'ds' column; that dead computation is removed.
    The 'ds' column is therefore left as raw strings — NOTE(review):
    Prophet parses date strings itself, but confirm no caller relied on
    the conversion side effect (there was none: `dates` was never returned).
    """
    return fmr0_melt[fmr0_melt['city, state'] == city]
# + id="L9NClx8H9420"
# One per-city sub-frame for each entry of cities_list.
series = [rnd_series(city) for city in cities_list]
# + colab={"base_uri": "https://localhost:8080/", "height": 676} id="Cpq2X5E194rR" outputId="d3d7605e-8f2e-4d48-832d-04e4d4494b58"
series[0]
# + [markdown] id="wjFVpAgiM4HA"
# ### Running Prophet on df
# + id="OMzwXEri49dC"
def run_prophet(series):
    """Fit Prophet to one city's rent series and forecast 10 more years.

    Returns a frame with columns ['city, state', 'ds', 'yhat', 'yhat_lower',
    'yhat_upper'], the yhat columns cast to float and rounded up (np.ceil).
    """
    model = Prophet()
    model.fit(series)
    future = model.make_future_dataframe(periods=10, freq='Y')
    forecast = model.predict(future)[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
    # Tag every forecast row with the city it belongs to, key column first.
    forecast['city, state'] = series['city, state'].iloc[0]
    forecast = forecast[['city, state', 'ds', 'yhat', 'yhat_lower', 'yhat_upper']]
    yhat_cols = ['yhat', 'yhat_lower', 'yhat_upper']
    forecast[yhat_cols] = forecast[yhat_cols].astype(float).apply(np.ceil)
    return forecast
# + colab={"base_uri": "https://localhost:8080/", "height": 259} id="eLufPNi25h7E" outputId="e5aaef78-6bfd-44c5-d113-830ad609bd23"
# Smoke-test the forecaster on the first city.
f = run_prophet(series[0])
f.head()
# + colab={"base_uri": "https://localhost:8080/"} id="g-us4l3C5psN" outputId="87ce125e-106c-4b5f-eebd-617e81030f43"
# Forecast every city, appending each result to a single CSV.  mode='a'
# rewrites the header on every call, so the file accumulates repeated
# header rows; they get filtered out when the file is read back.
for i in range(len(series)):
    f = run_prophet(series[i])
    # BUG FIX: the original passed index='False' — a non-empty *string*,
    # which is truthy, so the row index was written after all.  The
    # boolean False omits it as intended.
    f.to_csv('fmr0_predictions.csv', mode='a', index=False)
# + id="9yedDPRS5ufT"
rental_prediction = pd.read_csv('fmr0_predictions.csv')
# + id="A5kFXcaF5x8N"
# Drop the repeated header rows produced by the append-mode writes above
# (a stray header row carries the literal string 'city, state').
rental_prediction = rental_prediction.loc[rental_prediction['city, state'] != 'city, state']
# + id="dlOOxbLl5xyb"
rental_prediction.to_csv('fmr0_predictions.csv', index=False)
# + id="SEO6SRda5xuB"
rent_long = rental_prediction[['city, state', 'ds', 'yhat']]
# + colab={"base_uri": "https://localhost:8080/"} id="a_yaqQJ55xnD" outputId="671583f5-bd3f-450b-cfa9-2414a5393bb5"
rent_long.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="pLbMMi7b5xh_" outputId="7753296d-7e40-4dab-a74c-0d9cd80df042"
# yhat was read as object because of the embedded header rows; coerce to
# numeric.  NOTE(review): assigning into a slice of rental_prediction may
# raise SettingWithCopyWarning — consider .copy() when taking rent_long.
rent_long['yhat'] = pd.to_numeric(rent_long['yhat'],errors='coerce')
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="CYTQu33F5xXN" outputId="27890e39-2c61-4aa1-8182-607cc7df910c"
rent_long.head()
# + id="EhAvT1opJFNn"
# Long -> wide: rows = city, columns = forecast date, values = yhat.
rent_long = rent_long.pivot_table(index = 'city, state',
                                  columns = 'ds',
                                  values = 'yhat')
# + id="_J-tHE8sJEzn"
# NOTE(review): drops the first three rows of the pivot — presumably stray
# entries; confirm exactly which cities this removes.
rent_long = rent_long.iloc[3:]
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="1Ni-RkjiJFBd" outputId="ebbe830f-0ecc-4fc2-ac22-37f3ea778a21"
rent_long
# + id="J2mhVdr3JErn"
rent_long.to_csv('fmr0_long.csv', index=False )
# + [markdown] id="oNJLk3FINE-e"
# ## Forecasting 2
# + id="N3ECI7rVNSMx"
# Reload the cleaned fmr1 (1-bedroom) table written earlier.
fmr1 = pd.read_csv('/content/fmr1.csv')
# + [markdown] id="3pFR67A7NSMz"
# ### Setting fmr1
# - select specific rows
# - melt dataframe and get columns necessary to run prophet
# + colab={"base_uri": "https://localhost:8080/", "height": 223} id="-jltLYyyNSMz" outputId="e53efbdf-13b1-4dff-990d-014248176577"
print(fmr1.shape)
fmr1.head()
# + id="7N9uA4_cNSM1"
fmr1_melt = fmr1.copy()
# + id="_K4QOn7HNSM1"
fmr1_melt = fmr1_melt.drop(columns=['metro_code', 'areaname', 'state_alpha'])
# + id="twaIvV35NSM2"
# to get in the form prophet needs
# Wide -> long: 'ds' (year label) and 'y' (rent), one row per (city, year).
fmr1_melt = (fmr1_melt.melt(id_vars=['city, state'],
                            var_name='ds',
                            value_name='y')
             ).reset_index(drop=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="Szupeo8MNSM3" outputId="a1b59859-68dc-40ba-cf5b-abdeda621e29"
fmr1_melt.head()
# + id="wgT5pt5rNSM6"
# create city list
# NOTE(review): rebinds the same cities_list name used by the fmr0 section.
cities_list = list(fmr1_melt['city, state'].unique())
# + colab={"base_uri": "https://localhost:8080/"} id="VkCrooBBNSM9" outputId="1e28f5c4-b378-4e74-8272-fa97e312aeef"
len(cities_list)
# + id="LON-eiaKNSM-"
def rnd_series(city):
    """Return the rows of fmr1_melt belonging to *city* (a 'city, state' key).

    Redefines the fmr0 version above to read fmr1_melt instead.
    CLEANUP: the original also built an unused `dates` DataFrame from
    pd.to_datetime of the 'ds' column; that dead computation is removed
    (it was never returned, so behaviour is unchanged).
    """
    return fmr1_melt[fmr1_melt['city, state'] == city]
# + id="OzNPXrs0NSM_"
# One per-city sub-frame for each entry of cities_list (fmr1 pass).
series = [rnd_series(city) for city in cities_list]
# + colab={"base_uri": "https://localhost:8080/", "height": 676} id="XJyZ-1ekNSNC" outputId="797b166c-85fc-4508-c4db-955d55a29a00"
series[0]
# + [markdown] id="bLA4ThmFNSNE"
# ### Running Prophet on df
# + id="SxakVk0iNSNE"
def run_prophet(series):
    """Fit Prophet to one city's rent series and forecast 10 more years.

    Identical redefinition of run_prophet from the fmr0 section: returns
    ['city, state', 'ds', 'yhat', 'yhat_lower', 'yhat_upper'] with the yhat
    columns cast to float and rounded up (np.ceil).
    """
    model = Prophet()
    model.fit(series)
    future = model.make_future_dataframe(periods=10, freq='Y')
    forecast = model.predict(future)[['ds', 'yhat', 'yhat_lower', 'yhat_upper']]
    # Tag every forecast row with the city it belongs to, key column first.
    forecast['city, state'] = series['city, state'].iloc[0]
    forecast = forecast[['city, state', 'ds', 'yhat', 'yhat_lower', 'yhat_upper']]
    yhat_cols = ['yhat', 'yhat_lower', 'yhat_upper']
    forecast[yhat_cols] = forecast[yhat_cols].astype(float).apply(np.ceil)
    return forecast
# + colab={"base_uri": "https://localhost:8080/", "height": 259} id="vDFp0wGVNSNF" outputId="76c4091d-6672-4b10-d7de-7fb6bed222d2"
# Smoke-test the forecaster on the first fmr1 city.
f = run_prophet(series[0])
f.head()
# + colab={"base_uri": "https://localhost:8080/"} id="MsEFRaw7NSNG" outputId="b4cfe39d-2ae8-4b0e-bf9a-791ee0f831e8"
# Forecast every city, appending each result to a single CSV (repeated
# header rows from mode='a' are filtered out when the file is read back).
for i in range(len(series)):
    f = run_prophet(series[i])
    # BUG FIX: the original passed index='False' — a truthy *string* —
    # so the row index was written after all; boolean False omits it.
    f.to_csv('fmr1_predictions.csv', mode='a', index=False)
# + id="PmRqZivbNSNH"
rental_prediction1 = pd.read_csv('fmr1_predictions.csv')
# + id="MjI19PCoNSNI"
# Drop the repeated header rows produced by the append-mode writes above.
rental_prediction1 = rental_prediction1.loc[rental_prediction1['city, state'] != 'city, state']
# + id="MB53RuBxNSNL"
rental_prediction1.to_csv('fmr1_predictions.csv', index=False)
# + id="WusYP7ctNSNM"
rent_long1 = rental_prediction1[['city, state', 'ds', 'yhat']]
# + colab={"base_uri": "https://localhost:8080/"} id="ay7e1UttNSNM" outputId="1f5a3fd4-9f93-448b-ce99-a427bac12bc7"
rent_long1.dtypes
# + colab={"base_uri": "https://localhost:8080/"} id="FnizMZIdNSNO" outputId="54e5fe41-6673-41f2-f126-460d8d097e28"
# Coerce yhat to numeric (read as object because of embedded header rows).
# NOTE(review): slice assignment may raise SettingWithCopyWarning.
rent_long1['yhat'] = pd.to_numeric(rent_long1['yhat'],errors='coerce')
# + colab={"base_uri": "https://localhost:8080/", "height": 206} id="1QiHMTmLNSNO" outputId="634fd1a1-28fc-427c-d865-1bdfa43d794c"
rent_long1.head()
# + id="vhUJ2jaoNSNP"
# Long -> wide: rows = city, columns = forecast date, values = yhat.
rent_long1 = rent_long1.pivot_table(index = 'city, state',
                                    columns = 'ds',
                                    values = 'yhat')
# + id="SbqWwNLSNSNQ"
# NOTE(review): drops the first three rows — confirm which cities these are.
rent_long1 = rent_long1.iloc[3:]
# + colab={"base_uri": "https://localhost:8080/", "height": 455} id="BWWiE2DENSNQ" outputId="b22cc675-2ddb-496d-c313-21cf99096b56"
rent_long1
# + id="PwZ6aVbhNSNT"
rent_long1.to_csv('fmr1_long.csv', index=False )
# + id="G0ge7L2fNDdP"
| notebooks/model/rental/rents.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="l9-2S1Wq9Mb5" outputId="01b67ca7-8555-4cf3-81dc-54d5bcba47c2"
import pandas as pd
from google.colab import drive
# Mount Google Drive (Colab) so the dataset CSVs below are readable.
drive.mount('/content/gdrive', force_remount=True)
# + id="M67C6Ixe9eCa"
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import pandas as pd
import matplotlib.ticker as ticker
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
# %matplotlib inline
import pickle
# + id="jlQtQs7P9oiQ"
# Pre-split feature tables; the bucketed targets are attached per section below.
testData = pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/test.csv')
trainData = pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/train.csv')
validData = pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/val.csv')
# + [markdown] id="zlSfCUZ2-jM_"
# # Six Buckets
# + id="_ifKUiVO-yv8"
# Per-split bucketed IMDb-score targets (6 coarse classes).
train_data_target_6k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/train_6_buckets.csv')
test_data_target_6k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/test_6_buckets.csv')
val_data_target_6k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/valid_6_buckets.csv')
# Target column is overwritten IN PLACE on the shared frames — the 11- and
# 21-bucket sections below re-use testData/trainData/validData the same way.
testData['data_IMDBscore']=test_data_target_6k['data_IMDBscore']
trainData['data_IMDBscore']=train_data_target_6k['data_IMDBscore']
validData ['data_IMDBscore']=val_data_target_6k['data_IMDBscore']
# Feature/target split for each of train/test/validation.
train_X = trainData.drop(columns=['data_IMDBscore'])
train_Y = trainData['data_IMDBscore']
test_X = testData.drop(columns=['data_IMDBscore'])
test_Y = testData['data_IMDBscore']
val_X=validData.drop(columns=['data_IMDBscore'])
val_Y=validData['data_IMDBscore']
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="N9AGN0cd-Hb5" outputId="c9178af8-1666-41c0-da0c-1b1202f91411"
# Sweep K = 1..39, recording test-set error and pickling each fitted model.
error_rate = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(train_X, train_Y)
    pred_i = knn.predict(test_X)
    error_rate.append(np.mean(pred_i != test_Y))
    filename = '/content/gdrive/MyDrive/SavedModels/knn_6_' + str(i) + ".sav"
    # FIX: use a context manager; the original leaked the file handle
    # (pickle.dump(knn, open(filename, 'wb'))).
    with open(filename, 'wb') as model_file:
        pickle.dump(knn, model_file)
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed',
         marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# BUG FIX: error_rate[j] holds the error for K = j + 1 (K starts at 1), so
# the original reported a K one below the true optimum; add 1 to the index.
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)) + 1)
# + colab={"base_uri": "https://localhost:8080/"} id="TYqPOBKpPQNS" outputId="26ebb905-2d02-4995-e314-a6601327179b"
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
# Refit at the chosen K and report weighted metrics on all three splits.
# NOTE(review): K=18 looks hand-picked from the error plot above — confirm
# it matches the printed argmin.
neigh = KNeighborsClassifier(n_neighbors = 18).fit(train_X,train_Y)
trainpred = neigh.predict(train_X)
valpred = neigh.predict(val_X)
testpred = neigh.predict(test_X)
train_f1_score = f1_score(train_Y, trainpred, average='weighted')
train_precision_score = precision_score(train_Y, trainpred, average='weighted')
train_recall_score = recall_score(train_Y, trainpred, average='weighted')
train_accuracy_score = accuracy_score(train_Y, trainpred, normalize=True)
print("train_f1_score "+str(train_f1_score) )
print("train_precision_score "+str(train_precision_score))
print("train_recall_score "+str(train_recall_score))
print("train_accuracy_score "+str(train_accuracy_score))
val_f1_score = f1_score(val_Y, valpred, average='weighted')
val_precision_score = precision_score(val_Y, valpred, average='weighted')
val_recall_score = recall_score(val_Y, valpred, average='weighted')
val_accuracy_score = accuracy_score(val_Y, valpred, normalize=True)
print("val_f1_score "+str(val_f1_score) )
print("val_precision_score "+str(val_precision_score))
print("val_recall_score "+str(val_recall_score))
print("val_accuracy_score "+str(val_accuracy_score))
test_f1_score = f1_score(test_Y, testpred, average='weighted')
test_precision_score = precision_score(test_Y, testpred, average='weighted')
test_recall_score = recall_score(test_Y, testpred, average='weighted')
test_accuracy_score = accuracy_score(test_Y, testpred, normalize=True)
print("test_f1_score "+str(test_f1_score) )
print("test_precision_score "+str(test_precision_score))
print("test_recall_score "+str(test_recall_score))
print("test_accuracy_score "+str(test_accuracy_score))
# + [markdown] id="qo35Yyji_qHL"
# # Eleven Buckets
# + id="rLd4AjG1_wqO"
# Per-split bucketed IMDb-score targets (11 classes); same in-place
# target-overwrite pattern as the 6-bucket section.
train_data_target_11k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/train_11_buckets.csv')
test_data_target_11k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/test_11_buckets.csv')
val_data_target_11k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/valid_11_buckets.csv')
testData['data_IMDBscore']=test_data_target_11k['data_IMDBscore']
trainData['data_IMDBscore']=train_data_target_11k['data_IMDBscore']
validData ['data_IMDBscore']=val_data_target_11k['data_IMDBscore']
train_X = trainData.drop(columns=['data_IMDBscore'])
train_Y = trainData['data_IMDBscore']
test_X = testData.drop(columns=['data_IMDBscore'])
test_Y = testData['data_IMDBscore']
val_X=validData.drop(columns=['data_IMDBscore'])
val_Y=validData['data_IMDBscore']
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="6tUwo_-I_wqP" outputId="f3c95ab4-1859-4279-d3e1-7f5590e27e38"
# Sweep K = 1..39 for the 11-bucket targets, pickling each fitted model.
error_rate = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(train_X, train_Y)
    pred_i = knn.predict(test_X)
    error_rate.append(np.mean(pred_i != test_Y))
    filename = '/content/gdrive/MyDrive/SavedModels/knn_11_' + str(i) + ".sav"
    # FIX: context manager instead of the original leaked file handle.
    with open(filename, 'wb') as model_file:
        pickle.dump(knn, model_file)
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed',
         marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# BUG FIX: index j corresponds to K = j + 1, so add 1 when reporting.
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)) + 1)
# + colab={"base_uri": "https://localhost:8080/"} id="0Xbnb5QAF-lT" outputId="f02cfa25-791e-4cb3-8439-b55fb5bb4f9b"
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
# Refit at the chosen K and report weighted metrics on all three splits.
# NOTE(review): K=38 looks hand-picked from the error plot above — confirm
# it matches the printed argmin.
neigh = KNeighborsClassifier(n_neighbors = 38).fit(train_X,train_Y)
trainpred = neigh.predict(train_X)
valpred = neigh.predict(val_X)
testpred = neigh.predict(test_X)
train_f1_score = f1_score(train_Y, trainpred, average='weighted')
train_precision_score = precision_score(train_Y, trainpred, average='weighted')
train_recall_score = recall_score(train_Y, trainpred, average='weighted')
train_accuracy_score = accuracy_score(train_Y, trainpred, normalize=True)
print("train_f1_score "+str(train_f1_score) )
print("train_precision_score "+str(train_precision_score))
print("train_recall_score "+str(train_recall_score))
print("train_accuracy_score "+str(train_accuracy_score))
val_f1_score = f1_score(val_Y, valpred, average='weighted')
val_precision_score = precision_score(val_Y, valpred, average='weighted')
val_recall_score = recall_score(val_Y, valpred, average='weighted')
val_accuracy_score = accuracy_score(val_Y, valpred, normalize=True)
print("val_f1_score "+str(val_f1_score) )
print("val_precision_score "+str(val_precision_score))
print("val_recall_score "+str(val_recall_score))
print("val_accuracy_score "+str(val_accuracy_score))
test_f1_score = f1_score(test_Y, testpred, average='weighted')
test_precision_score = precision_score(test_Y, testpred, average='weighted')
test_recall_score = recall_score(test_Y, testpred, average='weighted')
test_accuracy_score = accuracy_score(test_Y, testpred, normalize=True)
print("test_f1_score "+str(test_f1_score) )
print("test_precision_score "+str(test_precision_score))
print("test_recall_score "+str(test_recall_score))
print("test_accuracy_score "+str(test_accuracy_score))
# + [markdown] id="rP_G0Fy0_9VL"
# # Twenty One Bucket
# + id="XylmpQsm_8x6"
# Per-split bucketed IMDb-score targets (21 classes); same in-place
# target-overwrite pattern as the earlier sections.
train_data_target_21k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/train_21_buckets.csv')
test_data_target_21k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/test_21_buckets.csv')
val_data_target_21k=pd.read_csv('/content/gdrive/MyDrive/ML_Project/Dataset/valid_21_buckets.csv')
testData['data_IMDBscore']=test_data_target_21k['data_IMDBscore']
trainData['data_IMDBscore']=train_data_target_21k['data_IMDBscore']
validData ['data_IMDBscore']=val_data_target_21k['data_IMDBscore']
train_X = trainData.drop(columns=['data_IMDBscore'])
# Targets are doubled — NOTE(review): presumably converts half-point score
# buckets into integer class labels; confirm against the bucket files.
train_Y = trainData['data_IMDBscore']*2
test_X = testData.drop(columns=['data_IMDBscore'])
test_Y = testData['data_IMDBscore']*2
val_X=validData.drop(columns=['data_IMDBscore'])
val_Y=validData['data_IMDBscore']*2
# + colab={"base_uri": "https://localhost:8080/", "height": 421} id="zUW_O6m8ADRQ" outputId="db8013b4-1afb-4fcd-dc27-2c5328d18544"
# Sweep K = 1..39 for the 21-bucket targets, pickling each fitted model.
error_rate = []
for i in range(1, 40):
    knn = KNeighborsClassifier(n_neighbors=i)
    knn.fit(train_X, train_Y)
    pred_i = knn.predict(test_X)
    error_rate.append(np.mean(pred_i != test_Y))
    filename = '/content/gdrive/MyDrive/SavedModels/knn_21_' + str(i) + ".sav"
    # FIX: context manager instead of the original leaked file handle.
    with open(filename, 'wb') as model_file:
        pickle.dump(knn, model_file)
plt.figure(figsize=(10,6))
plt.plot(range(1,40),error_rate,color='blue', linestyle='dashed',
         marker='o',markerfacecolor='red', markersize=10)
plt.title('Error Rate vs. K Value')
plt.xlabel('K')
plt.ylabel('Error Rate')
# BUG FIX: index j corresponds to K = j + 1, so add 1 when reporting.
print("Minimum error:-",min(error_rate),"at K =",error_rate.index(min(error_rate)) + 1)
# + colab={"base_uri": "https://localhost:8080/"} id="pnn3Wz69FxG9" outputId="005d4dc7-8705-4d2a-bb9f-a7658afdc053"
from sklearn.metrics import f1_score, precision_score, recall_score, accuracy_score
# Refit at the chosen K and report weighted metrics on all three splits.
# NOTE(review): K=29 looks hand-picked from the error plot above — confirm
# it matches the printed argmin.
neigh = KNeighborsClassifier(n_neighbors = 29).fit(train_X,train_Y)
trainpred = neigh.predict(train_X)
valpred = neigh.predict(val_X)
testpred = neigh.predict(test_X)
train_f1_score = f1_score(train_Y, trainpred, average='weighted')
train_precision_score = precision_score(train_Y, trainpred, average='weighted')
train_recall_score = recall_score(train_Y, trainpred, average='weighted')
train_accuracy_score = accuracy_score(train_Y, trainpred, normalize=True)
print("train_f1_score "+str(train_f1_score) )
print("train_precision_score "+str(train_precision_score))
print("train_recall_score "+str(train_recall_score))
print("train_accuracy_score "+str(train_accuracy_score))
val_f1_score = f1_score(val_Y, valpred, average='weighted')
val_precision_score = precision_score(val_Y, valpred, average='weighted')
val_recall_score = recall_score(val_Y, valpred, average='weighted')
val_accuracy_score = accuracy_score(val_Y, valpred, normalize=True)
print("val_f1_score "+str(val_f1_score) )
print("val_precision_score "+str(val_precision_score))
print("val_recall_score "+str(val_recall_score))
print("val_accuracy_score "+str(val_accuracy_score))
test_f1_score = f1_score(test_Y, testpred, average='weighted')
test_precision_score = precision_score(test_Y, testpred, average='weighted')
test_recall_score = recall_score(test_Y, testpred, average='weighted')
test_accuracy_score = accuracy_score(test_Y, testpred, normalize=True)
print("test_f1_score "+str(test_f1_score) )
print("test_precision_score "+str(test_precision_score))
print("test_recall_score "+str(test_recall_score))
print("test_accuracy_score "+str(test_accuracy_score))
| Classification/KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tnc_velma
# language: python
# name: tnc_velma
# ---
# # Spatial analysis of simulation results
import __init__
import scripts.config as config
import numpy as np
import pandas as pd
import ipywidgets as widgets
from natsort import natsorted
import os
from pathlib import Path
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib import colors
# +
# Plotting parameters
XSMALL_SIZE = 6
SMALL_SIZE = 7
MEDIUM_SIZE = 9
BIGGER_SIZE = 12

plt.rc('font', size=SMALL_SIZE)          # controls default text sizes
plt.rc('axes', titlesize=SMALL_SIZE)     # fontsize of the axes title
plt.rc('axes', labelsize=SMALL_SIZE)     # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE)    # fontsize of the tick labels
plt.rc('legend', fontsize=SMALL_SIZE)    # legend fontsize
# BUG FIX: the original line duplicated the 'axes' titlesize call while its
# comment promised the *figure* title; 'figure' is the rc group for the
# suptitle size, and BIGGER_SIZE was otherwise unused.
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
plt.rcParams['figure.dpi'] = 120
# +
# Config
# GCM / emissions-scenario combinations whose VELMA runs are analysed below.
gcms = ['canesm2_RCP85', 'ccsm4_RCP85', 'giss_e2_h_RCP85', 'noresm1_m_RCP85']
# pd.to_datetime parses these month-first: 2020-01-01 .. 2099-12-31.
sim_start = pd.to_datetime('01-01-2020')
sim_end = pd.to_datetime('12-31-2099')
# +
# Import results files
scenarios = ['ind35yr']
dailies = []
annuals = []
# Import daily and annual results
# Results in nested lists ([x][y], for x management scenarios and y GCMs)
for scenario in scenarios:
    dailies_scenario = []
    annuals_scenario = []
    scenario_dir = config.velma_data.parents[1] / 'results' / scenario
    dirs = os.listdir(scenario_dir)  # NOTE(review): unused — candidate for removal
    for gcm in gcms:
        # Result directories follow ellsworth_<scenario>_<start yy>_<end yy>_<gcm>.
        results_dir = scenario_dir / 'ellsworth_{}_{}_{}_{}'.format(scenario,
                                                                   sim_start.year % 100,
                                                                   sim_end.year % 100,
                                                                   gcm)
        daily_results = pd.read_csv(results_dir / 'DailyResults.csv')
        # Format datetime: combine Year + zero-padded Julian day into a '%Y%j' index.
        jday_pad = daily_results['Day'].apply(lambda x: str(x).zfill(3))
        str_year = daily_results['Year'].apply(lambda x: str(x))
        rng = pd.to_datetime((str_year + jday_pad), format='%Y%j')
        daily_results.index = rng
        dailies_scenario.append(daily_results)
    dailies.append(dailies_scenario)
# -
# ## Biomass carbon
# +
# Trim the image to only the delineated watershed simulated through VELMA
# Can export a delineated DEM from JPDEM
del_dem = np.loadtxt(config.dem_velma.parents[0] / 'delineated_dem.asc', skiprows=6)
watershed_mask = (del_dem == -9999)  # NODATA cells lie outside the watershed
biomass_c_dir = results_dir / 'spatial_writer' / 'biomass_c'

# Load each annual biomass-carbon grid, blanking cells outside the watershed.
biomass_c_asc = []
for fname in os.listdir(biomass_c_dir):
    grid = np.loadtxt(biomass_c_dir / fname, skiprows=6)
    grid[watershed_mask] = np.nan
    biomass_c_asc.append(grid)

# File names encode the simulation year as the second-to-last '_'-separated field.
sim_years = [Path(fname).stem.split('_')[-2] for fname in os.listdir(biomass_c_dir)]
years = [int(y) for y in sim_years]
# -
results_dir
# +
# Two-colour map used to grey out cells outside the watershed.
binary_cmap = cm.get_cmap('binary', 2)
gray = np.array([0.31, 0.31, 0.31, 1])
binary_cmap_colors = binary_cmap(np.linspace(0, 1, 2))
binary_cmap_colors[:1, :] = gray  # replace the first colour with mid-grey
binary_cmap_gray = colors.ListedColormap(binary_cmap_colors)
# YlGn ramp with the two lowest bins forced to pink so (near-)zero biomass stands out.
old_cmap = cm.get_cmap('YlGn', 256)
newcolors = old_cmap(np.linspace(0, 1, 256))
pink = np.array([248/256, 24/256, 148/256, 1])
newcolors[:2, :] = pink
new_cmap = colors.ListedColormap(newcolors)
@widgets.interact(year=(2020, 2099))
def f(year=min(sim_years)):
    """Interactive map of biomass carbon for the chosen simulation year."""
    # NOTE(review): sim_years holds strings, so min() is lexicographic — works
    # here only because all years are four digits; confirm if that changes.
    ind = sim_years.index(str(year))
    fig, ax = plt.subplots(figsize=(5, 6))
    im = ax.imshow(biomass_c_asc[ind], cmap=new_cmap)
    # Overlay: 1.0 where the grid is NaN (outside watershed), NaN elsewhere,
    # so only the out-of-watershed cells get painted grey.
    mask = np.isnan(biomass_c_asc[ind]) * 1.0
    mask[mask == 0] = np.nan
    ax.imshow(mask, cmap=binary_cmap_gray)
    cbar = fig.colorbar(im, ax=ax)
    cbar.ax.set_ylabel(r'$gC/m^2$', rotation=270)
# -
# ## Filter maps
# +
# Trim the image to only the delineated watershed simulated through VELMA
# Can export a delineated DEM from JPDEM
del_dem = np.loadtxt(config.dem_velma.parents[0] / 'delineated_dem.asc', skiprows=6)
watershed_mask = (del_dem==-9999)  # NODATA cells lie outside the watershed
filter_maps_dir = config.velma_data / 'landcover' / 'filter_maps'
asciis = []
# Only load the filter maps belonging to this harvest scenario.
name = 'random_35yr_clearcut_10pct'
# natsorted keeps the per-year files in numeric (not lexicographic) order.
for file in natsorted(os.listdir(filter_maps_dir)):
    if name in file:
        z = np.loadtxt(filter_maps_dir / file, skiprows=6)
        z[watershed_mask] = np.nan
        asciis.append(z)
# +
# Two-colour map used to grey out cells outside the watershed (same as above).
binary_cmap = cm.get_cmap('binary', 2)
gray = np.array([0.31, 0.31, 0.31, 1])
binary_cmap_colors = binary_cmap(np.linspace(0, 1, 2))
binary_cmap_colors[:1, :] = gray
binary_cmap_gray = colors.ListedColormap(binary_cmap_colors)
# One entry per loaded filter map; years are 1-based indices, not calendar years.
years = [x+1 for x in range(0, len(asciis))]
@widgets.interact(year=(np.min(years), np.max(years)))
def f(year=min(years)):
    """Interactive harvested/not-harvested map for the chosen simulation year."""
    ind = years.index(year)
    fig, ax = plt.subplots(figsize=(5, 6))
    im = ax.imshow(asciis[ind], cmap='Reds')
    # Grey overlay for cells outside the watershed (NaN in the filter map).
    mask = np.isnan(asciis[ind]) * 1.0
    mask[mask == 0] = np.nan
    ax.imshow(mask, cmap=binary_cmap_gray)
    cbar = fig.colorbar(im, ax=ax)
    cbar.ax.set_ylabel('Harvested/Not harvested', rotation=270)
| notebooks/results_spatial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
def load_and_process(url_or_path_to_csv_file):
    """Load the ACS 2017 county CSV and return rows for four large states.

    Parameters
    ----------
    url_or_path_to_csv_file : str or path-like
        Location of the raw ACS county-level CSV.

    Returns
    -------
    pandas.DataFrame
        Rows for Florida, Texas, California and New York with NaN rows and
        unused commute/occupation columns removed, re-indexed from 0.
    """
    # Stage 1: load, drop incomplete rows, drop columns unused downstream.
    df1 = (
        pd.read_csv(url_or_path_to_csv_file)
        .dropna()
        .drop(['VotingAgeCitizen', 'Office', 'Drive', 'Carpool', 'Transit',
               'Walk', 'Service', 'Construction'], axis="columns")
    )
    # Stage 2: keep the four focus states. isin() replaces the chained `|`
    # comparisons, and reset_index(drop=True) replaces the
    # reset_index().drop(['index'], axis=1) two-step.
    df2 = (
        df1
        .loc[df1['State'].isin(['Florida', 'Texas', 'California', 'New York'])]
        .reset_index(drop=True)
    )
    return df2
load_and_process("acs2017_county_data.csv")
| data/processed/.ipynb_checkpoints/MethodChaining-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Necessary
from fastai.fastai.text import *
from sklearn.metrics import confusion_matrix, f1_score, classification_report
import html
import csv
# +
#Necessary
# Special tokens injected into the text before tokenization (ULMFiT convention).
BOS = "xbos " #beginning-of-sentence tag
FLD = "xfld " #data field tag
TURN = " eot "  # end-of-turn separator between the three dialogue turns
# Label order matters: label index = position in this list.
CLASSES = ["angry", "sad", "happy", "others"]
# +
#Necessary
# Raw EmoContext starter-kit data plus working dirs for classifier and LM data.
PATH=Path("starterkitdata/")
CLAS_PATH = Path("emo_clas/")
CLAS_PATH.mkdir(exist_ok=True)
LM_PATH = Path("emo_lm/")
LM_PATH.mkdir(exist_ok=True)
# +
#Necessary
def get_texts(path):
    """Read an EmoContext TSV file and return (texts, labels) as numpy arrays.

    Each data row is id, turn1, turn2, turn3[, label]. The three turns are
    joined into one string prefixed with BOS and separated by TURN markers.
    Label rows are mapped to their index in CLASSES; test files have no label
    column, so labels may come back empty.
    """
    texts, labels = [], []
    with open(path, "r", encoding="utf-8") as dataset:
        reader = csv.reader(dataset, delimiter="\t")
        next(reader, None)  # skip header
        data = list(reader)
    # The `with` block closes the file; the original explicit close() inside
    # the block was redundant and has been removed.
    for turn in data:
        texts.append(BOS + turn[1] + TURN + turn[2] + TURN + turn[3])
        if len(turn) == 5:  # skip nonexistent label when reading test data
            labels.append(CLASSES.index(turn[4]))
    return np.array(texts), np.array(labels)
ratio = 10 #ratio of train to validation
trn_texts, trn_labels = get_texts(PATH/"train.txt")
tst_texts, _ = get_texts(PATH/"devwithoutlabels.txt")
# First 1/ratio of the training file becomes the validation split.
val_texts, val_labels = trn_texts[:int(len(trn_texts)/ratio)], trn_labels[:int(len(trn_labels)/ratio)]
trn_texts, trn_labels = trn_texts[int(len(trn_texts)/ratio):], trn_labels[int(len(trn_labels)/ratio):]
# -
len(trn_texts), len(val_texts), len(tst_texts)
#Necessary
col_names = ["labels", "text"]
# Fixed seed so the shuffle below is reproducible.
np.random.seed(42)
trn_idx = np.random.permutation(len(trn_texts))
val_idx = np.random.permutation(len(val_texts))
# +
# Shuffle texts and labels with the same permutation to keep pairs aligned.
trn_texts = trn_texts[trn_idx]
val_texts = val_texts[val_idx]
trn_labels = trn_labels[trn_idx]
val_labels = val_labels[val_idx]
# -
#Necessary
df_trn = pd.DataFrame({"text":trn_texts, "labels":trn_labels}, columns=col_names)
df_val = pd.DataFrame({"text":val_texts, "labels":val_labels}, columns=col_names)
# Test set has no labels; use 0 as a placeholder.
df_tst = pd.DataFrame({"text":tst_texts, "labels":[0]*len(tst_texts)}, columns=col_names)
# Get Weights for class imbalance
#Necessary
trn_labelcounts = df_trn.groupby(["labels"]).size()
val_labelcounts = df_val.groupby(["labels"]).size()
trn_label_sum = len(df_trn["labels"])
val_label_sum = len(df_val["labels"])
# Relative class frequencies per split.
trn_weights = [count/trn_label_sum for count in trn_labelcounts]
val_weights = [count/val_label_sum for count in val_labelcounts]
trn_weights, val_weights
#Necessary
# Inverse-frequency weights (largest class gets weight 1) for loss weighting.
rtd_val_weights = [max(val_weights)/value for value in val_weights]
rtd_val_weights
# +
df_trn.to_csv(CLAS_PATH/"train.csv", header=False, index=False)
df_val.to_csv(CLAS_PATH/"test.csv", header=False, index=False)
df_tst.to_csv(CLAS_PATH/"tst.csv", header=False, index=False)
(CLAS_PATH/"classes.txt").open("w").writelines(f"{o}\n" for o in CLASSES)
# -
df_trn.head()
(CLAS_PATH/"classes.txt").open().readlines()
# +
# Language-model copies: labels are irrelevant for LM training, so use 0.
df_trn = pd.DataFrame({"text":trn_texts, "labels":[0]*len(trn_texts)}, columns=col_names)
df_val = pd.DataFrame({"text":val_texts, "labels":[0]*len(val_texts)}, columns=col_names)
# df_tst is already initialised with label 0
df_trn.to_csv(LM_PATH/"train.csv", header=False, index=False)
df_val.to_csv(LM_PATH/"test.csv", header=False, index=False)
df_tst.to_csv(LM_PATH/"tst.csv", header=False, index=False)
# -
# Tokenization
# +
re1 = re.compile(r' +')
def fixup(x):
x = x.replace('#39;', "'").replace('amp;', '&').replace('#146;', "'").replace(
'nbsp;', ' ').replace('#36;', '$').replace('\\n', "\n").replace('quot;', "'").replace(
'<br />', "\n").replace('\\"', '"').replace('<unk>','u_n').replace(' @.@ ','.').replace(
' @-@ ','-').replace('\\', ' \\ ')
return re1.sub(' ', html.unescape(x))
# -
def get_texts(df, n_lbls=1):
    """Tokenize one dataframe chunk; returns (token lists, label list).

    NOTE(review): this redefines the earlier file-reading get_texts() — the
    later definition wins, which matches the notebook's execution order.
    Columns 0..n_lbls-1 are labels; the remaining columns are concatenated
    into one text per row.
    """
    labels = df.iloc[:,range(n_lbls)].values.astype(np.int64)
    texts = df[n_lbls].astype(str)
    for i in range(n_lbls+1, len(df.columns)): texts += df[i].astype(str)
    texts = list(texts.apply(fixup).values)
    # fastai's multiprocess tokenizer, split across CPU cores.
    tok = Tokenizer().proc_all_mp(partition_by_cores(texts))
    return tok, list(labels)
def get_all(df, n_lbls):
    """Tokenize every chunk of *df* (a chunked CSV reader), concatenating
    tokens and labels across chunks."""
    all_tok = []
    all_labels = []
    for chunk_idx, chunk in enumerate(df):
        print(chunk_idx)  # progress: one line per chunk
        chunk_tok, chunk_labels = get_texts(chunk, n_lbls)
        all_tok.extend(chunk_tok)
        all_labels.extend(chunk_labels)
    return all_tok, all_labels
chunksize = 5000
# Chunked readers so tokenization can stream large files.
df_trn = pd.read_csv(LM_PATH/"train.csv", header=None, chunksize=chunksize)
df_val = pd.read_csv(LM_PATH/"test.csv", header=None, chunksize=chunksize)
df_tst = pd.read_csv(LM_PATH/"tst.csv", header=None, chunksize=chunksize)
tok_trn, trn_labels = get_all(df_trn, 1)
tok_val, val_labels = get_all(df_val, 1)
tok_tst, tst_labels = get_all(df_tst, 1)
# Cache the tokenized corpora to disk.
(LM_PATH/"tmp").mkdir(exist_ok=True)
np.save(LM_PATH/"tmp"/"tok_trn.npy",tok_trn)
np.save(LM_PATH/"tmp"/"tok_val.npy",tok_val)
np.save(LM_PATH/"tmp"/"tok_tst.npy",tok_tst)
tok_trn = np.load(LM_PATH/"tmp"/"tok_trn.npy")
tok_val = np.load(LM_PATH/"tmp"/"tok_val.npy")
tok_tst = np.load(LM_PATH/"tmp"/"tok_tst.npy")
# Token frequency over the training split only (o = one tokenized example, p = token).
freq = Counter(p for o in tok_trn for p in o)
freq.most_common(25)
len(freq)
max_vocab = 5000
min_freq = 5
# Keep at most max_vocab tokens that occur more than min_freq times.
itos = [o for o,c in freq.most_common(max_vocab) if c>min_freq]
itos.insert(0, "_unk_") #words that did not appear before or not frequent enough
itos.insert(1, "_pad_")
# string->index map; unknown tokens fall back to 0 (_unk_).
stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)})
len(itos)
# Numericalize: replace each token with its vocab index.
trn_lm = np.array([[stoi[o] for o in p] for p in tok_trn])
val_lm = np.array([[stoi[o] for o in p] for p in tok_val])
tst_lm = np.array([[stoi[o] for o in p] for p in tok_tst])
np.save(LM_PATH/"tmp"/"trn_ids.npy", trn_lm)
np.save(LM_PATH/"tmp"/"val_ids.npy", val_lm)
np.save(LM_PATH/"tmp"/"tst_ids.npy", tst_lm)
pickle.dump(itos, open(LM_PATH/"tmp"/"itos.pkl","wb"))
#necessary
trn_lm = np.load(LM_PATH/"tmp"/"trn_ids.npy")
val_lm = np.load(LM_PATH/"tmp"/"val_ids.npy")
tst_lm = np.load(LM_PATH/"tmp"/"tst_ids.npy")
itos = pickle.load(open(LM_PATH/"tmp"/"itos.pkl","rb"))
#necessary
vs = len(itos) #vocabsize
vs, len(trn_lm), len(val_lm), len(tst_lm)
# Wikitext103
#necessary
em_sz,nh,nl = 400,1150,3 #embedding, hidden_size, hidden_layer - values of original wikitext model
PRE_PATH = PATH/"models"/"wt103basedsem140"
PRE_LM_PATH = PRE_PATH/"wt103_based.h5"
# map_location keeps the pretrained weights on CPU regardless of where they were saved.
wgts = torch.load(PRE_LM_PATH, map_location=lambda storage, loc: storage) #load wikitext weights
enc_wgts = to_np(wgts["0.encoder.weight"])
row_m = enc_wgts.mean(0) #mean used for unknown tokens
# Map Vocabularies on each other
itos2 = pickle.load((PRE_PATH/"itos_sem140.pkl").open("rb"))
# -1 default marks tokens absent from the pretrained vocab.
stoi2 = collections.defaultdict(lambda:-1, {v:k for k,v in enumerate(itos2)})
len(stoi2)
new_w = np.zeros((vs, em_sz), dtype=np.float32) #0-matrix vocab-size*embedding-size
for i,w in enumerate(itos): #Go through vocab of turn dataset
    r = stoi2[w]
    new_w[i] = enc_wgts[r] if r>=0 else row_m #if word exists copy weight, otherwise average weight
# Encoder and (tied) decoder share the remapped embedding matrix.
wgts["0.encoder.weight"] = T(new_w)
wgts["0.encoder_with_dropout.embed.weight"] = T(np.copy(new_w))
wgts["1.decoder.weight"] = T(np.copy(new_w))
# ## Language Model
wd = 1e-8  # weight decay
bptt = 70  # backprop-through-time sequence length
bs = 150   # batch size
opt_fn = partial(optim.Adam, betas=(0.8, 0.99))
# LM loaders stream one long concatenated token sequence per split.
trn_dl = LanguageModelLoader(np.concatenate(trn_lm), bs, bptt)
val_dl = LanguageModelLoader(np.concatenate(val_lm), bs, bptt)
tst_dl = LanguageModelLoader(np.concatenate(tst_lm), bs, bptt)
md = LanguageModelData(PATH, 1, vs, trn_dl, val_dl, tst_dl, bs=bs, bptt=bptt)
drops = np.array([0.25, 0.1, 0.2, 0.02, 0.15])*1.6 #1.8 smaller dataset = higher multiplicator
# Embedding lookup requires int64 indices.
md.trn_dl.data = md.trn_dl.data.long()
md.val_dl.data = md.val_dl.data.long()
md.test_dl.data = md.test_dl.data.long()
# Initialize missing tokens with mean weights. So freeze everything except last layer
# +
learner= md.get_model(opt_fn, em_sz, nh, nl,
    dropouti=drops[0], dropout=drops[1], wdrop=drops[2], dropoute=drops[3], dropouth=drops[4])
learner.metrics = [accuracy]
learner.freeze_to(-1)
# -
dir(learner), learner.get_model_path
# Load the vocab-remapped wikitext weights prepared above.
learner.model.load_state_dict(wgts)
learner.lr_find2(end_lr=0.2, wds=wd, num_it=71, linear=True)
learner.sched.plot()
lr = 0.12
lrs = lr
# Fine-tune the last layer only, then unfreeze for full fine-tuning.
learner.fit(lrs/2, 1, wds=wd, use_clr=(32,2), cycle_len=1)
learner.save('lm_last_ft')
learner.load('lm_last_ft')
learner.unfreeze()
learner.lr_find2(end_lr=0.2, wds=wd, num_it=89)
learner.sched.plot()
lrs = 0.01
learner.fit(lrs, 1, wds=wd, use_clr=(20,10), cycle_len=14)
learner.save("lm1")
# The encoder is what the downstream classifier reuses.
learner.save_encoder("lm1_enc")
learner.sched.plot_loss()
learner.sched.plot_lr()
# Tokenize the classifier CSVs (labels preserved this time).
df_trn = pd.read_csv(CLAS_PATH/"train.csv", header=None, chunksize=chunksize)
df_val = pd.read_csv(CLAS_PATH/"test.csv", header=None, chunksize=chunksize)
df_tst = pd.read_csv(CLAS_PATH/"tst.csv", header=None, chunksize=chunksize)
tok_trn, trn_labels = get_all(df_trn, 1)
tok_val, val_labels = get_all(df_val, 1)
# BUG FIX: this was `tol_tst, tst_labels = ...` — the typo left tok_tst holding
# the stale LM tokens, so the classifier test tokens saved below were wrong.
tok_tst, tst_labels = get_all(df_tst, 1)
# +
(CLAS_PATH/"tmp").mkdir(exist_ok=True)
np.save(CLAS_PATH/"tmp"/"tok_trn.npy", tok_trn)
np.save(CLAS_PATH/"tmp"/"tok_val.npy", tok_val)
np.save(CLAS_PATH/"tmp"/"tok_tst.npy", tok_tst)
np.save(CLAS_PATH/"tmp"/"trn_labels.npy", trn_labels)
np.save(CLAS_PATH/"tmp"/"val_labels.npy", val_labels)
np.save(CLAS_PATH/"tmp"/"tst_labels.npy", tst_labels)
# -
#necessary
tok_trn = np.load(CLAS_PATH/'tmp'/'tok_trn.npy')
tok_val = np.load(CLAS_PATH/'tmp'/'tok_val.npy')
tok_tst = np.load(CLAS_PATH/"tmp"/"tok_tst.npy")
#Necessary
# Reuse the language model's vocabulary so encoder weights line up.
itos = pickle.load((LM_PATH/"tmp"/"itos.pkl").open("rb"))
stoi = collections.defaultdict(lambda:0, {v:k for k,v in enumerate(itos)})
len(itos)
# Numericalize the classifier splits with the LM vocab.
trn_clas = np.array([[stoi[o] for o in p] for p in tok_trn])
val_clas = np.array([[stoi[o] for o in p] for p in tok_val])
tst_clas = np.array([[stoi[o] for o in p] for p in tok_tst])
np.save(CLAS_PATH/"tmp"/"trn_ids.npy", trn_clas)
np.save(CLAS_PATH/"tmp"/"val_ids.npy", val_clas)
np.save(CLAS_PATH/"tmp"/"tst_ids.npy", tst_clas)
# ## Classifier
#Necessary
trn_clas = np.load(CLAS_PATH/'tmp'/'trn_ids.npy')
val_clas = np.load(CLAS_PATH/'tmp'/'val_ids.npy')
tst_clas = np.load(CLAS_PATH/"tmp"/"tst_ids.npy")
#Necessary
trn_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'trn_labels.npy'))
val_labels = np.squeeze(np.load(CLAS_PATH/'tmp'/'val_labels.npy'))
tst_labels = np.squeeze(np.load(CLAS_PATH/"tmp"/"tst_labels.npy"))
#Necessary
bptt,em_sz,nh,nl = 70,400,1150,3  # must match the language model's dimensions
vs = len(itos)
opt_fn = partial(optim.Adam, betas=(0.8, 0.99))
bs = 150
#Necessary
# Shift labels so the smallest is 0; c = number of classes.
min_lbl = trn_labels.min()
trn_labels -= min_lbl
val_labels -= min_lbl
tst_labels -= min_lbl
c=int(trn_labels.max())+1
#Necessary
trn_ds = TextDataset(trn_clas, trn_labels)
val_ds = TextDataset(val_clas, val_labels)
tst_ds = TextDataset(tst_clas, tst_labels)
trn_samp = SortishSampler(trn_clas, key=lambda x: len(trn_clas[x]), bs=bs//2)
val_samp = SortSampler(val_clas, key=lambda x: len(val_clas[x])) #sorts dataset by length to reduce too strongly padded tensors
tst_samp = SortSampler(tst_clas, key=lambda x: len(tst_clas[x]))
trn_dl = DataLoader(trn_ds, bs//2, transpose=True, num_workers=1, pad_idx=1, sampler=trn_samp)
val_dl = DataLoader(val_ds, bs, transpose=True, num_workers=1, pad_idx=1, sampler=val_samp)
# NOTE(review): tst_samp is defined but not passed here, so test order stays
# as-is — confirm that's intended for the submission step.
tst_dl = DataLoader(tst_ds, bs, transpose=True, num_workers=1, pad_idx=1)
md = ModelData(PATH, trn_dl, val_dl, tst_dl)
#Necessary
dps = np.array([0.4,0.5,0.05,0.3,0.4])*0.7  # dropout schedule, scaled down
#Necessary
# 20*70 = max tokens considered per example by the pooling classifier head.
m = get_rnn_classifier(bptt, 20*70, c, vs, emb_sz=em_sz, n_hid=nh, n_layers=nl, pad_token=1,
    layers=[em_sz*3, 50,c], drops=[dps[4], 0.2],
    dropouti=dps[0], wdrop=dps[1], dropoute=dps[2], dropouth=dps[3])
#check dropout layers
#Necessary
learn = RNN_Learner(md, TextModel(to_gpu(m)), opt_fn=opt_fn)
learn.reg_fn = partial(seq2seq_reg, alpha=2, beta=1)
learn.clip=.12  # gradient clipping
learn.metrics = [accuracy]
# Class-weighted loss experiment, currently disabled:
#loss_weights = torch.FloatTensor(rtd_val_weights).cuda()
#learn.crit = partial(F.cross_entropy, weight=loss_weights)
learn.get_model_path
lrs = 10e-3
wd = 1e-7
# Load the fine-tuned LM encoder, then gradually unfreeze (ULMFiT recipe).
learn.load_encoder("lm1_enc")
learn.unfreeze()
learn.lr_find(end_lr=1e-2, wds=wd)
learn.sched.plot()
learn.freeze_to(-1)
learn.fit(lrs, 1, wds=wd, use_wd_sched=True, cycle_len=1, use_clr_beta=(10,10,0.95,0.85), best_save_name='best_lm1')
learn.save('clas_0')
learn.load('best_lm1')
learn.freeze_to(-2)
learn.fit(lrs, 1, wds=wd, use_wd_sched=True, cycle_len=1, use_clr_beta=(10,10,0.95,0.85), best_save_name='best_lm2')
learn.save('clas_1')
learn.load('clas_1')
learn.unfreeze()
learn.lr_find(end_lr=10e-4, wds=wd, linear=True)
learn.sched.plot()
lrs = 10e-4
learn.fit(lrs, 1, wds=wd, use_wd_sched=True, cycle_len=10, use_clr_beta=(10,10,0.95,0.85), best_save_name='best_classifier')
learn.sched.plot_loss()
learn.save('clas_2')
learn.load("best_classifier")
# +
val_lbls = np.load(CLAS_PATH/'tmp'/'val_labels.npy').flatten()
# The validation loader uses val_samp's order, so reorder labels to match predictions.
val_lbls_sampled = val_lbls[list(val_samp)]
predictions = np.argmax(learn.predict(), axis=1)
acc = (val_lbls_sampled == predictions).mean()
print("Accuracy =", acc, ", \nConfusion Matrix =")
clas_labels = [0,1,2,3]
print(confusion_matrix(val_lbls_sampled, predictions, labels=clas_labels))
print("F-Score: ", f1_score(val_lbls_sampled, predictions, average="weighted"))
# SemEval scoring ignores the "others" class (index 3).
print("Sem-eval F-Score: ", f1_score(val_lbls_sampled, predictions, labels=[0,1,2], average="micro"))
print(classification_report(val_lbls_sampled, predictions))
# -
# ## Submission
# ### Classifier weight tuning
predictions = np.argmax(learn.predict(True), axis=1)  # True = test set
predictions[:10]
rtd_val_weights
average = np.average(rtd_val_weights)
# Hand-tuned per-class multipliers applied to the prediction probabilities.
#x = [1,1,1,1]
x = [0.25,0.45,0.5,1.16]
rtd_val_weights_clas = [x[i]/average for i, value in enumerate(rtd_val_weights)]
rtd_val_weights_clas
preds = learn.predict(True)*rtd_val_weights_clas
preds = np.argmax(preds, axis=1)
preds[:30], predictions[:30]
# +
# Compare class distributions with and without the reweighting.
x = np.unique(preds, return_counts=True)
y = np.unique(predictions, return_counts=True)
x[1]/[[np.sum(x[1])]*len(x[1])], y[1]/[[np.sum(y[1])]*len(y[1])]
# -
# ### Create output
def create_output():
    """Build the submission table: a header row plus one row per test example,
    each holding id, the three turns, and the predicted class name."""
    header = ["id", "turn1", "turn2", "turn3", "label"]
    # Class-weighted test-set probabilities (True selects the test set).
    weighted = learn.predict(True) * rtd_val_weights_clas
    chosen = np.argmax(weighted, axis=1)
    rows = [header]
    for idx, text in enumerate(tst_texts):
        turns = text.split("eot")
        turns[0] = " ".join(turns[0].split()[1:])  # drop the leading BOS token
        rows.append([str(idx), turns[0].strip(), turns[1].strip(),
                     turns[2].strip(), CLASSES[chosen[idx]]])
    return rows
def write_sample():
    """Write the submission rows from create_output() to test.txt as
    tab-separated lines."""
    rows = create_output()
    with open("test.txt", "w", encoding="utf-8") as writer:
        writer.writelines("\t".join(row) + "\n" for row in rows)
write_sample()
# Scratch computation — presumably converting a log-scale value from an LR plot; confirm intent.
np.exp(3.225)
outputs = create_output()
# Spot-check: first data row's turn1 with its leading token dropped.
" ".join(outputs[1][1].split()[1:])
| Classification/EmoContext/Wt103basedSentiment140based.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: SageMath 8.0
# language: ''
# name: sagemath
# ---
from scipy.io import loadmat
# Load the system matrices (A, B) and the output map M from .mat files.
data = loadmat("iss.mat")
out = loadmat("out.mat")
A = data["A"]; B = data["B"]; M = out["M"]
n = A.shape[0]; m = B.shape[1]  # state and input dimensions
n, m
# +
import numpy as np
from scipy.integrate import odeint
A = A.toarray(); B = B.toarray()  # densify the sparse matrices from the .mat file
u = np.array([0.05, 0.9, -0.95])  # constant input vector
BU = np.dot(B, u)  # precompute B·u since u never changes
def deriv(x, t, A, BU):
    """Right-hand side of the LTI system for odeint: dx/dt = A·x + B·u,
    with B·u precomputed as BU (t is unused but required by odeint)."""
    return np.dot(A, x) + BU
# +
# %%time
delta = 0.01
time = np.arange(0, 20, delta)
x0 = np.dot(np.ones(n), -0.0001)  # uniform initial state of -1e-4
sol = odeint(deriv, x0, time, args=(A, BU))
# -
# NOTE(review): list_plot and `true` are SageMath built-ins (kernel is SageMath).
list_plot(zip(time, sol[:, 181]), plotjoined=true)
# Project the state trajectory through the output map M.
y1 = np.dot(M, np.transpose(sol)).flatten()
list_plot(zip(time, y1), plotjoined=true)
| models/SLICOT/iss/simulations.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Clustering experiment #1
# ========================
#
# Plan:
# -----
#
# * Derive a vector for left and righthand alignment variance for each contiguous block of text in each article (newspaper has pages, which hold articles, which consist of blocks).
# * Create a k=8 kmeans clustering using data from all newspapers from 1745 to 1756.
# * Visualise the clustering
# * Given the set of found poems, see into which clusters the poems get assigned.
# * Report on the spread of these and if a cluster is found which just has poems, report on all of the references within that cluster.
# +
# %matplotlib inline
# Load this library to make the graphs interactive for smaller samples
#import mpld3
#mpld3.enable_notebook()
# Turns out, multiple interactive scattergraphs with 170,000+ points each is a bit too much for a browser
# Who knew?!
# +
from clustering_capitals import create_cluster_dataset, NewspaperArchive
DBFILE = "1745-55.db"
n = NewspaperArchive(textareas="/datastore/burneytextareas")
# Sanity check: fetch one issue and inspect the first block of page 1, article 1.
a = n.get_areas(newspaper="B0574REMEMBRA", year = "1748", month = "03", day = "05")
pg1a1 = a['0001']['001'][0]
print(len(pg1a1['lines']), len(pg1a1['line_widths'][:len(pg1a1['lines'])-1]))
print(pg1a1['line_widths'][:len(pg1a1['lines'])-1][-1])
# +
# Get/create the dataset:
ds = create_cluster_dataset(n, daterange = [1745, 1755], dbfile = DBFILE) # , refresh = True)
# -
# What do these 'vectors' look like? What do the columns refer to?
# +
data, transform, id_list = ds
print(data)
print(transform.get_feature_names())
# -
# Going from a vector back to the metadata reference:
# ---------------------------------------------------
#
# By keeping an 'id_list', we can look up the identifier for any vector in the list from the database we've made for this clustering attempt. This lets us look up what the reference for that is, and where we can find it:
# +
from clustering_capitals import ClusterDB
db = ClusterDB(DBFILE)
# Round-trip one vector id back to its newspaper/date metadata.
item = dict(db.vecidtoitem(id_list[1]))
print(item)
print(transform.inverse_transform(data[1]))
from burney_data import BurneyDB
bdb = BurneyDB("burney.db")
titlemd = bdb.get_title_row(titleAbbreviation=item['newspaper'])
entry = bdb.get_entry_row(year=item['year'], month=item['month'], day=item['day'], title_id= titlemd['id'])
issue = bdb.get_issue_row(id=entry['issue_id'])
print(titlemd)
print(issue)
print(entry)
vector = db.vector(id_list[1])
print(dict(vector))
# Example feature dict pushed through the transform to see the vector layout.
mask = {'ave_lsp': 1.0, 'density':1.0, 'ltcount':0.0, 'redge_x2ave':0.0, 'st_caps':1.0,
        'st_nums':1.0, 'x1_var1':1.0, 'x1_var2':0.0, 'x1ave_ledge':0.0, 'x2_var1':1.0, 'x2_var2':0.0}
m_vec = transform.transform(mask)
print(m_vec)
# +
import numpy as np
from matplotlib import pyplot as plt
# Mask off leaving just the left and right variance columns
npdata = data.toarray()
mask = np.ones((11), dtype=bool)
# remember: ['ave_lsp', 'density', 'ltcount', 'redge_x2ave', 'st_caps',
# 'st_nums', 'x1_var1', 'x1_var2', 'x1ave_ledge', 'x2_var1', 'x2_var2']
mask[[0,1,2,3,4,5,7,8,10]] = False
marray = npdata[:,mask]
# -
# x1 vs x2 variance?
# ------------------
#
# What is the rough shape of this data? The variance of x1 and x2 describes how the left and right alignment of the text varies in a given block of text.
plt.scatter(marray[:,0], marray[:,1], marker = ".", s = [2] * len(marray), linewidths=[0.0] * len(marray))
plt.show()
# Attempting K-Means
# ==================
#
# What sort of clustering algorithm to employ is actually a good question. K-means can give fairly meaningless responses if the data is of a given sort. Generally, it can be useful but cannot be used blindly.
#
# Given the data above, it might be a good start however.
# +
# Build the clustering and show the individual clusters as best we can:
from sklearn.cluster import KMeans
cl_mask = np.ones((11), dtype=bool)
# remember: ['ave_lsp', 'density', 'ltcount', 'redge_x2ave', 'st_caps',
# 'st_nums', 'x1_var1', 'x1_var2', 'x1ave_ledge', 'x2_var1', 'x2_var2']
# so, we should cluster on ave_lsp, density, st_caps, st_nums, x1_var1, x2_var1:
cl_mask[[2,3,7,8,10]] = False
cl_marray = npdata[:,cl_mask]
estimator = KMeans(n_clusters=12)
clusters = estimator.fit(cl_marray)
labels = estimator.labels_  # cluster id per text block
def isol(label, labels):
    """Yield one colour per point: red for points in cluster *label*,
    dark grey for everything else (used as scatter's c= argument)."""
    # FIX: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement for the alias.
    for l in labels.astype(float):
        if l != label:
            yield "#444444"
        else:
            yield "#FF3355"
def highlight(label, labels):
    """Yield one marker size per point: 4 for points in cluster *label*,
    2 for everything else (used as scatter's s= argument)."""
    # FIX: np.float was removed in NumPy 1.24; use the builtin float instead.
    for l in labels.astype(float):
        if l != label:
            yield 2
        else:
            yield 4
# One scatter per cluster, highlighting that cluster's points in red.
for label in set(labels):
    print("Cluster: {0} - x1_var1 vs x2_var2".format(label))
    plt.scatter(cl_marray[:,4], cl_marray[:,5], c=list(isol(label, labels)), marker = ".",
                s = list(highlight(label, labels)), linewidths=[0.0] * len(marray))
    plt.show()
# -
# It looks like cluster 4 and perhaps cluster 11 are ones that should contain more complete poems than the rest if our assumptions are correct. Clump with very low x1 (lefthand edge) variance, but high x2 (right hand side).
#
# What do the other aspects of 4 and 11 look like?
# Pairwise feature plots for the two candidate "poem" clusters only.
for label in [4,11]:
    print("Cluster: {0} - ave_lp vs density".format(label))
    plt.scatter(cl_marray[labels == label,0], cl_marray[labels == label,1], marker = ".", linewidths=1)
    plt.show()
    print("Cluster: {0} - st_caps vs st_num".format(label))
    plt.scatter(cl_marray[labels == label,2], cl_marray[labels == label,3], marker = ".", linewidths=1)
    plt.show()
    print("Cluster: {0} - x1_var1 vs x2_var2".format(label))
    plt.scatter(cl_marray[labels == label,4], cl_marray[labels == label,5], marker = ".", linewidths=1)
    plt.show()
# Lets export this as a list of references to explore further - "clusterX.csv"
# +
import csv
def get_info(item_id):
    """Assemble a flat metadata record for one vector id.

    Merges (in order) the item row from the cluster DB, the Burney title,
    entry and issue rows, and the item's feature vector; later update()
    calls overwrite duplicate keys from earlier ones.
    """
    record = dict(db.vecidtoitem(item_id))
    vect = dict(db.vector(item_id))
    titlemd = bdb.get_title_row(titleAbbreviation=record['newspaper'])
    entry = bdb.get_entry_row(year=record['year'], month=record['month'], day=record['day'], title_id= titlemd['id'])
    issue = bdb.get_issue_row(id=entry['issue_id'])
    record.update(titlemd)
    record.update(entry)
    record.update(issue)
    record.update(vect)
    return record
# Export one CSV of metadata per cluster ("exp2_clusterX.csv").
for label in set(labels):
    print("Saving label {0}".format(label))
    # newline="" is the csv-module-documented way to open an output file,
    # preventing blank interleaved rows on Windows.
    with open("exp2_cluster{0}.csv".format(label), "w", newline="") as cfn:
        fields = ["title", "titleAbbreviation", "year", "month", "day",
                  "issueNumber", "printedDate", "page", "article", "block_number",
                  "filepath", "st_caps", "st_nums", "x1_var1", "x2_var1", "ltcount"]
        csvdoc = csv.DictWriter(cfn, fieldnames=fields)
        csvdoc.writeheader()  # replaces the manual header-as-data-row workaround
        count = 0
        for idx, vlabel in enumerate(labels):
            if idx % 1000 == 0:
                print("Tackling line {0} - saved {1} lines for this label".format(idx, count))
            if vlabel == label:
                record = get_info(id_list[idx])
                # Keep only the declared fields; extra record keys would make
                # DictWriter raise.
                csvdoc.writerow({field: record[field] for field in fields})
                count += 1
# -
| Cluster experiment 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from scipy import stats
# Counts per period for the two drugs (genta/ami) — presumably yearly totals; confirm with the data source.
genta = [22, 35, 14]
ami = [57, 80, 67]
# Pairwise slices used for the two-sample t-tests below.
genta_1_2 = [22, 35]
genta_2_3 = [35, 14]
ami_1_2 = [57, 80]
ami_2_3 = [80, 67]
# NOTE(review): a two-sample t-test on n=2 samples has essentially no power — confirm this is intended.
statistic, p_val = stats.ttest_ind(genta_1_2, genta_2_3)
print('GENTA\nStatistic: {}\np_val: {}'.format(statistic, p_val))
statistic, p_val = stats.ttest_ind(ami_1_2, ami_2_3)
print('AMI\nStatistic: {}\np_val: {}'.format(statistic, p_val))
# # ----------------------------------------------------------------------------------------------------
# 2x2 contingency tables; chi2_contingency applies Yates' continuity correction by default for 2x2 tables.
ami_2012_2014 = [[65, 86], [13, 51]]
chi2, p_val, _, _ = stats.chi2_contingency(ami_2012_2014)
print('chi2 ami_2012_2014:\nchi2: {}\np_val: {}'.format(chi2, p_val))
ami_2014_2016 = [[14, 27], [13, 51]]
chi2, p_val, _, _ = stats.chi2_contingency(ami_2014_2016)
print('chi2 ami_2014_2016:\nchi2: {}\np_val: {}'.format(chi2, p_val))
genta_2012_2014 = [[153, 43], [47, 25]]
chi2, p_val, _, _ = stats.chi2_contingency(genta_2012_2014)
print('chi2 genta_2012_2014:\nchi2: {}\np_val: {}'.format(chi2, p_val))
genta_2014_2016 = [[8, 50], [25, 47]]
chi2, p_val, _, _ = stats.chi2_contingency(genta_2014_2016)
print('chi2 genta_2014_2016:\nchi2: {}\np_val: {}'.format(chi2, p_val))
ami_QI = [[13, 28], [0, 26]]
chi2, p_val, _, _ = stats.chi2_contingency(ami_QI)
print('chi2 ami_QI:\nchi2: {}\np_val: {}'.format(chi2, p_val))
genta_QI = [[50, 8], [1, 16]]
chi2, p_val, _, _ = stats.chi2_contingency(genta_QI)
print('chi2 genta_QI:\nchi2: {}\np_val: {}'.format(chi2, p_val))
| notebooks/.ipynb_checkpoints/Stats-Carine-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Deepankarkr/Code-library/blob/master/D_4_4_1.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="MjSLF0ki8F6F" colab_type="text"
# # From Modeling to Evaluation
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="WisREyPc8F6M" colab_type="text"
# ## Introduction
#
# In this lab, we will continue learning about the data science methodology, and focus on the **Modeling** and **Evaluation** stages.
#
# ------------
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="6KRhK9fg8F6P" colab_type="text"
# ## Table of Contents
#
# 1. Recap
# 2. Data Modeling
# 3. Model Evaluation
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="CWcqwXBh8F6Q" colab_type="text"
# # Recap
#
# In Lab **From Understanding to Preparation**, we explored the data and prepared it for modeling.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="gNdIVjw98F6R" colab_type="text"
# The data was compiled by a researcher named <NAME>, who scraped tens of thousands of food recipes (cuisines and ingredients) from three different websites, namely:
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="ZqVjcoI88F6R" colab_type="text"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab4_fig1_allrecipes.png" >
#
# www.allrecipes.com
#
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab4_fig2_epicurious.png" >
#
# www.epicurious.com
#
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab4_fig3_menupan.png" >
#
# www.menupan.com
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="5nCP5f0d8F6S" colab_type="text"
# For more information on <NAME> and his research, you can read his paper on [Flavor Network and the Principles of Food Pairing](http://yongyeol.com/papers/ahn-flavornet-2011.pdf).
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="qjpLatbM8F6T" colab_type="text"
# <strong> Important note:</strong> Please note that you are not expected to know how to program in Python. This lab is meant to illustrate the stages of modeling and evaluation of the data science methodology, so it is totally fine if you do not understand the individual lines of code. We have a full course on programming in Python, <a href="http://cocl.us/PY0101EN_DS0103EN_LAB4_PYTHON_Coursera"><strong>Python for Data Science</strong></a>, which is also offered on Coursera. So make sure to complete the Python course if you are interested in learning how to program in Python.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="VV0d7YFh8F6U" colab_type="text"
# ### Using this notebook:
#
# To run any of the following cells of code, you can type **Shift + Enter** to execute the code in a cell.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="ekdMTyJh8F6V" colab_type="text"
# Download the library and dependencies that we will need to run this lab.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="GbHt_6zv8F6W" colab_type="code" colab={}
import pandas as pd # import library to read data into dataframe
pd.set_option("display.max_columns", None)
import numpy as np # import numpy library
import re # import library for regular expression
import random # library for random number generation
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="uZnePzyp8F6b" colab_type="text"
# We already placed the data on an IBM server for your convenience, so let's download it from server and read it into a dataframe called **recipes**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="Fzu3iMeH8F6c" colab_type="code" colab={}
# Download the recipes dataset from the IBM object-storage server and load it
# into a pandas DataFrame (network read, so this can be slow).
recipes = pd.read_csv("https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/data/recipes.csv")
print("Data read into dataframe!") # takes about 30 seconds
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="jOp-Ppuj8F6h" colab_type="text"
# We will repeat the preprocessing steps that we implemented in Lab **From Understanding to Preparation** in order to prepare the data for modeling. For more details on preparing the data, please refer to Lab **From Understanding to Preparation**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="jf0denGg8F6i" colab_type="code" colab={}
# Rename the first column (unnamed in the CSV) to "cuisine".
recipes.columns = ["cuisine"] + list(recipes.columns[1:])

# Normalize cuisine names to lower case.
recipes["cuisine"] = recipes["cuisine"].str.lower()

# Map country names (and a few spelling variants) onto consistent cuisine labels.
_CUISINE_NAME_MAP = {
    "austria": "austrian",
    "belgium": "belgian",
    "china": "chinese",
    "canada": "canadian",
    "netherlands": "dutch",
    "france": "french",
    "germany": "german",
    "india": "indian",
    "indonesia": "indonesian",
    "iran": "iranian",
    "italy": "italian",
    "japan": "japanese",
    "israel": "jewish",
    "korea": "korean",
    "lebanon": "lebanese",
    "malaysia": "malaysian",
    "mexico": "mexican",
    "pakistan": "pakistani",
    "philippines": "philippine",
    "scandinavia": "scandinavian",
    "spain": "spanish_portuguese",
    "portugal": "spanish_portuguese",
    "switzerland": "swiss",
    "thailand": "thai",
    "turkey": "turkish",
    "vietnam": "vietnamese",
    "uk-and-ireland": "uk-and-irish",
    "irish": "uk-and-irish",
}
recipes["cuisine"] = recipes["cuisine"].replace(_CUISINE_NAME_MAP)

# Keep only cuisines that occur in more than 50 recipes.
# NOTE(review): the original comment said cuisines with "< 50" recipes are
# removed, but the strict `> 50` condition also drops cuisines with exactly
# 50 -- behavior kept as-is here.
recipes_counts = recipes["cuisine"].value_counts()
cuisines_to_keep = recipes_counts[recipes_counts > 50].index.tolist()
recipes = recipes.loc[recipes["cuisine"].isin(cuisines_to_keep)]

# Encode the ingredient indicator columns: "Yes" -> 1, "No" -> 0.
recipes = recipes.replace({"Yes": 1, "No": 0})
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="ft55aleu8F6l" colab_type="text"
# <hr>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="0oW1MY048F6m" colab_type="text"
# # Data Modeling <a id="2"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="pxqOfu5b8F6m" colab_type="text"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab4_fig4_flowchart_data_modeling.png" >
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="Stdt8vnE8F6n" colab_type="text"
# Download and install more libraries and dependencies to build decision trees.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="BzWHzvwF8F6o" colab_type="code" colab={}
# import decision trees scikit-learn libraries
# %matplotlib inline
from sklearn import tree
from sklearn.metrics import accuracy_score, confusion_matrix
import matplotlib.pyplot as plt
# !conda install python-graphviz --yes
import graphviz
from sklearn.tree import export_graphviz
import itertools
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="Ij9b2Lgv8F6s" colab_type="text"
# Check the data again!
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="lsJOV8cy8F6t" colab_type="code" colab={}
# Preview the first rows of the preprocessed dataframe.
recipes.head()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="AtcJuZir8F6w" colab_type="text"
# ## [bamboo_tree] Only Asian and Indian Cuisines
#
# Here, we are creating a decision tree for the recipes for just some of the Asian (Korean, Japanese, Chinese, Thai) and Indian cuisines. The reason for this is because the decision tree does not run well when the data is biased towards one cuisine, in this case American cuisines. One option is to exclude the American cuisines from our analysis or just build decision trees for different subsets of the data. Let's go with the latter solution.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="VgE7laGA8F6x" colab_type="text"
# Let's build our decision tree using the data pertaining to the Asian and Indian cuisines and name our decision tree *bamboo_tree*.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="mrLlLnQF8F6y" colab_type="code" colab={}
# Restrict the recipes to the Asian and Indian cuisines of interest.
target_cuisines = ["korean", "japanese", "chinese", "thai", "indian"]
asian_indian_recipes = recipes[recipes.cuisine.isin(target_cuisines)]

cuisines = asian_indian_recipes["cuisine"]      # class labels
ingredients = asian_indian_recipes.iloc[:, 1:]  # ingredient indicator columns

# Fit a shallow (depth-3) decision tree; fit() returns the estimator itself.
bamboo_tree = tree.DecisionTreeClassifier(max_depth=3).fit(ingredients, cuisines)
print("Decision tree model saved to bamboo_tree!")
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="EWiZ98pK8F63" colab_type="text"
# Let's plot the decision tree and examine what it looks like.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="tyfxpgeN8F65" colab_type="code" colab={}
# Export the fitted tree to Graphviz DOT format (written to the working
# directory) and render it inline with the graphviz package.
export_graphviz(bamboo_tree,
                feature_names=list(ingredients.columns.values),
                out_file="bamboo_tree.dot",
                class_names=np.unique(cuisines),
                filled=True,
                node_ids=True,
                special_characters=True,
                impurity=False,
                label="all",
                leaves_parallel=False)
# Read the DOT description back and let graphviz render it in the notebook.
with open("bamboo_tree.dot") as bamboo_tree_image:
    bamboo_tree_graph = bamboo_tree_image.read()
graphviz.Source(bamboo_tree_graph)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="_G2URYta8F69" colab_type="text"
# The decision tree learned:
# * If a recipe contains *cumin* and *fish* and **no** *yoghurt*, then it is most likely a **Thai** recipe.
# * If a recipe contains *cumin* but **no** *fish* and **no** *soy_sauce*, then it is most likely an **Indian** recipe.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="hJ88gL6Y8F6-" colab_type="text"
# You can analyze the remaining branches of the tree to come up with similar rules for determining the cuisine of different recipes.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="YCy7zg668F6_" colab_type="text"
# Feel free to select another subset of cuisines and build a decision tree of their recipes. You can select some European cuisines and build a decision tree to explore the ingredients that differentiate them.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="pf6DqkbA8F7A" colab_type="text"
# # Model Evaluation <a id="4"></a>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="_7BfRXIa8F7A" colab_type="text"
# <img src="https://s3-api.us-geo.objectstorage.softlayer.net/cf-courses-data/CognitiveClass/DS0103EN/labs/images/lab4_fig5_flowchart_evaluation.png" >
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="0qqWad2X8F7B" colab_type="text"
# To evaluate our model of Asian and Indian cuisines, we will split our dataset into a training set and a test set. We will build the decision tree using the training set. Then, we will test the model on the test set and compare the cuisines that the model predicts to the actual cuisines.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="0zEWCmrw8F7C" colab_type="text"
# Let's first create a new dataframe using only the data pertaining to the Asian and the Indian cuisines, and let's call the new dataframe **bamboo**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="uLfjh-dz8F7D" colab_type="code" colab={}
# Subset of recipes limited to the Asian and Indian cuisines under study.
bamboo = recipes[recipes.cuisine.isin(["korean", "japanese", "chinese", "thai", "indian"])]
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="JowA_BSv8F7H" colab_type="text"
# Let's see how many recipes exist for each cuisine.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="V6VoTTQ08F7J" colab_type="code" colab={}
bamboo["cuisine"].value_counts()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="pYa-lUx98F7M" colab_type="text"
# Let's remove 30 recipes from each cuisine to use as the test set, and let's name this test set **bamboo_test**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="LrVtod-L8F7O" colab_type="code" colab={}
# set sample size: number of recipes held out per cuisine for testing
sample_n = 30
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="giGDj4sg8F7S" colab_type="text"
# Create a dataframe containing 30 recipes from each cuisine, selected randomly.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="FG9UTDIZ8F7T" colab_type="code" colab={}
# take 30 recipes from each cuisine, selected at random
# FIX: the original called random.seed(1234), which seeds Python's built-in
# `random` module only. pandas' DataFrame.sample draws from NumPy's global
# RNG, so that seed had no effect and the train/test split was not actually
# reproducible. Seed NumPy instead.
np.random.seed(1234)  # set random seed for pandas/NumPy sampling
bamboo_test = bamboo.groupby("cuisine", group_keys=False).apply(lambda x: x.sample(sample_n))

bamboo_test_ingredients = bamboo_test.iloc[:, 1:]   # ingredients
bamboo_test_cuisines = bamboo_test["cuisine"]       # corresponding cuisines or labels
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="4QkqFFcy8F7X" colab_type="text"
# Check that there are 30 recipes for each cuisine.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="73NKe2SF8F7a" colab_type="code" colab={}
# check that we have 30 recipes from each cuisine
bamboo_test["cuisine"].value_counts()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="HjPdbXJh8F7d" colab_type="text"
# Next, let's create the training set by removing the test set from the **bamboo** dataset, and let's call the training set **bamboo_train**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="Up1hy0z68F7e" colab_type="code" colab={}
# Training split: every bamboo recipe that was not drawn into the test set.
bamboo_train = bamboo[~bamboo.index.isin(bamboo_test.index)]

bamboo_train_ingredients = bamboo_train.drop("cuisine", axis=1)  # ingredients
bamboo_train_cuisines = bamboo_train["cuisine"]                  # corresponding cuisines or labels
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="qOu6oLJz8F7h" colab_type="text"
# Check that there are 30 _fewer_ recipes now for each cuisine.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="vgB4aV6y8F7i" colab_type="code" colab={}
bamboo_train["cuisine"].value_counts()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="mkkMGDJY8F7l" colab_type="text"
# Let's build the decision tree using the training set, **bamboo_train**, and name the generated tree **bamboo_train_tree** for prediction.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="N4FCKqB18F7n" colab_type="code" colab={}
# Train a deeper (depth-15) tree on the training split; fit() returns the
# estimator itself, so construction and fitting can be chained.
bamboo_train_tree = tree.DecisionTreeClassifier(max_depth=15).fit(
    bamboo_train_ingredients, bamboo_train_cuisines
)
print("Decision tree model saved to bamboo_train_tree!")
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="DsrTWtbC8F7p" colab_type="text"
# Let's plot the decision tree and explore it.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="OdXUm6kL8F7q" colab_type="code" colab={}
# Export the deeper training tree to Graphviz DOT format and render it inline.
export_graphviz(bamboo_train_tree,
                feature_names=list(bamboo_train_ingredients.columns.values),
                out_file="bamboo_train_tree.dot",
                class_names=np.unique(bamboo_train_cuisines),
                filled=True,
                node_ids=True,
                special_characters=True,
                impurity=False,
                label="all",
                leaves_parallel=False)
# Read the DOT description back and render it with graphviz.
with open("bamboo_train_tree.dot") as bamboo_train_tree_image:
    bamboo_train_tree_graph = bamboo_train_tree_image.read()
graphviz.Source(bamboo_train_tree_graph)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="P7E9c7tT8F7t" colab_type="text"
# Now that we defined our tree to be deeper, more decision nodes are generated.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="I4SIfoe28F7u" colab_type="text"
# #### Now let's test our model on the test data.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="PTDjbrAc8F7u" colab_type="code" colab={}
# Predict cuisines for the held-out test ingredients with the trained tree.
bamboo_pred_cuisines = bamboo_train_tree.predict(bamboo_test_ingredients)
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="xdcek--f8F7y" colab_type="text"
# To quantify how well the decision tree is able to determine the cuisine of each recipe correctly, we will create a confusion matrix which presents a nice summary on how many recipes from each cuisine are correctly classified. It also sheds some light on what cuisines are being confused with what other cuisines.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="a_onYDI58F70" colab_type="text"
# So let's go ahead and create the confusion matrix for how well the decision tree is able to correctly classify the recipes in **bamboo_test**.
# + button=false deletable=true new_sheet=false run_control={"read_only": false} id="ibu3jElf8F71" colab_type="code" colab={}
test_cuisines = np.unique(bamboo_test_cuisines)
# FIX: pass `labels` by keyword -- positional use of confusion_matrix's third
# argument was deprecated in scikit-learn 0.21 and later removed, so the old
# call crashes on current versions.
bamboo_confusion_matrix = confusion_matrix(bamboo_test_cuisines, bamboo_pred_cuisines, labels=test_cuisines)

title = 'Bamboo Confusion Matrix'
cmap = plt.cm.Blues
plt.figure(figsize=(8, 6))

# Convert raw counts to row-wise percentages: each row (true cuisine) sums to 100.
bamboo_confusion_matrix = (
    bamboo_confusion_matrix.astype('float') / bamboo_confusion_matrix.sum(axis=1)[:, np.newaxis]
) * 100

plt.imshow(bamboo_confusion_matrix, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(test_cuisines))
plt.xticks(tick_marks, test_cuisines)
plt.yticks(tick_marks, test_cuisines)

fmt = '.2f'
thresh = bamboo_confusion_matrix.max() / 2.
# Annotate every cell; white text on dark (above-threshold) cells for contrast.
for i, j in itertools.product(range(bamboo_confusion_matrix.shape[0]), range(bamboo_confusion_matrix.shape[1])):
    plt.text(j, i, format(bamboo_confusion_matrix[i, j], fmt),
             horizontalalignment="center",
             color="white" if bamboo_confusion_matrix[i, j] > thresh else "black")

plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="4cc77D5-8F7_" colab_type="text"
# After running the above code, you should get a confusion matrix similar to the following:
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="LCTUSV4m8F8A" colab_type="text"
# <img src="https://ibm.box.com/shared/static/69f5m7txv2u6g47867qe0eypnfylrj4w.png">
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="PlyGtOIO8F8C" colab_type="text"
# The rows represent the actual cuisines from the dataset and the columns represent the predicted ones. Each row should sum to 100%. According to this confusion matrix, we make the following observations:
#
# * Using the first row in the confusion matrix, 60% of the **Chinese** recipes in **bamboo_test** were correctly classified by our decision tree whereas 37% of the **Chinese** recipes were misclassified as **Korean** and 3% were misclassified as **Indian**.
#
# * Using the Indian row, 77% of the **Indian** recipes in **bamboo_test** were correctly classified by our decision tree and 3% of the **Indian** recipes were misclassified as **Chinese** and 13% were misclassified as **Korean** and 7% were misclassified as **Thai**.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="nXjXj6x58F8D" colab_type="text"
# **Please note** that because decision trees are created using random sampling of the datapoints in the training set, then you may not get the same results every time you create the decision tree even using the same training set. The performance should still be comparable though! So don't worry if you get slightly different numbers in your confusion matrix than the ones shown above.
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="0xCkJqye8F8F" colab_type="text"
# Using the reference confusion matrix, how many **Japanese** recipes were correctly classified by our decision tree?
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="XFLL9-tz8F8G" colab_type="raw"
# Your Answer:
#
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="eqKmvs4U8F8H" colab_type="text"
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# 36.67%.
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="yZZReIcu8F8J" colab_type="text"
# Also using the reference confusion matrix, how many **Korean** recipes were misclassified as **Japanese**?
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="HPuTiROa8F8L" colab_type="raw"
# Your Answer:
#
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="K63rgyY-8F8M" colab_type="text"
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# 3.33%.
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="MYQLMT1o8F8R" colab_type="text"
# What cuisine has the least number of recipes correctly classified by the decision tree using the reference confusion matrix?
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="XRt1K6Gd8F8S" colab_type="raw"
# Your Answer:
#
#
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="O6oV1thM8F8T" colab_type="text"
# Double-click __here__ for the solution.
# <!-- The correct answer is:
# Japanese cuisine, with 36.67% only.
# -->
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="tQuoCMml8F8V" colab_type="text"
# <br>
# <hr>
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="6JsUnRTB8F8W" colab_type="text"
# ### Thank you for completing this lab!
#
# This notebook was created by <NAME>klson . We hope you found this lab session interesting. Feel free to contact us if you have any questions!
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="AJZf3Nhm8F8X" colab_type="text"
# This notebook is part of a course on **Coursera** called *Data Science Methodology*. If you accessed this notebook outside the course, you can take this course, online by clicking [here](https://cocl.us/DS0103EN_Coursera_LAB4).
# + [markdown] button=false deletable=true new_sheet=false run_control={"read_only": false} id="TDXRdrng8F8Z" colab_type="text"
# <hr>
#
# Copyright © 2019 [Cognitive Class](https://cognitiveclass.ai/?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
| D_4_4_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from __future__ import division
import numpy as np
#from sklearn.gaussian_process import GaussianProcess
from scipy.optimize import minimize
from acquisition_functions import AcquisitionFunction, unique_rows
#from visualization import Visualization
from prada_gaussian_process import PradaGaussianProcess
from prada_gaussian_process import PradaMultipleGaussianProcess
from acquisition_maximization import acq_max_nlopt
from acquisition_maximization import acq_max_direct
from acquisition_maximization import acq_max
from sklearn.metrics.pairwise import euclidean_distances
import time
# -
class PradaBayOptFn(object):
    """Bayesian optimization of a black-box function using a Prada Gaussian
    Process surrogate.

    NOTE(review): relies on the project-local modules imported above
    (PradaGaussianProcess, acquisition functions and maximizers).
    """

    def __init__(self, f, pbounds, acq='ei', verbose=1, opt='nlopt'):
        """
        Input parameters
        ----------
        f: function to optimize
        pbounds: bounds on parameters, either a dict {name: (lo, hi)} or an
            array-like of (lo, hi) pairs
        acq: acquisition function, 'ei', 'ucb'
        opt: optimization toolbox, 'nlopt','direct','scipy'

        Attributes set
        -------
        dim: dimension
        bounds: bounds on original scale
        scalebounds: bounds on normalized scale of 0-1
        time_opt: will record the time spent on optimization
        gp: Gaussian Process object
        """
        # Find number of parameters
        self.dim = len(pbounds)

        # Create an array with parameters bounds
        if isinstance(pbounds,dict):
            # Get the name of the parameters; dict insertion order determines
            # the dimension ordering used throughout.
            self.keys = list(pbounds.keys())
            self.bounds = []
            for key in pbounds.keys():
                self.bounds.append(pbounds[key])
            self.bounds = np.asarray(self.bounds)
        else:
            self.bounds=np.asarray(pbounds)

        # create a scalebounds 0-1: one (0, 1) row per dimension
        scalebounds=np.array([np.zeros(self.dim), np.ones(self.dim)])
        self.scalebounds=scalebounds.T

        # per-dimension range, used to rescale between original and 0-1 scale
        self.max_min_gap=self.bounds[:,1]-self.bounds[:,0]

        # Some function to be optimized
        self.f = f
        # optimization toolbox
        self.opt=opt
        # acquisition function type
        self.acq=acq

        # store X in original scale
        self.X_original= None
        # store X in 0-1 scale
        self.X = None
        # store y=f(x)
        self.Y = None

        # cumulative time spent maximizing the acquisition function
        self.time_opt=0
        # number of neighbors used elsewhere (e.g. Lipschitz estimation)
        self.k_Neighbor=2
        # Lipschitz constant
        self.L=0

        # Gaussian Process class
        # NOTE(review): this stores the class itself, not an instance --
        # presumably replaced by a fitted instance before posterior() is
        # called; confirm against the optimization driver.
        self.gp=PradaGaussianProcess
        # acquisition function
        self.acq_func = None

    # will be later used for visualization
    def posterior(self, Xnew):
        # Refit the GP on all observed data, then return the posterior mean
        # and standard deviation (sqrt of the predictive variance) at Xnew.
        self.gp.fit(self.X, self.Y)
        mu, sigma2 = self.gp.predict(Xnew, eval_MSE=True)
        return mu, np.sqrt(sigma2)

    def init(self, gp_params, n_init_points=3):
        """
        Draw random initial design points and evaluate the objective.

        Input parameters
        ----------
        gp_params: Gaussian Process structure (currently unused here)
        n_init_points: # init points
        """
        # Generate random points: one uniform sample per dimension
        l = [np.random.uniform(x[0], x[1], size=n_init_points) for x in self.bounds]

        # Concatenate new random points to possible existing
        # points from self.explore method.
        # Transpose so rows are points, columns are dimensions.
        temp=np.asarray(l)
        temp=temp.T
        init_X=list(temp.reshape((n_init_points,-1)))
        self.X_original = np.asarray(init_X)

        # Evaluate target function at all initialization points
        y_init=self.f(init_X)
        y_init=np.reshape(y_init,(n_init_points,1))
        self.Y = np.asarray(y_init)

        # convert it to scaleX: map each coordinate into the unit hypercube
        temp_init_point=np.divide((init_X-self.bounds[:,0]),self.max_min_gap)
        self.X = np.asarray(temp_init_point)
import ppo_functions as pfunctions
import numpy as np
# +
# Instantiate the project-local objective and inspect its bounds.
myfunction = pfunctions.PpoImport()
pbounds = myfunction.bounds
print (myfunction.bounds)
print (len(myfunction.bounds))

# GP hyperparameters: kernel length-scale theta and observation noise.
gp_params = {'theta':0.1*2,'noise_delta':0.1}
print (gp_params)

# Create an array with parameters bounds
# (mirrors the logic in PradaBayOptFn.__init__ for manual exploration)
if isinstance(pbounds,dict):
    # Get the name of the parameters
    keys = list(pbounds.keys())
    bounds = []
    for key in pbounds.keys():
        bounds.append(pbounds[key])
    bounds = np.asarray(bounds)
else:
    bounds=np.asarray(pbounds)
print (bounds)
# -
print (myfunction.name)
# +
# Generate random points: one uniform sample per dimension, 3 points each
l = [np.random.uniform(x[0], x[1], size=3) for x in bounds]
n_init_points = 3
print (l)

# Transpose so each row is one candidate point.
temp=np.asarray(l)
temp=temp.T
init_X=list(temp.reshape((n_init_points,-1)))
print (init_X)
X_original = np.asarray(init_X)

# Evaluate target function at all initialization points
y_init=myfunction.func(init_X)
print ("y_init: ", y_init)
y_init=np.reshape(y_init,(n_init_points,1))
Y = np.asarray(y_init)
# +
# Rescale the initial points into the unit hypercube [0, 1]^dim.
max_min_gap=bounds[:,1]-bounds[:,0]
temp_init_point=np.divide((init_X-bounds[:,0]),max_min_gap)
print ("Temp: ", temp_init_point)
X = np.asarray(temp_init_point)
print ("X", X)
# +
# NOTE(review): this scratch cell is a copy of the body of
# PradaBayOptFn.init pasted at notebook top level. It still references
# `self`, which is undefined here, so running it raises NameError at the
# first `self.X_original` assignment. Kept for reference only.
# def init(self, gp_params, n_init_points=3):
"""
Input parameters
----------
gp_params: Gaussian Process structure
n_init_points: # init points
"""
# # Generate random points
# l = [np.random.uniform(x[0], x[1], size=3) for x in myfunction.bounds]
# print (l)
# Concatenate new random points to possible existing
# points from self.explore method.
temp=np.asarray(l)
temp=temp.T
init_X=list(temp.reshape((n_init_points,-1)))
self.X_original = np.asarray(init_X)
# Evaluate target function at all initialization
y_init=self.f(init_X)
y_init=np.reshape(y_init,(n_init_points,1))
self.Y = np.asarray(y_init)
# convert it to scaleX
temp_init_point=np.divide((init_X-self.bounds[:,0]),self.max_min_gap)
self.X = np.asarray(temp_init_point)
# -
# -
| ICDM2016_B3O/prada_bayes_opt/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbsphinx="hidden"
# # Quantization of Signals
#
# *This jupyter notebook is part of a [collection of notebooks](../index.ipynb) on various topics of Digital Signal Processing. Please direct questions and suggestions to [<EMAIL>](mailto:<EMAIL>).*
# -
# ## Quantization Error of a Linear Uniform Quantizer
#
# As illustrated in the [preceding section](linear_uniform_characteristic.ipynb), quantization results in two different types of distortions. Overload distortions are a consequence of exceeding the minimum/maximum amplitude of the quantizer. Granular distortions are a consequence of the quantization process when no clipping occurs. Various measures are used to quantify the distortions of a quantizer. We limit ourselves to the signal-to-noise ratio as commonly used measure.
# ### Signal-to-Noise Ratio
#
# A quantizer can be evaluated by its [signal-to-noise ratio](https://en.wikipedia.org/wiki/Signal-to-noise_ratio) (SNR), which is defined as the power of the continuous amplitude signal $x[k]$ divided by the power of the quantization error $e[k]$. Under the assumption that both signals are drawn from a zero-mean wide-sense stationary (WSS) process, the average SNR is given as
#
# \begin{equation}
# SNR = 10 \cdot \log_{10} \left( \frac{\sigma_x^2}{\sigma_e^2} \right) \quad \text{ in dB}
# \end{equation}
#
# where $\sigma_x^2$ and $\sigma_e^2$ denote the variances of the signals $x[k]$ and $e[k]$, respectively. The SNR quantifies the average impact of the distortions introduced by quantization. The statistical properties of the signal $x[k]$ and the quantization error $e[k]$ are required in order to evaluate the SNR of a quantizer. First, a statistical model for the quantization error is introduced.
# ### Model for the Quantization Error
#
# In order to derive the statistical properties of the quantization error, the probability density functions (PDFs) of the quantized signal $x_\text{Q}[k]$ and the error $e[k]$, as well as its bivariate PDFs have to be derived. The underlying calculus is quite tedious due to the nonlinear nature of quantization. Please refer to [[Widrow](../index.ipynb#Literature)] for a detailed treatment. The resulting model is summarized in the following. We focus on the non-clipping case $x_\text{min} \leq x[k] < x_\text{max}$ first, hence on granular distortions. Here the quantization error is in general bounded $|e[k]| < \frac{Q}{2}$.
#
# Under the assumption that the input signal has a wide dynamic range compared to the quantization step size $Q$, the quantization error $e[k]$ can be approximated by the following statistical model
#
# 1. The quantization error $e[k]$ is not correlated with the input signal $x[k]$
#
# 2. The quantization error is [white](../random_signals/white_noise.ipynb)
#
# $$ \Phi_{ee}(\mathrm{e}^{\,\mathrm{j}\,\Omega}) = \sigma_e^2 $$
#
# 3. The probability density function (PDF) of the quantization error is given by the zero-mean [uniform distribution](../random_signals/important_distributions.ipynb#Uniform-Distribution)
#
# $$ p_e(\theta) = \frac{1}{Q} \cdot \text{rect} \left( \frac{\theta}{Q} \right) $$
#
# The variance of the quantization error is then [derived from its PDF](../random_signals/important_distributions.ipynb#Uniform-Distribution) as
#
# \begin{equation}
# \sigma_e^2 = \frac{Q^2}{12}
# \end{equation}
#
# Let's assume that the quantization index is represented as binary or [fixed-point number](https://en.wikipedia.org/wiki/Fixed-point_arithmetic) with $w$-bits. The common notation for the mid-tread quantizer is that $x_\text{min}$ can be represented exactly. Half of the $2^w$ quantization indexes is used for the negative signal values, the other half for the positive ones including zero. The quantization step is then given as
#
# \begin{equation}
# Q = \frac{ |x_\text{min}|}{2^{w-1}} = \frac{ x_\text{max}}{2^{w-1} - 1}
# \end{equation}
#
# where $x_\text{max} = |x_\text{min}| - Q$. Introducing the quantization step, the variance of the quantization error can be expressed by the word length $w$ as
#
# \begin{equation}
# \sigma_e^2 = \frac{x^2_\text{max}}{3 \cdot 2^{2w}}
# \end{equation}
#
# The average power of the quantization error quarters per additional bit spend. Introducing the variance into the definition of the SNR yields
#
# \begin{equation}
# \begin{split}
# SNR &= 10 \cdot \log_{10} \left( \frac{3 \sigma_x^2}{x^2_\text{max}} \right) + 10 \cdot \log_{10} \left( 2^{2w} \right) \\
# & \approx 10 \cdot \log_{10} \left( \frac{3 \sigma_x^2}{x^2_\text{max}} \right) + 6.02 w \quad \text{in dB}
# \end{split}
# \end{equation}
#
# It now can be concluded that the SNR decays approximately by 6 dB per additional bit spend. This is often referred to as the 6 dB per bit rule of thumb for linear uniform quantization. Note, this holds only under the assumptions stated above.
# ### Uniformly Distributed Signal
#
# A statistical model for the input signal $x[k]$ is required in order to calculate the average SNR of a linear uniform quantizer. For a signal that conforms to a zero-mean uniform distribution and under the assumption $x_\text{max} \gg Q$ its PDF is given as
#
# \begin{equation}
# p_x(\theta) = \frac{1}{2 x_\text{max}} \text{rect}\left( \frac{\theta}{2 x_\text{max}} \right)
# \end{equation}
#
# Hence, all amplitudes between $-x_\text{max}$ and $x_\text{max}$ occur with the same probability. The variance of the signal is then calculated to
#
# \begin{equation}
# \sigma_x^2 = \frac{4 x_\text{max}^2}{12}
# \end{equation}
#
# Introducing $\sigma_x^2$ and $\sigma_e^2$ into the definition of the SNR yields
#
# \begin{equation}
# SNR = 10 \cdot \log_{10} \left( 2^{2 w} \right) \approx 6.02 \, w \quad \text{in dB}
# \end{equation}
#
# The word length $w$ and resulting SNRs for some typical digital signal representations are
#
# | | $w$ | SNR |
# |----|:----:|:----:|
# | Compact Disc (CD) | 16 bit | 96 dB |
# | Digital Video Disc (DVD) | 24 bit | 144 dB |
# | Video Signals | 8 bit | 48 dB |
#
# Note that the SNR values hold only if the continuous amplitude signal conforms reasonably well to a uniform PDF and if it uses the full amplitude range of the quantizer. If the latter is not the case this can be considered by introducing the level $0 < A \leq 1$ into above considerations, such that $x_\text{min} \leq \frac{x[k]}{A} < x_\text{max}$. The resulting variance is given as
#
# \begin{equation}
# \sigma_x^2 = \frac{4 x_\text{max}^2 A^2}{12}
# \end{equation}
#
# introduced into the definition of the SNR yields
#
# \begin{equation}
# SNR = 10 \cdot \log_{10} \left( 2^{2 w} \right) + 20 \cdot \log_{10} ( A ) \approx 6.02 \, w + 20 \cdot \log_{10} ( A ) \quad \text{in dB}
# \end{equation}
#
# From this it can be concluded that a level of -6 dB is equivalent to a loss of one bit in terms of SNR of the quantized signal.
# #### Example - Quantization of a uniformly distributed signal
#
# In this example the linear uniform quantization of a random signal drawn from a uniform distribution is evaluated. The amplitude range of the quantizer is $x_\text{min} = -1$ and $x_\text{max} = 1 - Q$.
# +
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import scipy.signal as sig
w = 8 # wordlength of the quantized signal (bits per sample)
xmin = -1 # minimum amplitude of input signal
N = 8192 # number of samples
K = 30 # maximum lag for cross-correlation
def uniform_midtread_quantizer(x, Q):
    """Linear uniform midtread quantizer with saturation.

    x : input signal (array-like); not modified, a copy is quantized.
    Q : quantization step size.
    Returns the quantized signal, clamped to the range [-1, 1 - Q].
    """
    clipped = np.copy(x)
    # limiter: saturate everything outside the quantizer's amplitude range
    clipped[clipped <= -1] = -1
    clipped[clipped > 1 - Q] = 1 - Q
    # linear uniform quantization: round to the nearest multiple of Q
    return Q * np.floor(clipped / Q + 0.5)
def analyze_quantizer(x, e):
    """Print the SNR and plot statistical properties of the quantization error.

    x : input signal
    e : quantization error, e = xQ - x

    NOTE(review): relies on the module-level globals Q (quantization step),
    N (number of samples) and K (maximum cross-correlation lag).
    """
    # estimated PDF of error signal
    # BUG FIX: the `normed` keyword was deprecated and removed in NumPy >= 1.24;
    # `density=True` yields the properly normalized probability density.
    pe, bins = np.histogram(e, bins=20, density=True, range=(-Q, Q))

    # estimate cross-correlation between input and error
    ccf = 1/len(x) * np.correlate(x, e, mode='full')
    # estimate PSD of error signal
    nf, Pee = sig.welch(e, nperseg=128)
    # estimate SNR from the sample variances
    SNR = 10*np.log10((np.var(x)/np.var(e)))
    print('SNR = %f in dB' %SNR)

    # plot statistical properties of error signal
    plt.figure(figsize=(9, 4))
    plt.subplot(121)
    plt.bar(bins[:-1]/Q, pe*Q, width=2/len(pe))
    plt.title('Estimated histogram of quantization error')
    plt.xlabel(r'$\theta / Q$')
    plt.ylabel(r'$\hat{p}_x(\theta) / Q$')
    plt.axis([-1, 1, 0, 1.2])

    plt.subplot(122)
    plt.plot(nf*2*np.pi, Pee*6/Q**2)
    plt.title('Estimated PSD of quantization error')
    plt.xlabel(r'$\Omega$')
    plt.ylabel(r'$\hat{\Phi}_{ee}(e^{j \Omega}) / \sigma_e^2$')
    plt.axis([0, np.pi, 0, 2])
    plt.tight_layout()

    plt.figure(figsize=(10, 6))
    # keep only the lags -K .. K-1 of the full-length (2N-1) cross-correlation
    ccf = ccf[N-K-1:N+K-1]
    kappa = np.arange(-len(ccf)//2, len(ccf)//2)
    plt.stem(kappa, ccf)
    plt.title('Cross-correlation function between input signal and error')
    plt.xlabel(r'$\kappa$')
    plt.ylabel(r'$\varphi_{xe}[\kappa]$')
# quantization step: amplitude range [-1, 1) divided into 2**w levels
Q = 1/(2**(w-1))
# compute input signal: uniform over the full quantizer range,
# fixed seed for reproducibility
np.random.seed(1)
x = np.random.uniform(size=N, low=xmin, high=(-xmin-Q))
# quantize signal and form the error
xQ = uniform_midtread_quantizer(x, Q)
e = xQ - x
# analyze quantizer
analyze_quantizer(x, e)
# -
# **Exercise**
#
# * Change the number of bits `w` and check if the derived SNR holds
# * How does the SNR change if you lower the magnitude of the minimum amplitude `xmin` of the input signal?
# * What happens if you chose the magnitude of the minimum amplitude `xmin` in the range of the quantization step? Why?
#
# Solution: The numerically computed SNR conforms well to the theoretic result derived above. Lowering the magnitude of the minimum amplitude results in a lower SNR as predicted above. The input signal $x[k]$ is correlated to the quantization error $e[k]$ if the magnitude of the minimum amplitude is lowered such that it is close to the quantization step. Here the assumptions made for the statistical model of the quantization error do not hold.
# ### Harmonic Signal
#
# For a harmonic input signal $x[k] = x_\text{max} \cdot \cos[\Omega_0 k]$ the variance $\sigma_x^2$ is given by its squared [root mean square](https://en.wikipedia.org/wiki/Root_mean_square) (RMS) value
#
# \begin{equation}
# \sigma_x^2 = \frac{x_\text{max}^2}{2}
# \end{equation}
#
# Introducing this into the definition of the SNR together with the variance $\sigma_e^2$ of the quantization error yields
#
# \begin{equation}
# SNR = 10 \cdot \log_{10} \left(2^{2 w} \cdot \frac{3}{2} \right) \approx 6.02 \, w + 1.76 \quad \text{in dB}
# \end{equation}
#
# The gain of 1.76 dB with respect to the case of a uniformly distributed input signal is due to the fact that the amplitude distribution of a harmonic signal is not uniform
#
# \begin{equation}
# p_x(\theta) = \frac{1}{\pi \sqrt{1 - (\frac{\theta}{x_\text{max}})^2}}
# \end{equation}
#
# for $|\theta| < x_\text{max}$. High amplitudes are more likely to occur. The relative power of the quantization error is lower for higher amplitudes which results in an increase of the average SNR.
# ### Normally Distributed Signal
#
# So far, we did not consider clipping of the input signal $x[k]$, e.g. by ensuring that its minimum/maximum values do not exceed the limits of the quantizer. However, this cannot always be ensured for practical signals. Moreover, many practical signals cannot be modeled as a uniform distribution. For instance, a [normally distributed](../random_signals/important_distributions.ipynb#Normal-Distribution) random signal exceeds a given maximum value with non-zero probability. Hence, clipping will occur for such an input signal. Clipping results in overload distortions whose amplitude can be much higher than $\frac{Q}{2}$. For the overall average SNR both granular and overload distortions have to be included.
#
# The root mean square (RMS) of the normally distributed input signal is given by its standard deviation $\sigma_x$. The RMS level $A$ of the input signal, normalized to the maximum level of the quantizer, is given as
#
# \begin{equation}
# A = \frac{\sigma_x}{x_\text{max}}
# \end{equation}
#
# The probability that clipping occurs can be derived from the [cumulative distribution function](../random_signals/important_distributions.ipynb#Normal-Distribution) (CDF) of the normal distribution as
#
# \begin{equation}
# \Pr \{ |x[k]| > x_\text{max} \} = 1 + \text{erf} \left( \frac{-1}{\sqrt{2} A} \right)
# \end{equation}
#
# where $x_\text{max} = - x_\text{min}$ was assumed. For a normally distributed signal with a given probability that clipping occurs $\Pr \{ |x[k]| > x_\text{max} \} = 10^{-5}$ the SNR can be approximately calculated to [[Zölzer](../index.ipynb#Literature)]
#
# \begin{equation}
# SNR \approx 6.02 \, w - 8.5 \quad \text{in dB}
# \end{equation}
#
# The reduction of the SNR by 8.5 dB results from the fact that small signal values are more likely to occur for a normally distributed signal. The relative quantization error for small signals is higher, which results in a lower average SNR. Overload distortions due to clipping result in a further reduction of the average SNR.
# #### Example - Quantization of a normal distributed signal
#
# The following example evaluates the SNR of a linear uniform quantizer with $w=8$ for a normally distributed signal $x[k]$. The SNR is computed and plotted for various RMS levels, the probabilities for clipping are shown additionally.
# +
from scipy.special import erf
w = 8 # wordlength of the quantizer
A = np.logspace(-2, 0, num=500) # RMS levels (linear), i.e. -40 dB .. 0 dB
N = int(1e6) # number of samples
np.random.seed(1)  # reproducible random input signals
def compute_SNR(a):
    """Numerically estimate the quantization SNR in dB for a normal signal.

    a : RMS level (standard deviation) of the zero-mean normal input.
    Relies on the globals N (number of samples) and Q (quantization step)
    and on uniform_midtread_quantizer defined above.
    """
    # draw the input signal
    signal = np.random.normal(size=N, scale=a)
    # quantize it and form the error signal
    quantized = uniform_midtread_quantizer(signal, Q)
    error = quantized - signal
    # signal-to-noise ratio in dB
    return 10 * np.log10(np.var(signal) / np.var(error))
def plot_SNR(A, SNR):
    """Plot the SNR in dB over the RMS level in dB.

    A : RMS levels on a linear scale; plotted as 20*log10(A).
    SNR : corresponding SNR values in dB, same length as A.
    """
    # plot results
    plt.figure(figsize=(8,4))
    plt.plot(20*np.log10(A), SNR)
    plt.xlabel(r'RMS level $\sigma_x / x_\mathrm{min}$ in dB')
    plt.ylabel('SNR in dB')
    plt.grid()
# quantization step for wordlength w
Q = 1/(2**(w-1))
# compute SNR for given RMS levels
SNR = [compute_SNR(a) for a in A]
# plot results
plot_SNR(A, SNR)
# find maximum SNR and the corresponding clipping probability
# Pr{|x| > xmax} = 1 + erf(-1 / (sqrt(2) A)) for a normal signal
Amax = A[np.argmax(SNR)]
Pc = 1 + erf(-1/(np.sqrt(2)*Amax))
print(r'Maximum SNR = {0:2.3f} dB for A = {1:2.1f} dB with clipping probability {2:2.1e}'
      .format(np.array(SNR).max(), 20*np.log10(Amax), Pc))
# -
# **Exercise**
#
# * Can you explain the overall shape of the SNR?
# * For which RMS level and probability of clipping is the SNR optimal?
# * Change the wordlength `w` of the quantizer. How does the SNR change?
#
# Solution: The SNR is low for low RMS levels of the input signal since the relative level of the quantization error is high. The SNR increases with increasing level until the clipping errors become dominant which make the SNR decay after its maximum. The SNR is optimal for $A \approx -12$ dB which is equivalent to $\Pr \{ |x[k]| > x_\text{max} \} \approx 10^{-4}$. Increasing the wordlength by one bit increases the SNR approximately by 6 dB.
# ### Laplace Distributed Signal
#
# The [Laplace distribution](../random_signals/important_distributions.ipynb#Laplace-Distribution) is a commonly applied model for speech and music signals. As for the normal distribution, clipping will occur with a non-zero probability. The probability that clipping occurs can be derived from the [cumulative distribution function](../random_signals/important_distributions.ipynb#Laplace-Distribution) (CDF) of the Laplace distribution as
#
# \begin{equation}
# \Pr \{ |x[k]| > x_\text{max} \} = e^{- \frac{\sqrt{2}}{A}}
# \end{equation}
#
# The SNR for a Laplace distributed signal is in general lower compared to a normal distributed signal. The reason for this is, that the Laplace distribution features low signal values with a higher and large values with a lower probability in comparison to the normal distribution. The relative quantization error for small signals is higher, which results in a lower average SNR. The probability of overload distortions is also higher compared to the normal distribution.
# #### Example - Quantization of a Laplace distributed signal
#
# The following example evaluates the SNR of a linear uniform quantizer with $w=8$ for a Laplace distributed signal $x[k]$. The SNR is computed and plotted for various RMS levels.
# +
w = 8 # wordlength of the quantizer
A = np.logspace(-2, 0, num=500) # relative RMS levels, i.e. -40 dB .. 0 dB
N = int(1e6) # number of samples
np.random.seed(1)  # reproducible random input signals
def compute_SNR(a):
    """Numerically estimate the quantization SNR in dB for a Laplace signal.

    a : RMS level of the input; the Laplace scale is a/sqrt(2) so that the
    standard deviation equals a. Relies on the globals N and Q and on
    uniform_midtread_quantizer defined above.
    """
    # draw a Laplace distributed input with RMS level a
    signal = np.random.laplace(size=N, scale=a/np.sqrt(2))
    # quantize it and form the error signal
    quantized = uniform_midtread_quantizer(signal, Q)
    error = quantized - signal
    # signal-to-noise ratio in dB
    return 10 * np.log10(np.var(signal) / np.var(error))
# quantization step for wordlength w
Q = 1/(2**(w-1))
# compute SNR for given RMS levels
SNR = [compute_SNR(a) for a in A]
# plot results
plot_SNR(A, SNR)
# find maximum SNR and the corresponding clipping probability
# Pr{|x| > xmax} = exp(-sqrt(2)/A) for a Laplace signal
Amax = A[np.argmax(SNR)]
Pc = np.exp(-np.sqrt(2)/Amax)
print(r'Maximum SNR = {0:2.3f} dB for A = {1:2.1f} dB with clipping probability {2:2.1e}'
      .format(np.array(SNR).max(), 20*np.log10(Amax), Pc))
# -
# **Exercise**
#
# * Compare the SNR for the Laplace distributed signal to the case of a normally distributed signal. What is different?
#
# Solution: The overall SNR is lower compared to the case of a normally distributed signal. Its maximum is also at lower RMS levels. Both can be explained by the properties of the Laplace distribution discussed above.
# + [markdown] nbsphinx="hidden"
# **Copyright**
#
# This notebook is provided as [Open Educational Resource](https://en.wikipedia.org/wiki/Open_educational_resources). Feel free to use the notebook for your own purposes. The text is licensed under [Creative Commons Attribution 4.0](https://creativecommons.org/licenses/by/4.0/), the code of the IPython examples under the [MIT license](https://opensource.org/licenses/MIT). Please attribute the work as follows: *<NAME>, Digital Signal Processing - Lecture notes featuring computational examples, 2016-2018*.
| quantization/linear_uniform_quantization_error.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Scraped posts to be labelled later; drop rows with missing values.
df = pd.read_csv('dataset20.11.csv', sep=';')
df = df.dropna()
df
# Pre-labelled positive and negative tweet corpora (no header row).
p_data = pd.read_csv('data/positive.csv', sep=';', header=None)
n_data = pd.read_csv('data/negative.csv', sep=';', header=None)
p_data
# Balanced training set: 50k positive + 50k negative samples.
# NOTE(review): column 3 appears to hold the tweet text and column 4 the
# sentiment marker — confirm against the corpus format.
dataset = pd.concat([p_data[:50000], n_data[:50000]])
dataset = dataset[[3, 4]]
dataset.columns = ['text', 'label']
# Marker -1 is mapped to label 1, everything else to 0.
dataset['label'] = dataset['label'].apply(lambda x: 1 if x == -1 else 0)
dataset
def text_cleaner(text):
    """Lowercase the text, keep only Russian letters and spaces, and strip.

    text : arbitrary string.
    Returns the cleaned string with leading/trailing whitespace removed.
    """
    text = text.lower()
    alph = 'абвгдеёжзийклмнопрстуфхцчшщъыьэюя'
    # Membership in `alph` already implies the character is alphabetic, so the
    # original `char.isalpha() and char[0] in alph` test reduces to this.
    allowed = set(alph)
    allowed.add(' ')
    # ''.join over a generator avoids the quadratic string `+=` loop.
    cleaned_text = ''.join(char for char in text if char in allowed)
    return cleaned_text.strip()
# %%time
# Apply the cleaning function to every tweet.
dataset['cleaned'] = dataset['text'].apply(text_cleaner)
dataset
# +
from sklearn.feature_extraction.text import TfidfVectorizer
# Separate TF-IDF spaces per class, used only for the near-duplicate
# clustering below (not for the final classifier).
positive_vector = TfidfVectorizer(lowercase=True, max_df=0.1, min_df=0.0001).fit_transform(dataset[dataset.label == 0].cleaned)
negative_vector = TfidfVectorizer(lowercase=True, max_df=0.1, min_df=0.0001).fit_transform(dataset[dataset.label == 1].cleaned)
print(positive_vector.shape)
print(negative_vector.shape)
# +
# %%time
from sklearn.cluster import dbscan
# DBSCAN groups near-duplicate tweets; cluster label -1 marks "noise",
# i.e. tweets that do not belong to any dense duplicate cluster.
positive_cluster = dbscan(positive_vector, eps=0.3, min_samples=10, n_jobs=-1)
# -
sum(positive_cluster[1] == 0)
# %%time
negative_cluster = dbscan(negative_vector, eps=0.3, min_samples=10, n_jobs=-1)
sum(negative_cluster[1] == 0)
# Keep only the non-clustered (unique) tweets from both classes.
dataset_clustered = pd.concat([dataset[dataset.label == 0][positive_cluster[1] == -1],
                               dataset[dataset.label == 1][negative_cluster[1] == -1]], ignore_index=True)
dataset_clustered
# +
from sklearn.model_selection import train_test_split
# 80/20 train/test split on the deduplicated corpus.
X_train, X_test, y_train, y_test = train_test_split(dataset_clustered['cleaned'], dataset_clustered['label'], test_size=0.2)
print(X_train.shape)
print(X_test.shape)
# -
def plot_classification_report(classificationReport,
                               title='Classification report'):
    """Convert a sklearn classification_report dict to a metrics DataFrame.

    classificationReport : dict produced by
        sklearn.metrics.classification_report(..., output_dict=True).
    title : unused; kept for interface compatibility with existing callers.
    Returns a DataFrame with one row per class/aggregate and the columns
    precision, recall and f1-score (support is dropped).
    """
    df = pd.DataFrame(classificationReport).T.iloc[:, :3]
    # The original also built `df.style.background_gradient(...)` here and
    # discarded the resulting Styler — a no-op, removed as dead code.
    return df
# +
from sklearn.feature_extraction.text import TfidfVectorizer
# Fit TF-IDF on the training texts only; reuse the fitted vocabulary
# to transform the test texts.
vectorizer = TfidfVectorizer(lowercase=True)
x_train_tf = vectorizer.fit_transform(X_train)
x_test_tf = vectorizer.transform(X_test)
print(x_train_tf.shape)
# +
from sklearn.naive_bayes import MultinomialNB
# Multinomial naive Bayes classifier on the sparse TF-IDF features.
model = MultinomialNB()
model.fit(x_train_tf, y_train)
predictions = model.predict(x_test_tf)
# +
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
sns.heatmap(plot_classification_report(classification_report(y_test, predictions, output_dict=True)),
square=False, annot=True, cbar=False)
plt.show()
sns.heatmap(confusion_matrix(y_test, predictions), square=False, annot=True, cbar=False)
plt.xlabel('true label')
plt.ylabel('predicted label')
plt.show()
# -
# Label the scraped posts with the trained classifier.
df['label'] = model.predict(vectorizer.transform(df.text))
df
df.loc[17606].text
df[df.label == 0].text
df[df.label == 0]
# Persist the posts classified as label 0.
df[df.label == 0].to_csv('sentimented_dataset.csv', index=False)
df.groupby('group_url').count()[['from_id']]
# Keep posts with a positive from_id (user-authored posts).
df = df[df.from_id > 0]
df
df.groupby('group_url').count()[['from_id']]
df.info()
df.nunique()
# Class balance of the predicted labels.
print('1:', sum(df['label'] == 1))
print('0:', sum(df['label'] == 0))
| NLPrecomendersystem/Sentiment analisis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Classification
# We are now going to develop a more powerful approach to image classification that we will eventually naturally extend to entire Neural Networks and Convolutional Neural Networks. The approach will have two major components:
#
# * a **score function**: mapping the raw data to class scores
# * a **loss function**: quantifying the agreement between the predicted scores and the ground truth labels
#
# We will then cast this as an optimization problem in which we will minimize the loss function with respect to the parameters of the score function.
# ## Linear Classifier
#
# Each image \\(x_i\\) is associated with a label \\(y_i\\) in a training dataset of N samples and K classes. The image \\(x_i\\) can be flattened out to a single column vector of shape [Dx1]. So we build a linear score function \\(f: R^D \mapsto R^K\\) to get the scores of every label for the image.
#
# $$f(x_i, W, b) = W x_i + b$$
# where
# $$x_i \in R^D (i = 1 \dots N)$$
# $$y_i \in { 1 \dots K }$$
#
# The matrix **W** (of size [KxD]), and the vector **b** (of size [Kx1]) are the parameters of the function.The parameters in **W** are often called the weights, and **b** is called the bias vector because it influences the output scores, but without interacting with the actual data \\(x_i\\). However, you will often hear people use the terms weights and parameters interchangeably.
#
# For example, in CIFAR-10 we have a training set of N = 50,000 images, each with D = 32 x 32 x 3 = 3072 pixels, and K = 10. Some notes here:
#
# * \\(W x_i\\) is effectively evaluating 10 separate classifiers in parallel (one for each class), where each classifier is a row of W
# * The input data \\((x_i, y_i)\\) are given and fixed, we have control over the setting of **W** and **b**. Intuitively we wish that the correct class has a score that is higher than the scores of incorrect classes
# * Once the learning is complete, we can discard the entire training set and only keep the learned parameters
# * Classifying the test image involves a single matrix multiplication and addition, which is significantly faster than comparing a test image to all training images
#
# **Bias trick**(homogeneous equation)
#
# The new linear score function \\(f(x_i, W, b) = W x_i + b\\) can be simplified to a single matrix multiply.
#
# $$f(x_i, W) = W x_i$$
# because
# $$
# \left[\begin{array}{lcr}W & b \end{array}\right]
# \left[\begin{array}{lcr}x_i \\ 1 \end{array}\right]
# = W x_i + b
# $$
#
# With our CIFAR-10 example, \\(x_i\\) is now [3073 x 1] instead of [3072 x 1] , and **W** is now [10 x 3073] instead of [10 x 3072].
# ## Loss Function
# We do have control over these weights and we want to set them so that the predicted class scores are consistent with the ground truth labels in the training data.
#
# We are going to measure our unhappiness with outcomes such as this one with a loss function (or sometimes also referred to as the **cost** function or the **objective**). Intuitively, the loss will be high if we’re doing a poor job of classifying the training data, and it will be low if we’re doing well.
# ### Multiclass Support Vector Machine loss
# The SVM loss is set up so that the SVM “wants” the correct class for each image to a have a score higher than the incorrect classes by some fixed margin \\(\Delta\\). The Multiclass SVM loss for the i-th example is then formalized as follows:
#
# $$L_i = \sum_{j\neq y_i} \max(0, s_j - s_{y_i} + \Delta)$$
# where the score for the j-th class is the j-th element (**s** short for scores)
# $$s_j = f(x_i, W)_j$$
#
# +
import numpy as np
def L_i(x, y, W):
    """Unvectorized multiclass SVM (hinge) loss for a single example.

    x : feature column vector of one example (bias dimension included)
    y : integer index of the correct class
    W : weight matrix of shape (num_classes, num_features)
    Uses a fixed margin delta = 1.0.
    """
    delta = 1.0
    scores = W.dot(x)
    num_classes = W.shape[0]
    loss = 0.0
    for cls in range(num_classes):
        # the correct class never contributes to the loss
        if cls == y:
            continue
        margin = scores[cls] - scores[y] + delta
        if margin > 0:
            loss += margin
    return loss
# +
import numpy as np
def L_i_vectorized(x, y, W):
    """Half-vectorized multiclass SVM (hinge) loss for a single example.

    Same contract as L_i, but without an explicit loop over classes.
    """
    delta = 1.0
    scores = W.dot(x)
    hinge = np.clip(scores - scores[y] + delta, 0, None)
    # The correct class always contributes exactly delta to the clipped
    # margins; subtract it instead of zeroing the entry.
    return hinge.sum() - hinge[y]
# +
# Random smoke test: one D_IMG-dimensional "image", N_CLASS classes.
D_IMG = 33
N_CLASS = 10
W = np.random.rand(N_CLASS, D_IMG)
x = np.random.randint(0, 255, D_IMG)
y = 4 # correct class index, 0 <= y < N_CLASS
# Both implementations must agree on the same inputs.
L_i(x, y, W), L_i_vectorized(x, y, W)
# +
import numpy as np
def L(X, Y, W):
    """
    fully-vectorized implementation :
    - X holds all the training examples as columns (e.g. 3073 x 50,000 in CIFAR-10)
    - Y is array of integers specifying correct class (e.g. 50,000-D array)
    - W are weights (e.g. 10 x 3073)
    Returns the mean multiclass SVM (hinge) loss over all examples.

    The original body was an unfinished exercise that computed the scores and
    returned nothing; returning the loss is backward compatible since no
    caller used the (implicit None) return value.
    """
    delta = 1.0
    num_examples = X.shape[1]
    scores = W.dot(X)  # shape (K, N): one column of class scores per example
    # score of the correct class for every example, shape (N,)
    correct_scores = scores[Y, np.arange(num_examples)]
    # hinge margins; the correct scores broadcast across the class axis
    margins = np.maximum(0, scores - correct_scores + delta)
    # the correct class must not contribute to the loss
    margins[Y, np.arange(num_examples)] = 0
    # average data loss over all examples
    return np.sum(margins) / num_examples
# -
# ## Softmax Classifier
# Softmax classifier is the generalization of the binary Logistic Regression classifier to multiple classes. It gives a slightly more intuitive output and has a probabilistic interpretation which normalizes class probabilities.
#
# **softmax function** $$f_j(z) = \frac{e^{z_j}}{\sum_k e^{z_k}}$$
# here \\(z_j\\) is the j-th element. It takes a vector **z** of arbitrary real-valued scores and squashes it to a vector of values between zero and one that sum to one.
#
# **cross-entropy** between a "true" distribution **p** and an estimated distribution **q** is defined as following in information theory.
#
# $$H(p,q) = - \sum_x p(x) \log q(x)$$
#
# The cross-entropy can be written in terms of entropy and the Kullback-Leibler divergence as following.
#
# $$H(p,q) = H(p) + D_{KL}(p||q)$$
#
# **cross-entropy loss**
# $$L_i = -\log\left(\frac{e^{f_{y_i}}}{ \sum_j e^{f_j} }\right) \hspace{0.5in} \text{or equivalently} \hspace{0.5in} L_i = -f_{y_i} + \log\sum_j e^{f_j}$$
#
# * minimizing the KL divergence between the two distributions (a measure of distance)
# * wants the predicted distribution to have all of its mass on the correct answer
#
# **Probabilistic interpretation** Looking at the expression, we see that
# $$P(y_i \mid x_i; W) = \frac{e^{f_{y_i}}}{\sum_j e^{f_j} }$$
#
# we are therefore minimizing the negative log likelihood of the correct class, which can be interpreted as performing Maximum Likelihood Estimation (MLE).
# ### Numeric stability
# When you're writing code for computing the Softmax function in practice, the intermediate terms may be very large due to the exponentials, such as \\(e^{f_{y_i}}\\) and \\(\sum_j e^{f_j}\\). So it is important to use the normalization trick below.
# +
import numpy as np
# Naive evaluation: exp(456) and exp(789) overflow to inf, so the division
# yields inf/inf = nan — this cell deliberately demonstrates the problem.
f = np.array([123, 456, 789])
p = np.exp(f) / np.sum(np.exp(f))
f, p
# +
import numpy as np
# Normalization trick: shift by the maximum so the largest exponent is
# exp(0) = 1; the softmax value is mathematically unchanged but stable.
f -= np.max(f)
p = np.exp(f) / np.sum(np.exp(f))
f,p
# -
# ## Possibly confusing naming conventions
# To be precise, the SVM classifier uses the **hinge loss**, or also sometimes called the **max-margin loss**. The Softmax classifier uses the **cross-entropy loss**. The Softmax classifier gets its name from the **softmax function**, which is used to squash the raw class scores into normalized positive values that sum to one, so that the cross-entropy loss can be applied. In particular, note that technically it doesn't make sense to talk about the "softmax loss", since softmax is just the squashing function, but it is a relatively commonly used shorthand.
# ## Regularization
# We have a dataset and a set of parameters W that correctly classify every example.(\\(L_i = 0\\) for all i). One easy way to see this is that if one parameters W correctly classify all examples, then any multiple of these parameters \\(\lambda W\\) where \\(\lambda > 1\\) will also give zero loss because this transformation uniformly stretches all score magnitudes and hence also their absolute differences.
#
# In other words, we wish to encode some preference for a certain set of weights **W** over others to remove this ambiguity. We can do so by extending the loss function with a regularization penalty \\(R(W)\\). The most common regularization penalty is the L2 norm that discourages large weights through an elementwise quadratic penalty over all parameters:
#
# $$R(W) = \sum_k\sum_l W_{k,l}^2$$
#
# That is, the full Multiclass SVM loss becomes:
#
# $$L = \underbrace{ \frac{1}{N} \sum_i L_i }_\text{data loss} + \underbrace{ \lambda R(W) }_\text{regularization loss}$$
#
# The L2 penalty prefers smaller and more diffuse weight vectors, this effect can improve the generalization performance of the classifiers on test images and lead to less overfitting.
#
| cs231n/linear_classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
from pyspark.sql import SparkSession
# Create the SparkSession.
spark = SparkSession.builder.getOrCreate()
# Read the CSV file into a DataFrame (no header row, so columns are _c0, _c1, ...).
df = spark.read.csv('../Datasets/news/news_sentiment.csv', header=False)
# Select the column named _c1 and show its first 5 rows.
df.select(df._c1).show(5)
# +
import pyspark.sql.functions as func
# Split the text in column _c1 on spaces and rename the result to 'words'.
df = df.select(func.split(df._c1, ' ').alias('words'))
df.show(5)
# +
from pyspark.ml.feature import Word2Vec
# Initialize the Word2Vec feature extractor (3-dimensional embeddings,
# no minimum word count).
word2Vec = Word2Vec(vectorSize=3, minCount=0, inputCol="words", outputCol="features")
model = word2Vec.fit(df)
word2vec_df = model.transform(df)
word2vec_df.show(5)
# +
from pyspark.ml.feature import StandardScaler
# Initialize the feature standardization model (scale to unit std,
# without centering).
scaler = StandardScaler(inputCol="features", outputCol="scaledFeatures",
                        withStd=True, withMean=False)
scalerModel = scaler.fit(word2vec_df)
scaled_df = scalerModel.transform(word2vec_df)
scaled_df.show(5)
| Chapter_7/Section_7.3.2.1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import string
def print_rangoli(size):
    """Print an alphabet rangoli of the given size.

    Row width is 2*size - 1 letter slots joined by '-'; the middle row runs
    from the deepest letter down to 'a' at the center and back out.
    """
    alphabet = string.ascii_lowercase
    width = size * 2 - 1

    def build_row(depth):
        # Letters alphabet[depth] .. alphabet[size-1], mirrored around the
        # center slot; remaining slots stay '-'.
        slots = ["-"] * width
        for j in range(size - depth):
            slots[size - 1 - j] = alphabet[j + depth]
            slots[size - 1 + j] = alphabet[j + depth]
        return "-".join(slots)

    # top half, excluding the middle row
    for depth in range(size - 1, 0, -1):
        print(build_row(depth))
    # middle row and bottom half
    for depth in range(size):
        print(build_row(depth))
################################################################################
#----------------------------- begin locked code ------------------------------#
# Locked HackerRank driver (do not edit): reads the size from stdin and
# prints the rangoli.
if __name__ == '__main__':
    n = int(input())
    print_rangoli(n)
#------------------------------ end locked code -------------------------------#
################################################################################
# +
# another way
import string
def print_rangoli(size):
    """Print an alphabet rangoli of the given size.

    BUG FIX: the body previously used the global `n` instead of the `size`
    parameter, so calling the function with an argument only worked when a
    global `n` happened to hold the same value.
    """
    alpha = string.ascii_lowercase
    # top half, excluding the middle row
    for i in range(size - 1, 0, -1):
        row = ["-"] * (size * 2 - 1)
        for j in range(0, size - i):
            row[size - 1 - j] = alpha[j + i]
            row[size - 1 + j] = alpha[j + i]
        print("-".join(row))
    # middle row and bottom half
    for i in range(0, size):
        row = ["-"] * (size * 2 - 1)
        for j in range(0, size - i):
            row[size - 1 - j] = alpha[j + i]
            row[size - 1 + j] = alpha[j + i]
        print("-".join(row))
# Driver: read the rangoli size from stdin and print it.
if __name__ == '__main__':
    n = int(input())
    print_rangoli(n)
| Python/3. Strings/23. alphabet rangoli.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
"""
A perfect number is a number for which the sum of its proper divisors is exactly equal to the number.
For example, the sum of the proper divisors of 28 would be 1 + 2 + 4 + 7 + 14 = 28,
which means that 28 is a perfect number.
A number n is called deficient if the sum of its proper divisors is less than n and it
is called abundant if this sum exceeds n.
As 12 is the smallest abundant number, 1 + 2 + 3 + 4 + 6 = 16,
the smallest number that can be written as the sum of two abundant numbers is 24.
By mathematical analysis, it can be shown that all integers greater than 28123
can be written as the sum of two abundant numbers. However, this upper limit cannot
be reduced any further by analysis even though it is known that the greatest number
that cannot be expressed as the sum of two abundant numbers is less than this limit.
Find the sum of all the positive integers which cannot be written as the sum of two abundant numbers.
"""
# i'm sorry, this code is very bad, but i can't think straight at the moment
import itertools
def factorsLess(n):
    """Returns all factors of n that are less than n (the proper divisors).
    (c) agf, steveha and <NAME> from SO."""
    # Odd numbers have only odd divisors, so they can be scanned in steps of 2.
    step = 1 if n % 2 == 0 else 2
    divisors = set()
    for small in range(1, int(n ** 0.5) + 1, step):
        if n % small == 0:
            divisors.add(small)
            divisors.add(n // small)
    # n divides itself but is not a proper divisor.
    divisors.discard(n)
    return divisors
def findAbundantNumsBelow30000(limit=30000):
    """Returns all abundant numbers below `limit` (default 30000).

    A number is abundant when the sum of its proper divisors exceeds it.
    The `limit` parameter generalizes the previously hard-coded bound while
    staying backward compatible with existing zero-argument calls.
    """
    return [i for i in range(limit) if sum(factorsLess(i)) > i]
# max(findAbundantNumsBelow30000())
abNums = findAbundantNumsBelow30000()
# All pairwise sums of abundant numbers (with repetition).
# NOTE(review): the ~7400 abundant numbers below 30000 give roughly 27
# million pairs, so this cell is slow and memory hungry; a set
# comprehension bounded by the 28123 limit would be far cheaper.
sums = itertools.combinations_with_replacement(abNums, 2)
calcSums = []
for a in sums:
    # print(a)
    # print(a[1])
    calcSums.append(a[0]+a[1])
expressableInAbNums = []
calcSums
# -
max(calcSums)
# Set membership makes the lookup below O(1) per candidate.
s = set(calcSums)
# +
# Collect every integer below 30000 that is not a sum of two abundant numbers.
nonexpressable = []
for c in range(30000):
    if c not in s:
        nonexpressable.append(c)
# -
nonexpressable
# The answer to Project Euler problem 23.
sum(nonexpressable)
| Project_Euler-Problem_23-Non-abundant_sums.ipynb |
# # Translation (TensorFlow)
# Install the Transformers and Datasets libraries to run this notebook.
# !pip install datasets transformers[sentencepiece]
# !apt install git-lfs
# You will need to setup git, adapt your email and name in the following cell.
# !git config --global user.email "<EMAIL>"
# !git config --global user.name "<NAME>"
# You will also need to be logged in to the Hugging Face Hub. Execute the following and enter your credentials.
# +
from huggingface_hub import notebook_login
# Authenticate with the Hugging Face Hub (needed to push models later).
notebook_login()
# +
from datasets import load_dataset, load_metric
# KDE4 localization corpus, English-French sentence pairs.
raw_datasets = load_dataset("kde4", lang1="en", lang2="fr")
# -
raw_datasets
# The corpus ships only a "train" split; carve out 10% for validation.
split_datasets = raw_datasets["train"].train_test_split(train_size=0.9, seed=20)
split_datasets
# Rename the held-out split from "test" to "validation".
split_datasets["validation"] = split_datasets.pop("test")
split_datasets["train"][1]["translation"]
# +
from transformers import pipeline
# Pretrained Marian en->fr model used as the fine-tuning starting point.
model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
translator = pipeline("translation", model=model_checkpoint)
translator("Default to expanded threads")
# -
split_datasets["train"][172]["translation"]
translator(
    "Unable to import %1 using the OFX importer plugin. This file is not the correct format."
)
# +
from transformers import AutoTokenizer
model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint, return_tensors="tf")
# +
en_sentence = split_datasets["train"][1]["translation"]["en"]
fr_sentence = split_datasets["train"][1]["translation"]["fr"]
inputs = tokenizer(en_sentence)
# Targets must be tokenized with the target-language (French) vocabulary.
with tokenizer.as_target_tokenizer():
    targets = tokenizer(fr_sentence)
# -
# Demonstration: tokenizing French with the source-language tokenizer
# splits words into many more (wrong) subword pieces.
wrong_targets = tokenizer(fr_sentence)
print(tokenizer.convert_ids_to_tokens(wrong_targets["input_ids"]))
print(tokenizer.convert_ids_to_tokens(targets["input_ids"]))
# +
# Truncation lengths for source and target sequences.
max_input_length = 128
max_target_length = 128
def preprocess_function(examples):
    """Tokenize a batch of en/fr translation pairs for seq2seq training.

    examples : batch dict with a "translation" list of {"en": ..., "fr": ...}.
    Returns the tokenized English inputs with the tokenized French ids
    attached under "labels". Relies on the module-level `tokenizer`,
    `max_input_length` and `max_target_length`.
    """
    source_texts = [pair["en"] for pair in examples["translation"]]
    target_texts = [pair["fr"] for pair in examples["translation"]]
    batch = tokenizer(source_texts, max_length=max_input_length, truncation=True)
    # Targets must be tokenized with the target-language settings.
    with tokenizer.as_target_tokenizer():
        tokenized_targets = tokenizer(
            target_texts, max_length=max_target_length, truncation=True
        )
    batch["labels"] = tokenized_targets["input_ids"]
    return batch
# -
# Tokenize the whole dataset; drop the raw columns afterwards.
tokenized_datasets = split_datasets.map(
    preprocess_function,
    batched=True,
    remove_columns=split_datasets["train"].column_names,
)
# +
from transformers import TFAutoModelForSeq2SeqLM
model = TFAutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)
# +
from transformers import DataCollatorForSeq2Seq
# Pads inputs and labels per batch and builds the decoder_input_ids.
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model, return_tensors="tf")
# -
# Inspect one collated batch.
batch = data_collator([tokenized_datasets["train"][i] for i in range(1, 3)])
batch.keys()
batch["labels"]
batch["decoder_input_ids"]
for i in range(1, 3):
    print(tokenized_datasets["train"][i]["labels"])
# Wrap the tokenized splits as tf.data pipelines.
tf_train_dataset = tokenized_datasets["train"].to_tf_dataset(
    columns=["input_ids", "attention_mask", "labels"],
    collate_fn=data_collator,
    shuffle=True,
    batch_size=32,
)
tf_eval_dataset = tokenized_datasets["validation"].to_tf_dataset(
    columns=["input_ids", "attention_mask", "labels"],
    collate_fn=data_collator,
    shuffle=False,
    batch_size=16,
)
# !pip install sacrebleu
# +
from datasets import load_metric
metric = load_metric("sacrebleu")
# -
# BLEU sanity check: a close paraphrase scores high ...
predictions = [
    "This plugin lets you translate web pages between several languages automatically."
]
references = [
    [
        "This plugin allows you to automatically translate web pages between several languages."
    ]
]
metric.compute(predictions=predictions, references=references)
# ... while degenerate repetitive or very short candidates score poorly.
predictions = ["This This This This"]
references = [
    [
        "This plugin allows you to automatically translate web pages between several languages."
    ]
]
metric.compute(predictions=predictions, references=references)
predictions = ["This plugin"]
references = [
    [
        "This plugin allows you to automatically translate web pages between several languages."
    ]
]
metric.compute(predictions=predictions, references=references)
# +
import numpy as np
def compute_metrics():
    """Compute SacreBLEU on a 200-example random sample of the validation set.

    Relies on the module-level `tokenized_datasets`, `data_collator`,
    `model`, `tokenizer` and `metric`. Returns {"bleu": score}.
    """
    all_preds = []
    all_labels = []
    sampled_dataset = tokenized_datasets["validation"].shuffle().select(range(200))
    tf_generate_dataset = sampled_dataset.to_tf_dataset(
        columns=["input_ids", "attention_mask", "labels"],
        collate_fn=data_collator,
        shuffle=False,
        batch_size=4,
    )
    for batch in tf_generate_dataset:
        # BUG FIX: `batch` is a dict, so `model.generate(*batch)` unpacked its
        # keys (strings) as positional arguments. Pass the tensors explicitly.
        predictions = model.generate(
            batch["input_ids"], attention_mask=batch["attention_mask"]
        )
        decoded_preds = tokenizer.batch_decode(predictions, skip_special_tokens=True)
        labels = batch["labels"].numpy()
        # Replace the -100 used to mask the loss with the pad token id so the
        # labels can be decoded.
        labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
        decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
        decoded_preds = [pred.strip() for pred in decoded_preds]
        decoded_labels = [[label.strip()] for label in decoded_labels]
        all_preds.extend(decoded_preds)
        all_labels.extend(decoded_labels)

    # BUG FIX: score the accumulated predictions/references instead of only
    # the last batch (all_preds/all_labels were collected but never used).
    result = metric.compute(predictions=all_preds, references=all_labels)
    return {"bleu": result["score"]}
# +
from huggingface_hub import notebook_login
notebook_login()
# -
# Baseline evaluation before fine-tuning.
print("Eval loss:", model.evaluate(tf_eval_dataset))
print(compute_metrics())
# +
from transformers import create_optimizer
from transformers.keras_callbacks import PushToHubCallback
import tensorflow as tf
num_epochs = 3
num_train_steps = len(tf_train_dataset) * num_epochs
# AdamW with linearly decaying learning rate, weight decay, no warmup.
optimizer, schedule = create_optimizer(
    init_lr=5e-5,
    num_warmup_steps=0,
    num_train_steps=num_train_steps,
    weight_decay_rate=0.01,
)
# No explicit loss: the model computes its internal seq2seq loss.
model.compile(optimizer=optimizer)
# Train in mixed-precision float16
tf.keras.mixed_precision.set_global_policy("mixed_float16")
# +
from transformers.keras_callbacks import PushToHubCallback
# Pushes checkpoints and the tokenizer to the Hub during training.
callback = PushToHubCallback(
    output_dir="marian-finetuned-kde4-en-to-fr", tokenizer=tokenizer
)
model.fit(
    tf_train_dataset,
    validation_data=tf_eval_dataset,
    callbacks=[callback],
    epochs=num_epochs,
)
# -
# Evaluation after fine-tuning.
print("Eval loss:", model.evaluate(tf_eval_dataset))
print(compute_metrics())
# +
from transformers import pipeline
# Replace this with your own checkpoint
model_checkpoint = "huggingface-course/marian-finetuned-kde4-en-to-fr"
translator = pipeline("translation", model=model_checkpoint)
translator("Default to expanded threads")
# -
translator(
    "Unable to import %1 using the OFX importer plugin. This file is not the correct format."
)
| notebooks/course/chapter7/section4_tf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %pushd ../../
# %env CUDA_VISIBLE_DEVICES=3
# +
import json
import logging
import os
import sys
import tempfile

import numpy as np
import torch
import torchvision
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
torch.cuda.set_device(0)
# -
from netdissect import setting
segopts = 'netpqc'
segmodel, seglabels, _ = setting.load_segmenter(segopts)
len(seglabels)
class UnsupervisedImageFolder(torchvision.datasets.ImageFolder):
    """An ImageFolder over a flat directory of images (no class subfolders).

    Works by symlinking ``root`` as a single fake class ('dummy') inside a
    temporary directory, so the stock ImageFolder machinery can index it.
    Optionally downsamples to ``max_size`` images via a random permutation,
    and can return the file path alongside each sample.
    """

    def __init__(self, root, transform=None, max_size=None, get_path=False):
        # Keep the TemporaryDirectory object alive for the dataset's lifetime;
        # it is deleted when this instance is garbage-collected.
        self.temp_dir = tempfile.TemporaryDirectory()
        os.symlink(root, os.path.join(self.temp_dir.name, 'dummy'))
        root = self.temp_dir.name
        super().__init__(root, transform=transform)
        self.get_path = get_path
        self.perm = None  # index permutation used only when downsampling
        if max_size is not None:
            actual_size = super().__len__()
            if actual_size > max_size:
                self.perm = torch.randperm(actual_size)[:max_size].clone()
                logging.info(f"{root} has {actual_size} images, downsample to {max_size}")
            else:
                logging.info(f"{root} has {actual_size} images <= max_size={max_size}")

    def _find_classes(self, dir):
        # Single fake class so ImageFolder accepts the symlinked layout.
        return ['./dummy'], {'./dummy': 0}

    def __getitem__(self, key):
        """Return a (transformed) image by integer index or by file path."""
        if self.perm is not None:
            key = self.perm[key].item()
        if isinstance(key, str):
            path = key
        else:
            # BUG FIX: the original indexed self.samples with the undefined
            # name `index`; the lookup key for this item is `key`.
            path, _ = self.samples[key]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        if self.get_path:
            return sample, path
        else:
            return sample

    def __len__(self):
        if self.perm is not None:
            return self.perm.size(0)
        else:
            return super().__len__()
class Sampler(torch.utils.data.Sampler):
    """Yield only the image paths whose segmentation output is missing.

    A path is considered done when ``<seg_path>/<basename-without-ext>.pth``
    already exists, which makes the downstream processing resumable.
    """

    def __init__(self, dataset, seg_path):
        def already_done(img_path):
            stem = os.path.splitext(os.path.basename(img_path))[0]
            return os.path.exists(os.path.join(seg_path, stem + '.pth'))

        # Keep only paths that still need a segmentation, preserving order.
        self.todos = [p for p, _ in dataset.samples if not already_done(p)]

    def __len__(self):
        return len(self.todos)

    def __iter__(self):
        return iter(self.todos)
# Convert PIL images to tensors and map pixel values from [0, 1] to [-1, 1]
# (mean 0.5 / std 0.5 per channel), the normalization the segmenter expects.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
def process(img_path, seg_path, device='cuda', batch_size=128, **kwargs):
    """Segment every image under ``img_path`` and save one ``.pth`` tensor
    per image into ``seg_path``.

    Already-segmented images are skipped via ``Sampler``, making the run
    resumable. Extra ``kwargs`` are forwarded to ``segmodel.segment_batch``.
    NOTE(review): relies on the module-level ``segmodel`` loaded above.
    """
    os.makedirs(seg_path, exist_ok=True)
    dataset = UnsupervisedImageFolder(img_path, transform=transform, get_path=True)
    sampler = Sampler(dataset, seg_path)
    loader = torch.utils.data.DataLoader(dataset, num_workers=24, batch_size=batch_size, pin_memory=True, sampler=sampler)
    with torch.no_grad():  # inference only; no gradients needed
        for x, paths in tqdm(loader):
            segs = segmodel.segment_batch(x.to(device), **kwargs).detach().cpu()
            for path, seg in zip(paths, segs):
                # Save under the image's basename: foo.png -> foo.pth.
                k = os.path.splitext(os.path.basename(path))[0]
                torch.save(seg, os.path.join(seg_path, k + '.pth'))
            del segs  # release the batch's tensors before the next iteration
seg_path = 'notebooks/stats/churches/dome2tree/naive'
import glob
torch.backends.cudnn.benchmark=True
# !ls notebooks/stats/churches/dome2tree
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/baselines/pyflow/dome2tree_all_256/poisson',
'notebooks/stats/churches/dome2tree_all/poisson',
batch_size=8,
)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/baselines/pyflow/dome2tree_domeonly_256/naive',
'notebooks/stats/churches/dome2tree/naive',
batch_size=8,
)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/baselines/pyflow/dome2tree_domeonly_256/poisson',
'notebooks/stats/churches/dome2tree/poisson',
batch_size=8,
)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/baselines/pyflow/dome2tree_domeonly_256/laplace',
'notebooks/stats/churches/dome2tree/laplace',
batch_size=8,
)
process(
'/data/vision/torralba/distillation/gan_rewriting/results/ablations/stylegan-church-dome2tree-8-1-2001-0.0001-overfit/images',
'churches/dome2tree_all/overfit',
batch_size=8)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/domes',
'churches/domes',
batch_size=12)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/dome2tree',
'churches/dome2tree/ours',
batch_size=8)
process(
'/data/vision/torralba/ganprojects/placesgan/tracer/utils/samples/dome2spire',
'churches/dome2spire/ours',
batch_size=8)
| notebooks/stats/Generic_Segmentation-Copy6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
comp = pd.read_csv("companies.csv")
# acq = pd.read_csv("acquisitions.csv")
print(comp.shape)
comp.head()
# # filter the dataset by "first_funding_at" after 1990
# filter the dataset by "first_funding_at" after 1990
comp_filter = comp[comp['first_funding_at'] >= '1990-01-01']
comp_filter.shape
comp_filter.info()
# # create subdataframe for each category
# +
# how many categories?
# Build both directions of the company<->category mapping.
cat_comp_dict = {}   # category name -> list of row indices carrying it
comp_cat_dict = {}   # row index -> list of category names
l_maxlen = 0         # largest number of categories on a single company
all_cat_list = []    # every category occurrence (for frequency counting)
for idx, s in enumerate(comp_filter["category_list"]):
    # Missing category lists come through as NaN (a float), not str.
    if not isinstance(s, str):
        # BUG FIX: the original wrote to an undefined name `cat_dict`
        # (NameError on the first NaN row); record an empty category
        # list for this company instead.
        comp_cat_dict[idx] = []
        continue
    cats = s.split("|")
    comp_cat_dict[idx] = cats
    if len(cats) > l_maxlen:
        l_maxlen = len(cats)
        l_max_show = cats
    for cat in cats:
        all_cat_list.append(cat)
        # setdefault replaces the explicit membership test + init.
        cat_comp_dict.setdefault(cat, []).append(idx)
print(len(cat_comp_dict))
print(l_maxlen)
# print(l_max_show)
# def add_cid(cat, cat_dict, cid):
# if cat not in cat_dict.keys():
# cat_dict[cat] = []
# cat_dict[cat].append(cid)
# -
all_cat_stats = Counter(all_cat_list)
all_cat_stats = pd.DataFrame.from_dict(all_cat_stats, orient='index', columns=['num of companies'])
all_cat_stats['percentage of total companies'] = all_cat_stats['num of companies'] / len(comp_filter) * 100
all_cat_stats = all_cat_stats.sort_values('percentage of total companies',ascending=False)
all_cat_stats.head()
all_cat_stats.tail()
all_cat_stats.index
# +
sub_cat_dataset = {}
sub_status = {}
for cat, cid in cat_comp_dict.items():
df = comp_filter.iloc[cid,:]
sub_cat_dataset[cat] = df
sub_status[cat] = dict(df.groupby('status')['name'].count())
# -
sub_status_df = pd.DataFrame.from_dict(sub_status, orient='index')
sub_status_df.fillna(0, inplace=True)
sub_status_df.head()
# +
# sub_status_df['total'] = sub_status_df['acquired'] + sub_status_df['closed'] + sub_status_df['ipo'] + sub_status_df['operating']
# sub_status_df.sort_values('total', ascending=False).head()
# +
sub_status_arr = sub_status_df.to_numpy()
# print(sub_status_arr.shape)
tot = np.sum(sub_status_arr, axis=1)
# print(len(tot))
acq_p = sub_status_arr[:,0] / tot *100
# print(len(acq_p))
closed_p = sub_status_arr[:,1] / tot * 100
ipo_p = sub_status_arr[:,2] / tot * 100
operating_p = sub_status_arr[:,3] / tot * 100
sub_status_df['total'] = tot
sub_status_df['acq_percent'] = acq_p
sub_status_df['closed_percent'] = closed_p
sub_status_df['ipo_percent'] = ipo_p
sub_status_df['operating_percent'] = operating_p
sub_status_df = sub_status_df.sort_values('total', ascending=False)
sub_status_df.head()
# -
id = cat_comp_dict['Software']
df = comp_filter.iloc[id,:]
dict(df.groupby('status')['name'].count())
# +
d = {'a': {'acquired': 1071, 'closed': 645, 'ipo': 160, 'operating': 6883},
'b': {'acquired': 10, 'closed': 6, 'ipo': 1, 'operating': 68}}
f = pd.DataFrame.from_dict(d, orient='index')
f
# -
d = {0: [1,2,3]}
d[0].append(4)
d[1] = []
d[1].append(5)
0 in d.keys()
# +
# split the category column
# new data frame with split value columns
new = comp["category_list"].str.split("|", n = 3, expand = True)
# make a separate column for the first category from the split frame
comp["category_1"]= new[0]
# make a separate column for the second category from the split frame
comp["category_2"]= new[1]
# Dropping old Name columns
comp.drop(columns =["category_list"], inplace = True)
# df display
comp.head()
# -
# # Label the dataset
# ### ipo and acquired, label = 1, others label = 0
data_stats = comp.groupby('status').count()
data_stats
# +
# data_stats.index
# +
# data_stats.plot.bar(y='name',rot=15)
# +
ax = data_stats.plot(kind='barh', y='name', figsize=(10,7), fontsize=13);
ax.set_alpha(0.8)
ax.set_title("Distribution of Status", fontsize=18)
# create a list to collect the plt.patches data
totals = []
# find the values and append to list
for i in ax.patches:
totals.append(i.get_width())
# set individual bar lables using above list
total = sum(totals)
# set individual bar lables using above list
for i in ax.patches:
# get_width pulls left or right; get_y pushes up or down
ax.text(i.get_width()+.3, i.get_y()+.38, \
str(round((i.get_width()/total)*100, 2))+'%', fontsize=15,color='dimgrey')
# # invert for largest on top
# ax.invert_yaxis()
plt.show()
# -
# +
# Check the companies with same names
# names = comp_filter['name']
# sorted(Counter(names).items(), key=lambda x: x[1], reverse=True)
# -
comp_filter['label'] = 0
comp_filter.loc[comp_filter.status == 'ipo', 'label'] = 1
comp_filter.loc[comp_filter.status == 'acquired', 'label'] = 1
comp_filter.head()
comp_filter['category_list'][1]
comp_filter.to_csv("comp_filter.csv", index=False, encoding='utf8')
df = pd.read_csv('comp_filter.csv')
df.head()
# # Add investor number to the dataset
investor = pd.read_csv('investments.csv')
investor_num = investor[['company_permalink',
'investor_permalink']].groupby(['company_permalink']).agg(['count'])
comp_plus_InvestorNum = pd.merge(how='inner',left=df, right=investor_num,
left_on='permalink', right_on='company_permalink')
comp_plus_InvestorNum.to_csv('comp_plus_InvestorNum',index=False, encoding='utf8')
# # Deal with the data type
# 1. calculate the funding_duration
# 2. Change datetime to UTC
df = pd.read_csv('comp_plus_InvestorNum.csv')
df.head()
df.rename(columns={"('investor_permalink', 'count')": "num_of_investor"}, inplace=True)
#df['Num_of_investor'] = df.iloc[:,-1]
# Convert '-' placeholders to NaN in the funding total column.
df['funding_total_usd'] = pd.to_numeric(df['funding_total_usd'], errors='coerce')
# Calculate the funding_duration between 'first_funding_at' and 'last_funding_at'.
t1 = pd.to_datetime(df.first_funding_at, errors='coerce')
t2 = pd.to_datetime(df.last_funding_at, errors='coerce')
# BUG FIX: pd.to_timedelta() raises a TypeError on datetime64 input in
# current pandas. Express each date as whole days since the Unix epoch
# instead, which keeps the original intent (integer day numbers) and
# makes t2 - t1 the day count between first and last fund raising.
epoch = pd.Timestamp(0)
t1 = (t1 - epoch).dt.days
t2 = (t2 - epoch).dt.days
# funding_duration means how many days between the first and last fund raising.
df['funding_duration'] = t2 - t1
# +
# Numeric (day-number) representations of the funding dates, model-friendly.
df['first_funding_at_UTC'] = t1
df['last_funding_at_UTC'] = t2
# -
df.info()
df.to_csv('companies_allFeatures.csv', index=False, encoding='utf8')
# # Date Preprocessing
# 1. Select features, filter out missing data
# 2. Shuffle the dataset
# 4. Split dataset to training, dev and test set (90%, 5%, 5%)
# 2. Upsample the training set to balance.
# 3. Encode and transform category features.
# 2. Encode and transform text features.
# 3. Concatenate all the features.
import sklearn
from scipy.sparse import hstack
from sklearn import preprocessing
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.utils import resample, shuffle
from sklearn import datasets
from sklearn.model_selection import train_test_split
df = pd.read_csv('companies_allFeatures.csv')
df
# select meaningful features
df = df.drop(columns=['permalink', 'name', 'homepage_url', 'status', 'state_code',
'region','city', 'founded_at', 'first_funding_at','last_funding_at'])
df_clean = df.dropna()
df.info()
df_clean.info()
df_clean.head()
df_clean.iloc[23]
# shuffle and split the dataset into tain, dev, and test set.
X = df_clean.drop(columns=['label'])
y = df_clean['label']
X_train, X_test_, y_train, y_test_ = train_test_split(X, y, test_size=0.1, random_state=2,
stratify=y, shuffle=True)
X_dev, X_test, y_dev, y_test = train_test_split(X_test_, y_test_, test_size=0.5, random_state=2,
shuffle=False)
df = X_dev
df['label'] = y_dev
df.to_csv('dev.csv')
df = X_test
df['label'] = y_test
df.to_csv('test.csv')
df = X_train
df['label'] = y_train
df.to_csv('train.csv')
# # Upsample the training set
df = pd.read_csv('train.csv', )
df.shape
# +
# Separate majority and minority classes
df_majority = df[df.label==0]
df_minority = df[df.label==1]
# n is the number of majority class (label = 0)
n = df.label.value_counts()[0]
# Upsample minority class
df_minority_upsampled = resample(df_minority,
replace=True, # sample with replacement
n_samples=n, # to match majority class
random_state=123) # reproducible results
# downsample majority class
# df_majority_downsampled = resample(df_majority,
# replace=False, # sample with replacement
# n_samples=n, # to match majority class
# random_state=123) # reproducible results
# Combine majority class with upsampled minority class
df_upsampled = pd.concat([df_majority, df_minority_upsampled])
# df_downsampled = pd.concat([df_minority, df_majority_downsampled])
# -
df_upsampled = shuffle(df_upsampled)
df_upsampled.to_csv('train_upsampled_shuffled.csv', index=False)
df.shape
# # Transform categorical and text features
df = pd.read_csv('train_upsampled_shuffled.csv')
X_train = df.iloc[:,1:-1]
y_train = df.iloc[:,-1]
# +
df_dev = pd.read_csv('dev.csv')
X_dev = df_dev.iloc[:,1:-1]
y_dev = df_dev.iloc[:,-1]
df_test = pd.read_csv('test.csv')
X_test = df_test.iloc[:,1:-1]
y_test = df_test.iloc[:,-1]
# -
df_dev.shape
X_train
# +
### separate the 3 tpye of features ###
X_train_text = X_train.category_list
X_train_country = X_train.country_code
X_train_nums = X_train.drop(columns=['category_list','country_code'])
X_dev_text = X_dev.category_list
X_dev_country = X_dev.country_code
X_dev_nums = X_dev.drop(columns=['category_list','country_code'])
X_test_text = X_test.category_list
X_test_country = X_test.country_code
X_test_nums = X_test.drop(columns=['category_list','country_code'])
# +
# encode text feature
X_train.category_list = X_train.category_list.astype(str)
vectorizer1 = CountVectorizer(min_df=5)
vectorizer1.fit(X_train.category_list)
X_train_text = vectorizer1.transform(X_train.category_list)
X_dev_text = vectorizer1.transform(X_dev.category_list)
X_test_text = vectorizer1.transform(X_test.category_list)
# +
# Encode the categorical country-code feature as a bag-of-words matrix.
X_train.country_code = X_train.country_code.astype(str)
vectorizer2 = CountVectorizer(min_df=1)
# BUG FIX: the vectorizer must be fit on the country codes, not on
# category_list — the original fit on the wrong column, so transform()
# used a vocabulary that never matches the country strings.
vectorizer2.fit(X_train.country_code)
X_train_country = vectorizer2.transform(X_train.country_code)
X_dev_country = vectorizer2.transform(X_dev.country_code)
X_test_country = vectorizer2.transform(X_test.country_code)
# -
X_train_text.toarray()
# # Conduct feature scaling/normalization for numerical features
scaler = sklearn.preprocessing.StandardScaler()
scaler.fit(X_train_nums)
X_train_nums = scaler.transform(X_train_nums)
X_dev_nums = scaler.transform(X_dev_nums)
X_test_nums = scaler.transform(X_test_nums)
# concatenate inputs into ONE single input X
X_train_con = hstack([X_train_nums, X_train_country, X_train_text])
X_dev_con = hstack([X_dev_nums, X_dev_country, X_dev_text])
X_test_con = hstack([X_test_nums, X_test_country, X_test_text])
# # Feed models
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
from sklearn import ensemble
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_curve, roc_auc_score, auc
from scipy import interp
# %matplotlib inline
# +
#### Random Forest ####
# Fit on the upsampled training set; evaluate on the dev set.
model_RF = sklearn.ensemble.RandomForestClassifier(n_estimators=25, random_state=521)
model_RF.fit(X_train_con, y_train)
y_pred = model_RF.predict(X_dev_con)
acc = accuracy_score(y_dev, y_pred)
# BUG FIX: f1_score was never imported by name; call it through the
# `metrics` module (`from sklearn import metrics` above).
f1 = metrics.f1_score(y_dev, y_pred)
print(acc)
print(f1)
cm = confusion_matrix(y_dev, y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Oranges');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
plt.title('Random Forest Classifier', size = 15);
plt.savefig('Confusion_matrix_RF.png')
# True/false positive rates read off the confusion matrix (rows = actual).
tpr = cm[1,1]/(cm[1,1] + cm[1,0])
fpr = cm[0,1]/(cm[0,1] + cm[0,0])
print('TPR: {0}'.format(tpr))
print('FPR: {0}'.format(fpr))
# -
# +
#### Logistic Regression #####
model_LR = LogisticRegression()
model_LR.fit(X_train_con, y_train)
y_pred = model_LR.predict(X_dev_con)
acc = accuracy_score(y_dev, y_pred)
print(acc)
# BUG FIX: f1_score was never imported by name; call it through the
# `metrics` module (`from sklearn import metrics` above).
f1 = metrics.f1_score(y_dev, y_pred)
print(f1)
# BUG FIX: `score` was undefined here; report the computed accuracy.
print('Accuracy Score: {0}'.format(acc))
cm = confusion_matrix(y_dev, y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Oranges');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
plt.title('Logistic Regression Classifier', size = 15);
plt.savefig('Confusion_matrix_LR.png')
# True/false positive rates read off the confusion matrix (rows = actual).
tpr = cm[1,1]/(cm[1,1] + cm[1,0])
fpr = cm[0,1]/(cm[0,1] + cm[0,0])
print('TPR: {0}'.format(tpr))
print('FPR: {0}'.format(fpr))
# +
# probas_ = model_LR.predict_proba(X_dev_con)
# tprs = []
# aucs = []
# mean_fpr = np.linspace(0, 1, 100)
# # Compute ROC curve and area the curve
# fpr, tpr, thresholds = roc_curve(y_dev, probas_[:, 1])
# tprs.append(interp(mean_fpr, fpr, tpr))
# tprs[-1][0] = 0.0
# roc_auc = auc(fpr, tpr)
# aucs.append(roc_auc)
# plt.plot(fpr, tpr, lw=1, alpha=0.3,
# label='ROC (AUC = %0.2f)' % roc_auc)
# # plt.plot(x,x, "--")
# # plt.set_xlim([0,1])
# # plt.set_ylim([0,1])
# plt.title("ROC Curve", fontsize=14)
# plt.ylabel('TPR', fontsize=12)
# plt.xlabel('FPR', fontsize=12)
# plt.savefig('ROC_curve.png')
# print(roc_auc)
# -
# +
#### K-Nearest Neighbors ####
# Explicit submodule import: `import sklearn` alone does not guarantee that
# sklearn.neighbors is loaded as an attribute of the sklearn package.
from sklearn.neighbors import KNeighborsClassifier

model_knn = KNeighborsClassifier(n_neighbors=90)
model_knn.fit(X_train_con, y_train)
y_pred = model_knn.predict(X_dev_con)
acc = accuracy_score(y_dev, y_pred)
f1 = sklearn.metrics.f1_score(y_dev, y_pred)
print(acc)
print(f1)
cm = confusion_matrix(y_dev, y_pred)
plt.figure(figsize=(9,9))
sns.heatmap(cm, annot=True, fmt=".3f", linewidths=.5, square = True, cmap = 'Oranges');
plt.ylabel('Actual label');
plt.xlabel('Predicted label');
plt.title('KNN Classifier', size = 15);
plt.savefig('Confusion_matrix_KNN.png')
# True/false positive rates read off the confusion matrix (rows = actual).
tpr = cm[1,1]/(cm[1,1] + cm[1,0])
fpr = cm[0,1]/(cm[0,1] + cm[0,0])
print('TPR: {0}'.format(tpr))
print('FPR: {0}'.format(fpr))
# -
# # Choose KNN and Test it on the test set!
# +
# Final evaluation on the held-out test set.
# Explicit submodule import: `import sklearn` alone does not guarantee that
# sklearn.neighbors is loaded as an attribute of the sklearn package.
from sklearn.neighbors import KNeighborsClassifier

model_knn = KNeighborsClassifier(n_neighbors=90)
model_knn.fit(X_train_con, y_train)
y_pred = model_knn.predict(X_test_con)
acc = accuracy_score(y_test, y_pred)
f1 = sklearn.metrics.f1_score(y_test, y_pred)
print('Accuracy: {0}'.format(acc))
print('F1 score: {0}'.format(f1))
# +
model_RF = sklearn.ensemble.RandomForestClassifier(n_estimators=25, random_state=521)
model_RF.fit(X_train_con, y_train)
y_pred = model_RF.predict(X_test_con)
acc = accuracy_score(y_test, y_pred)
# BUG FIX: f1_score was never imported by name; call it through the
# `metrics` module (`from sklearn import metrics` above).
f1 = metrics.f1_score(y_test, y_pred)
print(acc)
print(f1)
# -
| Meta-Learning-create-sub-dataset.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import nibabel as nib
from nilearn import image
import os
import glob
from scipy import ndimage
from tqdm import tqdm
from scipy import ndimage
import random
from shutil import copyfile
# # Folder Structure Before Processing
#
# Download the image zips created for HAW Landshut(s3://deepc-landshut-data/). Unzip the images on the Dataset/ folder
#
# ```
# Dataset
# │
# │
# └───MSSEG2/timestamp2
# │ │
# │ └───segs
# │ | │
# │ | └───anatomy_seg
# │ | └───matter_seg
# │ | │
# │ | │ gt_013.nii.gz
# │ | │ ...
# │ |
# │ | 013.nii.gz
# │ | ...
# │
# │
# └───NAMIC_DS/NAMIC
# │ │
# │ └───lesion_segs
# │ | │
# │ | │ lupus001.nii.gz
# │ | │ ...
# │ |
# │ | lupus001.nii.gz
# │ | ...
# │
# │
# └───OpenMSData/crossectional
# │ │
# │ └───lesion_segs
# │ | │
# │ | │ patient01.nii.gz
# │ | │ ...
# │ |
# │ | patient01.nii.gz
# │ | ...
# │
# │
# └───OpenMSData/longitudnal
# │ │
# │ └───lesion_segs
# │ | │
# │ | │ patient01.nii.gz
# │ | │ ...
# │ |
# │ | patient01.nii.gz
# │ | ...
# ```
# +
# get all flair files
# -
imgs = sorted(glob.glob('training/**/preprocessed/*flair*.nii', recursive=True))
mask1 = sorted(glob.glob('training/**/masks/*mask1.nii', recursive=True))
mask2 = sorted(glob.glob('training/**/masks/*mask2.nii', recursive=True))
assert len(imgs)== len(mask1) == len(mask2)
name = 'Train'
os.makedirs(name, exist_ok=True)
out_img_path = os.path.join(name, "Images")
out_seg_path = os.path.join(name, "segments")
os.makedirs(out_img_path, exist_ok=True)
os.makedirs(out_seg_path, exist_ok=True)
# For each FLAIR image: copy it (gzipped) into Train/Images and build a
# consensus lesion mask from the two raters' masks into Train/segments.
for i , img_path in tqdm(enumerate(imgs)):
    # Reuse rater-1's header/affine so the saved mask stays aligned with the image.
    hdr = nib.load(mask1[i]).header
    aff = nib.load(mask1[i]).affine
    img_name = img_path.split('/')[-1]+'.gz'
    nii_img = nib.load(img_path)
    nib.save(nii_img, os.path.join(out_img_path, img_name))
    nib.load(os.path.join(out_img_path, img_name))  # sanity check: saved file is readable
    # consensus mechanism: sum the two binary masks — 2 means both raters
    # agree (keep as lesion), 1 means only one rater marked it (drop).
    mask_array = nib.load(mask1[i]).get_fdata()+nib.load(mask2[i]).get_fdata()
    mask_array[mask_array==1]=0
    mask_array[mask_array==2]=1
    nifti_img = nib.Nifti1Image(mask_array, aff, hdr)
    nib.save(nifti_img, os.path.join(out_seg_path, img_name))
    nib.load(os.path.join(out_seg_path, img_name))  # sanity check: saved mask is readable
img_path.split('/')[-1]+'.gz'
nib.load("/home/ubuntu/nnUNet/JSRT/Dataset/nnUNet_raw/nnUNet_raw_data/Task503_MSLesionFlairISBI/imagesTr/training01_01_flair_pp_0000.nii.gz")
os.path.join(out_img_path, img_name)
| ISBI/Dataset/Preprocess For nnUNet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://colab.research.google.com/github/joanby/python-ml-course/blob/master/notebooks/T11%20-%203%20-%20Reconocimiento%20de%20texto%20escrito-Colab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# # Clonamos el repositorio para obtener los dataSet
# !git clone https://github.com/joanby/python-ml-course.git
# # Damos acceso a nuestro Drive
from google.colab import drive
drive.mount('/content/drive')
# Test it
# !ls '/content/drive/My Drive'
from google.colab import files # Para manejar los archivos y, por ejemplo, exportar a su navegador
import glob # Para manejar los archivos y, por ejemplo, exportar a su navegador
from google.colab import drive # Montar tu Google drive
# %tensorflow_version 1.x
# # El dataset de MNIST
import tensorflow as tf
print(tf.__version__)
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data", one_hot = True)
len(mnist.train.images)
len(mnist.test.images)
im_temp = mnist.train.images[0]
from skimage import io
import numpy as np
io.imshow(np.reshape(im_temp, (28,28)))
mnist.train.labels[0]
# # Una red neuronal con Tensor Flow - v1
# * Las imágenes de entrenamiento de MNIST viven en un espacio vectorial de dimensión 784.
# * El dataset se puede pensar como 55000 filas y 784 columnas.
# * Cada dato del datset es un número real entre 0 y 1.
#
# y = softmax(W * x + b)
dim_input = 784
n_categories = 10
x = tf.placeholder(tf.float32, [None, dim_input])
W = tf.Variable(tf.zeros([dim_input,n_categories]))
b = tf.Variable(tf.zeros([n_categories]))
softmax_args = tf.matmul(x,W) + b
y_hat = tf.nn.softmax(softmax_args)
# #### Entrenando la red neuronal
# * Loss / Cost <- objetivo minimizar las pérdidas
from IPython.display import display, Math, Latex
display(Math(r"H_{y}(\hat{y}) = -\sum_{i} y_i log(\hat{y_i})"))
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y_hat), reduction_indices=[1]))
# +
#tf.nn.softmax_cross_entropy_with_logits(softmax_args, y_)
# -
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
session = tf.InteractiveSession()
tf.global_variables_initializer().run()
for _ in range(10000):
batch_x, batch_y = mnist.train.next_batch(150)
session.run(train_step, feed_dict={x:batch_x, y_: batch_y})
# #### Evaluando la red neuronal
#
#
correct_predictions = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_,1))
accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))
print(session.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
| notebooks/T11 - 3 - Reconocimiento de texto escrito-Colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/SanjibSarkarU/colab-NN/blob/main/lstm.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="mI9_CNCMW5Q3" outputId="055820cd-42b7-401b-b6b5-0ac715055424"
# !pip install tensorflow-gpu
# + id="3NzvErHZW--3"
import numpy as np
import tensorflow as tf
# from tensorflow.keras.models import Sequential
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.colors as pltc
from keras.models import Sequential
from keras.layers import Dense, LSTM, SimpleRNN
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
plt.style.use('dark_background')
# + id="3UgPedKAYLjZ"
df = pd.read_csv('20220222_153859_hlc.csv', skiprows=1)
df = df.rename(columns={"Latitude (Deg N)": 'lat', "Longitude (Deg W)": 'lng'}, errors="raise")
df = df.loc[(df['lat'] != 0)]
# + id="2RdYz1YhMN4f"
colors = [k for k,v in pltc.cnames.items()]
# + id="xBc9fxfkYrBZ"
# df.dtypes
# + id="v8x8Qb2vmhty"
# df.lat.plot()
# + id="TOOzDjdwZdrZ"
start = 10500
end = 13700
series_time = df.Time[start:end].to_numpy()
series_lat = df.lat[start:end].to_numpy()
series_lng = df.lng[start:end].to_numpy()
xaxis = np.array([i for i in range(len(series_lat))])
# + id="GrDrGxsufAZ8"
# type(series_lat)
# xaxis.shape
# + id="JMhm2VLaI6Cn"
# series_lat
# + id="DNlPuBtbbBAC"
def plot_dataset(xaxis, yaxis, xlabel= 'xaxis', ylabel= 'yaxis', color='r'):
    """Plot yaxis against xaxis on the current figure and label both axes."""
    plt.plot(xaxis, yaxis, color=color)
    for labeler, text in ((plt.xlabel, xlabel), (plt.ylabel, ylabel)):
        labeler(text)
# + id="SoUcS3J8gtQt"
def train_test_split(dataset, percentage):
    """Split a 2-D array chronologically into train and test partitions.

    Args:
        dataset: 2-D array-like supporting ``arr[a:b, :]`` slicing
            (e.g. a numpy array of shape (n_samples, n_features)).
        percentage: fraction in [0, 1] of rows assigned to the training split.

    Returns:
        (train, test): the first ``int(len(dataset) * percentage)`` rows and
        the remaining rows. No shuffling — order is preserved, which is what
        a time series needs.
    """
    train_size = int(len(dataset) * percentage)
    # (Removed an unused, misspelled `test_seze` variable from the original.)
    train = dataset[:train_size, :]
    test = dataset[train_size:, :]
    return train, test
# + id="zhoR9AHtZgya"
# plot_dataset(xaxis, series_lat)
# + id="GFkGJ3v2c53h"
# plot_dataset(xaxis, series_lng)
# + id="OKnIzmUcdvyt"
# plot_dataset(series_lng, series_lat)
# + id="Re4Kz7-xfUX0"
# reshape
series_lat = np.reshape(series_lat,(len(series_lat),1))
series_lng = np.reshape(series_lng,(len(series_lng),1))
xaxis = np.reshape(xaxis,(len(xaxis),1))
# + id="XDAJOpm0eX51"
# # Normalization is optional but recommended for neural network as certain
# # activation functions are sensitive to magnitude of numbers.
# # normalize the dataset
# scaler = MinMaxScaler(feature_range=(0, 1)) #Also try QuantileTransformer
# + id="rvUsUriTF3Qd"
# series_lat = scaler.fit_transform(series_lat)
# # series_lng = scaler.fit_transform(series_lng)
# + colab={"base_uri": "https://localhost:8080/"} id="-JYxilAQgo47" outputId="76127a4d-be4f-4bb4-d017-98d1d646f22b"
# split into train and test sets
train_lat, test_lat = train_test_split(series_lat, 0.7)
train_lng, test_lng = train_test_split(series_lng, 0.7)
train_xaxis, test_xaxis = train_test_split(xaxis, 0.7)
train_lat.shape
# + id="CB7qabiIynbE"
tl= test_lat
# + id="hTX83Ib4moJw"
def test_data_preparation(series, window_size, batch_size):
    """Turn ``series`` into a batched tf.data.Dataset of sliding windows.

    Each element is a full window of length ``window_size`` (shift 1, partial
    trailing windows dropped); windows are batched and prefetched. Used at
    inference time, so no shuffling and no (input, target) split.
    """
    dataset = tf.data.Dataset.from_tensor_slices(series)
    dataset = dataset.window(window_size, shift=1, drop_remainder=True)
    # window() yields a dataset of sub-datasets; flatten each into one tensor.
    dataset = dataset.flat_map(lambda window: window.batch(window_size))
    # dataset = dataset.shuffle(shuffle_buffer).map(lambda window: (window[:-1], window[-1:]))
    dataset = dataset.batch(batch_size).prefetch(1)
    return dataset
# + id="8k5f-ysuUshp"
from keras.preprocessing.sequence import TimeseriesGenerator
def ts_data_preparation(features, window_size=5, future=1, batch_size =1, shuffle=False):
    """Build a Keras TimeseriesGenerator predicting ``future`` steps ahead.

    Targets are the features shifted forward by ``future - 1`` samples, and
    the feature array is trimmed so both arrays stay the same length.
    With future=1 this reduces to plain next-step prediction.
    """
    target = features[(future-1):]
    # Trim the tail so features and target have equal length after the shift.
    features = features[:-(future-1)] if future != 1 else features
    tensor = TimeseriesGenerator(features, target, length=window_size, batch_size=batch_size, shuffle=shuffle)
    return tensor
# + id="aQInDrnQp1J5"
def model_lstm(window_size):
    """Create and compile a 2-layer stacked LSTM regressor.

    Input shape is (window_size, 1); output is a single predicted value.
    Compiled with SGD (very small learning rate 1e-7, momentum 0.9) and
    MSE loss. Prints the model summary as a side effect.
    """
    tf.keras.backend.clear_session()  # start each call from a fresh graph/state
    model = tf.keras.models.Sequential([
        tf.keras.layers.LSTM(50, activation='relu', return_sequences=True, input_shape=(window_size,1)),
        tf.keras.layers.LSTM(50, activation='relu'),
        tf.keras.layers.Dense(1)])
    model.compile(optimizer= tf.keras.optimizers.SGD(learning_rate=1e-7, momentum=0.9),
                  loss='mse')
    model.summary()
    # print('Train...', model.summary())
    return model
# + id="O-QEemLO94Hb" colab={"base_uri": "https://localhost:8080/", "height": 1000} outputId="c84f6421-3711-488f-f443-a78166c84b1e"
for i in range(1, 21, 4):
window_size=10
future =i
batch_size= 32
train_x_lat = ts_data_preparation(train_lat, window_size=window_size, future=future, batch_size=batch_size, shuffle=True)
train_x_lng = ts_data_preparation(train_lng, window_size=window_size, future=future, batch_size=batch_size, shuffle=True)
# test_lat = test_data_preparation(test_lat, window_size=window_size, batch_size=batch_size)
model = model_lstm(window_size)
epochs = 50
model.fit(train_x_lat, epochs=epochs, verbose=0)
# testPredict_lat = model.predict(test_lat)
predict_all_lat = model.predict(test_data_preparation(series_lat, window_size=window_size, batch_size=batch_size))
plt.plot(predict_all_lat, color = colors[11+i], label = f'{future} sec future')
plt.plot(series_lat, color ='r', label= 'original' )
plt.title(f'Window: {window_size}')
plt.legend()
# + id="XCuqCpXmNEBE"
# plt.plot(series_lat, color ='b', label= f'{future} sec future' )
# plt.legend()
# + id="srxDLwhRkLJ-"
# plt.plot(testPredict_lat, color='b', label=f'{future} sec future')
# plt.plot(tl, color = 'r', label = 'Original')
# plt.legend()
# + id="HLPM2LkgX9QM"
# predict_all_lat = model.predict(test_data_preparation(series_lat, window_size=window_size, batch_size=batch_size))
# plt.plot(series_lat, color ='b', label='Original')
# plt.plot(predict_all_lat, color = 'r', label=f'{future} sec future')
# plt.legend()
| lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-lens-conversion]
# language: python
# name: conda-env-miniconda3-lens-conversion-py
# ---
# # Create DART Reanalysis Intake Catalog for AWS or Stratus S3 Bucket
# +
import pandas as pd
from intake.source.utils import reverse_format
from tqdm.auto import tqdm
#import s3fs
import os
from pathlib import Path
import xarray as xr
# -
# ## Define a Few Metadata Concepts
# +
# Published variables for each model component, and the output frequency
# of that component's Zarr stores.
variables = {
    'atm': {'PS', 'Q', 'T', 'US', 'VS', 'CLDLIQ', 'CLDICE'},
    'lnd': {'ER', 'HR', 'TSA', 'EFLX_LH_TOT'},
}
frequencies = {
    'atm': 'weekly',
    'lnd': 'hourly6',
}
# + [markdown] tags=[]
# ## Define Zarr store location for pulling metadata values.
# +
# Catalog entries will point at this S3 bucket once the stores are published.
s3_root = "s3://ncar-dart-reanalysis/"

# Use if pulling Zarr metadata from Glade
zarr_dir = Path('/glade/scratch/bonnland/DART/ds345.0/zarr-publish/')

# Use if pulling Zarr metadata from AWS
#fs = s3fs.S3FileSystem(anon=True)
# -
# ## Get list of available Zarr Stores
def get_file_list(store_path):
    """Return relative paths of all Zarr stores under ``store_path``.

    The top level of ``store_path`` is expected to hold one directory per
    output frequency (e.g. ``weekly``, ``hourly6``) plus a ``catalogs``
    directory, which is skipped.  Each frequency directory contains
    ``<variable>.zarr`` stores.

    Parameters
    ----------
    store_path : pathlib.Path
        Root directory holding the frequency subdirectories.

    Returns
    -------
    list of str
        Store paths relative to ``store_path``, e.g. ``'weekly/T.zarr'``.
    """
    store_path_str = store_path.as_posix()
    # Skip the 'catalogs' directory; every other entry is a frequency folder.
    # Renamed from `frequencies` to avoid shadowing the module-level dict.
    # NOTE(review): the tqdm progress wrapper and per-frequency debug print
    # were removed; the returned list is unchanged.
    freq_dirs = [f for f in os.listdir(store_path_str) if 'catalogs' not in f]
    stores = []
    for frequency in freq_dirs:
        objects = [os.path.relpath(x, start=store_path_str)
                   for x in store_path.glob(f"{frequency}/*.zarr")]
        stores.extend(objects)
    return stores
# +
#stores = get_file_list(fs)
# Discover stores under the Glade root, then expand each relative entry
# into a full local path.
relative_stores = get_file_list(zarr_dir)
stores = [f"{zarr_dir}/{rel}" for rel in relative_stores]
#stores = [f"{s3_root}{store}" for store in stores]
stores
# -
# ## Extract Relevant Store Metadata
#def get_filename_attrs(store, fs=fs):
def get_filename_attrs(store):
    """Parse frequency/variable metadata out of a Zarr store's path.

    Parameters
    ----------
    store : str or pathlib.Path
        Full path to a Zarr store underneath ``zarr_dir``.

    Returns
    -------
    dict
        Keys ``frequency`` and ``variable`` parsed from the relative path,
        plus ``path``, the store's eventual S3 location under ``s3_root``.
    """
    relative_path = os.path.relpath(store, start=zarr_dir)
    # Relative paths look like '<frequency>/<variable>.zarr'.
    template = "{frequency}/{variable}.zarr"
    attrs = reverse_format(template, relative_path)
    # Published catalog entries point at the S3 copy, not the Glade copy.
    attrs['path'] = f'{s3_root}' + relative_path
    return attrs
def get_store_attrs(store_path):
    """Return the full catalog metadata dict for the Zarr store at ``store_path``."""
    file_attrs = get_filename_attrs(store_path)
    var_name = file_attrs['variable']
    #store = s3fs.S3Map(root=store_path, s3=fs)
    #ds = xr.open_zarr(store)
    ds = xr.open_zarr(store_path)
    var_attrs = ds[var_name].attrs
    times = ds['time'].values
    attrs = {
        'long_name': var_attrs['long_name'],
        'units': var_attrs['units'],
        # Membership in the 'atm' variable set decides the component.
        'component': 'atm' if var_name in variables['atm'] else 'lnd',
        'standard_name': 'unspecified',
        'spatial_domain': 'global',
        # Surface fields report one level; otherwise use the 'lev' size.
        'vertical_levels': ds.sizes['lev'] if 'lev' in ds[var_name].dims else 1,
        'start_time': pd.to_datetime(str(times[0])).isoformat(),
        'end_time': pd.to_datetime(str(times[-1])).isoformat(),
    }
    attrs.update(file_attrs)
    return attrs
# Total number of Zarr stores discovered.
len(stores)
# Check validity with first few stores
#stores = stores[0:5]
stores
# + [markdown] tags=[]
# ## Extract File Attributes of Zarr stores
# + tags=[]
# %%time
# Open each store and collect its catalog metadata (slow: reads every store).
entries = list(map(get_store_attrs, stores))
# -
# Spot-check the last entry.
entries[-1]
# ## Create Pandas DataFrame and Save to CSV File
# +
# Build the catalog table, one row per Zarr store.
df = pd.DataFrame(entries)

# Reorder catalog columns: descriptive fields first, 'path' last.
catalog_order = [
    'variable', 'long_name', 'units', 'standard_name', 'vertical_levels',
    'component', 'spatial_domain',
    'start_time', 'end_time',
    'frequency', 'path',
]
df = df.reindex(columns=catalog_order)
df.head()
# +
# Make 'path' the final column in the DataFrame
#path = df.pop('path')
#df['path'] = path
#df.head()
# -
len(df)
# + tags=[]
df.to_csv("../../catalogs/aws-dart-reanalysis.csv", index=False)
# -
| builders/notebooks/aws-dart_catalog_builder.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia O3 1.6.0
# language: julia
# name: julia-o3-1.6
# ---
using Rocket
# A __Subject__ is a sort of bridge or proxy that is available in some implementations of reactive frameworks that acts both as an observer and as an Observable. Because it is an observer, it can subscribe to one or more Observables, and because it is an Observable, it can pass through the items it observes by reemitting them, and it can also emit new items.
#
# Because a Subject subscribes to an Observable, it will trigger that Observable to begin emitting items (if that Observable is “cold” — that is, if it waits for a subscription before it begins to emit items). This can have the effect of making the resulting Subject a “hot” Observable variant of the original “cold” Observable.
#
# See also:
# - [ReactiveX: Subject](http://reactivex.io/documentation/subject.html)
# - [Introduction to Rx: Subject](http://introtorx.com/Content/v1.0.10621.0/02_KeyTypes.html#Subject)
# - [To Use Subject or Not To Use Subject](https://www.davesexton.com/blog/post/To-Use-Subject-Or-Not-To-Use-Subject.aspx)
# ### Subject execution modes
#
# Rocket.jl supports both synchronous and asynchronous execution modes for any Subject-like object.
# +
# An Int-typed Subject with the synchronous (as-soon-as-possible) scheduler:
# every `next!` delivers to subscribed actors before it returns.
sync_subject = Subject(Int, scheduler = AsapScheduler())

println("Before subscription")
# `logger` is an actor that prints each event it receives.
subscription = subscribe!(sync_subject, logger("sync_subject logger"))
println("After subscription")

println("Before next")
# Delivered synchronously: the logger prints before "After next".
next!(sync_subject, 1)
println("After next")

# After unsubscribing, further emissions are no longer observed.
unsubscribe!(subscription)
next!(sync_subject, 2)
# -
# Default scheduler is an `AsapScheduler`.
# +
# The same demo with AsyncScheduler: deliveries are queued as tasks and only
# run once control yields back to the Julia task scheduler.
async_subject = Subject(Int, scheduler = AsyncScheduler())

println("Before subscription")
subscription = subscribe!(async_subject, (d) -> println(d))
println("After subscription")

println("Before next")
# Not delivered yet: the emission is scheduled asynchronously.
next!(async_subject, 1)
println("After next")

# Yield so the scheduled subscription/emission tasks can actually run.
yield()
yield()

unsubscribe!(subscription)
yield()

# Emitted after unsubscription, so it is never printed.
next!(async_subject, 2)
# -
# ### Subject as an actor
#
# It is possible to use some Subject as an Actor. This is the only way to share (multicast) a single observable execution between multiple listeners.
# +
# A cold observable of the integers 1..5.
source = from(1:5)
# The subject multicasts the source's emissions to all of its subscribers.
subject = Subject(Int)

subscription1 = subscribe!(subject, logger("1"))
subscription2 = subscribe!(subject, logger("2"))

# Using the subject as an actor: a single execution of `source` is shared
# by both loggers.
subscribe!(source, subject)

unsubscribe!(subscription1)
unsubscribe!(subscription2)
# -
# ### Varieties of Subject
#
# There are few varieties of Subject that are designed for particular use cases. Not all of these are available in all implementations, and some implementations use other naming conventions:
# #### BehaviorSubject
#
# When an observer subscribes to a BehaviorSubject, it begins by emitting the item most recently emitted by the source Observable (or a seed/default value if none has yet been emitted) and then continues to emit any other items emitted later by the source Observable(s).
# +
# BehaviorSubject replays its most recent value (seeded here with 1) to each
# new subscriber, then behaves like an ordinary subject.
bsubject = BehaviorSubject(Int, 1)

# Immediately receives the current value, 1.
subscription1 = subscribe!(bsubject, logger("1"))

next!(bsubject, 2)

# Immediately receives the latest value, 2.
subscription2 = subscribe!(bsubject, logger("2"))

next!(bsubject, 3)

unsubscribe!(subscription1)
unsubscribe!(subscription2)
# -
# #### ReplaySubject
#
# ReplaySubject emits to any observer all of the items that were emitted by the source Observable(s), regardless of when the observer subscribes.
# +
# ReplaySubject buffers the last 2 emissions and replays them to every new
# subscriber.
rsubject = ReplaySubject(Int, 2)

next!(rsubject, 0);

# Receives the replayed 0, then subsequent live values.
subscription1 = subscribe!(rsubject, logger("1"))

next!(rsubject, 2)

# Receives the replayed buffer (0 and 2) before any live values.
subscription2 = subscribe!(rsubject, logger("2"))

next!(rsubject, 3)

unsubscribe!(subscription1)
unsubscribe!(subscription2)
# -
| demo/03_subjects.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chapter 5: Compressing Data via Dimensionality Reduction
#
# ## PCA
#
# Loading the data:
# +
import pandas as pd

# Load the UCI Wine dataset (the file has no header row) and label the
# columns with the class label plus the 13 measured attributes.
wine_columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash',
                'Alcalinity of ash', 'Magnesium', 'Total phenols',
                'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins',
                'Color intensity', 'Hue', 'OD280/OD315 of diluted wines',
                'Proline']
df_wine = pd.read_csv('wine.data', names=wine_columns)
df_wine.head()
# -
# Splitting, scaling features:
# +
# `sklearn.cross_validation` was deprecated in scikit-learn 0.18 and removed
# in 0.20; `train_test_split` now lives in `sklearn.model_selection`.
from sklearn.model_selection import train_test_split

# First column is the class label; the remaining 13 columns are features.
X, y = df_wine.iloc[:, 1:].values, df_wine.iloc[:, 0].values
# Hold out 30% of the samples for testing, with a fixed seed for
# reproducibility.
X_train, X_test, y_train, y_test = \
    train_test_split(X, y, test_size=0.3, random_state=0)
# +
from sklearn.preprocessing import StandardScaler

# Standardize features to zero mean / unit variance.  The scaler is fit on
# the training split only and then applied unchanged to the test split.
sc = StandardScaler().fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# -
# ### Covariance Matrix and its Eigenvectors
# +
import numpy as np

# Covariance matrix of the standardized training features (13 x 13);
# np.cov expects variables in rows, hence the transpose.
cov_mat = np.cov(X_train_std.T)
# Eigendecomposition: each eigenvalue is the variance along its eigenvector.
eigen_vals, eigen_vecs = np.linalg.eig(cov_mat)
print(f'\nEigenvalues \n{sorted(eigen_vals, reverse=True)}')
# -
# Let's use SVD too, as is often recommended for numerical stability, and verify we can compute the same thing:
# +
import numpy.linalg

# SVD of the (symmetric, positive semi-definite) covariance matrix: its
# singular values equal the eigenvalues, so this should match the output above.
U, S, V = numpy.linalg.svd(cov_mat)
print(f'\nEigenvalues \n{sorted(S, reverse=True)}')
# -
# ### Total and explained variance
# Explained-variance ratio of each component (largest first) and the
# running cumulative total.
tot = sum(eigen_vals)
var_exp = [(eig_val / tot) for eig_val in sorted(eigen_vals, reverse=True)]
cum_var_exp = np.cumsum(var_exp)
# +
import matplotlib.pyplot as plt
# %matplotlib inline
# %config InlineBackend.figure_format = 'retina'

# Per-component variance ratios as bars, overlaid with a step plot of
# their cumulative sum, for components 1..13.
components = range(1, 14)
plt.bar(components, var_exp, alpha=0.5, align='center',
        label='individual explained variance')
plt.step(components, cum_var_exp, where='mid',
         label='cumulative explained variance')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.legend(loc='best')
plt.tight_layout()
plt.show()
# -
# Cross-referencing with the standard machine-learning course heuristic, we could use this information to choose K by seeing how many components it will take to preserve 90% of the variance:
# Report the cumulative variance preserved by keeping the first k components.
for k, cum_var in enumerate(cum_var_exp, start=1):
    print(f"with {k} dimensions we preserve {cum_var:.2f} of variance")
# Going with 7 or 8 dimensions would be a sensible choice.
#
# ## Feature transformation
# +
# Make a list of (eigenvalue, eigenvector) tuples
eigen_pairs = [(np.abs(eigen_vals[i]), eigen_vecs[:, i])
               for i in range(len(eigen_vals))]

# Sort the (eigenvalue, eigenvector) tuples from high to low.
# Sort on the eigenvalue only: without `key`, two equal eigenvalues would
# make Python compare the eigenvector arrays as tie-breakers, which raises
# "ValueError: The truth value of an array ... is ambiguous".
eigen_pairs.sort(key=lambda pair: pair[0], reverse=True)

# Projection matrix W (13 x 2) from the top-2 eigenvectors.
w = np.hstack((eigen_pairs[0][1][:, np.newaxis],
               eigen_pairs[1][1][:, np.newaxis]))

# Project the standardized training data onto the first two PCs.
X_train_pca = X_train_std.dot(w)

# Scatter the projected samples, one color/marker per class label.
colors = ['r', 'b', 'g']
markers = ['s', 'x', 'o']
for l, c, m in zip(np.unique(y_train), colors, markers):
    plt.scatter(X_train_pca[y_train == l, 0],
                X_train_pca[y_train == l, 1],
                c=c, label=l, marker=m)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/pca2.png', dpi=300)
plt.show()
# -
# ## Scikit-learn's PCA
# +
from sklearn.decomposition import PCA

# Fit PCA keeping all 13 components so every variance ratio is available.
pca = PCA()
X_train_pca = pca.fit_transform(X_train_std)
# Per-component explained-variance ratios, sorted from largest to smallest.
pca.explained_variance_ratio_
# -
# Same variance plot as before, this time using scikit-learn's ratios.
ratios = pca.explained_variance_ratio_
plt.bar(range(1, 14), ratios, alpha=0.5, align='center')
plt.step(range(1, 14), np.cumsum(ratios), where='mid')
plt.ylabel('Explained variance ratio')
plt.xlabel('Principal components')
plt.show()
# +
# Refit, keeping only the first two principal components.
pca = PCA(n_components=2)
X_train_pca = pca.fit_transform(X_train_std)
# Apply the projection learned from the training set to the test set.
X_test_pca = pca.transform(X_test_std)
# Training data in the 2-D principal-component space.
plt.scatter(X_train_pca[:, 0], X_train_pca[:, 1])
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.show()
# -
# ## Training Logistic Regression on reduced matrix
# +
from matplotlib.colors import ListedColormap
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot a classifier's decision surface over 2-D data.

    X is an (n_samples, 2) array, y the class labels, `classifier` any
    object with a ``predict`` method, and `resolution` the meshgrid step.
    """
    # One marker and color per class (supports up to five classes).
    marker_cycle = ('s', 'x', 'o', '^', 'v')
    color_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(color_cycle[:len(np.unique(y))])

    # Evaluate the classifier on a dense grid covering the data range,
    # padded by one unit on every side.
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    grid_points = np.array([xx1.ravel(), xx2.ravel()]).T
    Z = classifier.predict(grid_points).reshape(xx1.shape)

    # Shade the decision regions and clamp the axes to the grid extent.
    plt.contourf(xx1, xx2, Z, alpha=0.4, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Overlay the actual samples, class by class.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1],
                    alpha=0.8, c=cmap(idx),
                    marker=marker_cycle[idx], label=cl)
# +
from sklearn.linear_model import LogisticRegression

# Fit logistic regression on the 2-D PCA-projected training data
# (``fit`` returns the estimator, so construction and fitting chain).
lr = LogisticRegression().fit(X_train_pca, y_train)
# -
# Visualize the decision regions learned by logistic regression in PC space.
plot_decision_regions(X_train_pca, y_train, classifier=lr)
plt.xlabel('PC 1')
plt.ylabel('PC 2')
plt.legend(loc='lower left')
plt.tight_layout()
# plt.savefig('./figures/pca3.png', dpi=300)
plt.show()
# ### Comparing performance of LR with all features
#
# Let's see how LR trained on the 3 principal components fairs vs one trained on all of them
| python-ml-book/ch05/ch05.ipynb |