code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Integrated Gradients Benchmark
# +
import sys
sys.path.append("../")
import fastISM
from fastISM.models.basset import basset_model
from fastISM.models.factorized_basset import factorized_basset_model
from fastISM.models.bpnet import bpnet_model
import tensorflow as tf
import numpy as np
from importlib import reload
import time
# -
reload(fastISM.flatten_model)
reload(fastISM.models)
reload(fastISM.ism_base)
reload(fastISM.change_range)
reload(fastISM.fast_ism_utils)
reload(fastISM)
tf.__version__
# !nvidia-smi
# !nvidia-smi -L
# !nvcc --version
print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
device = 'GPU:0' if tf.config.experimental.list_physical_devices('GPU') else '/device:CPU:0'
device
import alibi
from alibi.explainers import IntegratedGradients
alibi.__version__
# ## Benchmark
def time_ig(model, batch_sizes, seqlen, num_examples=500, n_steps=50, targets=None):
    """Benchmark alibi's IntegratedGradients explainer on `model`.

    For each internal batch size in `batch_sizes`: build the explainer, run a
    dry run on 10 examples (so one-time setup cost is excluded), then time
    explain() over `num_examples` random (seqlen, 4) inputs for every target
    in `targets`, printing the total time and time per 100 examples.

    Args:
        model: Keras model taking inputs of shape (seqlen, 4).
        batch_sizes: iterable of internal batch sizes to benchmark.
        seqlen: input sequence length.
        num_examples: number of random examples explained per batch size.
        n_steps: number of integration steps for IntegratedGradients.
        targets: list of output targets to explain; defaults to [None],
            appropriate when the model has a single scalar output.
    """
    # Avoid a mutable default argument; None stands in for the usual [None].
    if targets is None:
        targets = [None]
    x = np.random.random((num_examples, seqlen, 4))
    times = []
    per_100 = []
    for b in batch_sizes:
        ig = IntegratedGradients(model,
                                 layer=None,
                                 method="gausslegendre",
                                 n_steps=n_steps,
                                 internal_batch_size=b)
        # dry run so graph construction is not counted in the timing
        ig.explain(x[:10], baselines=None,
                   target=targets[0])
        t = time.time()
        for tgt in targets:
            ig.explain(x, baselines=None,
                       target=tgt)
        elapsed = time.time() - t
        times.append(elapsed)
        per_100.append((elapsed / num_examples) * 100)
        print("BATCH: {}\tTIME: {:.2f}\tPER 100: {:.2f}".format(b, elapsed, per_100[-1]))
    print("BEST PER 100: {:.2f}".format(min(per_100)))
# ### Basset (1000)
model = basset_model(seqlen=1000, num_outputs=1)
# +
# %%capture --no-stdout
# hide warning about scalar output
time_ig(model, [100, 200, 500], 1000, num_examples=10, targets=[None]) # targets None since only one scalar output
# +
# %%capture --no-stdout
time_ig(model, [100, 200, 500, 1000], 1000, num_examples=100, targets=[None])
# -
# ### Basset (2000)
model = basset_model(seqlen=2000, num_outputs=1)
# +
# %%capture --no-stdout
time_ig(model, [100, 200, 500], 2000, num_examples=10, targets=[None])
# +
# %%capture --no-stdout
time_ig(model, [100, 200, 500], 2000, num_examples=100, targets=[None])
# -
# ### Factorized Basset (1000)
model = factorized_basset_model(seqlen=1000, num_outputs=1)
# %%capture --no-stdout
time_ig(model, [100, 200, 500], 1000, num_examples=10, targets=[None])
# %%capture --no-stdout
time_ig(model, [100, 200, 500], 1000, num_examples=100, targets=[None])
# ### Factorized Basset (2000)
model = factorized_basset_model(seqlen=2000, num_outputs=1)
# %%capture --no-stdout
time_ig(model, [100, 200, 300], 2000, num_examples=10, targets=[None])
# %%capture --no-stdout
time_ig(model, [100, 200, 300], 2000, num_examples=100, targets=[None])
# %%capture --no-stdout
time_ig(model, [100, 200, 300], 2000, num_examples=200, targets=[None])
# ### BPNet (1000)
# +
model = bpnet_model(seqlen=1000, num_dilated_convs=9)
# flatten and concat outputs
inp = tf.keras.Input(shape=model.input_shape[1:])
prof, cts = model(inp)
prof = tf.keras.layers.Flatten()(prof)
cts = tf.keras.layers.Flatten()(cts)
out = tf.keras.layers.Concatenate()([prof, cts])
model_ig = tf.keras.Model(inputs=inp, outputs=out)
# flattened outputs
model = model_ig
# -
model.output
time_ig(model, [500], 1000, num_examples=10, targets=range(1001)) # all 1000 profile outs + 1 count out
# ### BPNet (2000)
# +
model = bpnet_model(seqlen=2000, num_dilated_convs=9)
# flatten and concat outputs
inp = tf.keras.Input(shape=model.input_shape[1:])
prof, cts = model(inp)
prof = tf.keras.layers.Flatten()(prof)
cts = tf.keras.layers.Flatten()(cts)
out = tf.keras.layers.Concatenate()([prof, cts])
model_ig = tf.keras.Model(inputs=inp, outputs=out)
# flattened outputs
model = model_ig
# -
model.output
time_ig(model, [500], 2000, num_examples=5, targets=range(2001)) # all 2000 profile outs + 1 count out
| notebooks/IntegratedGradientsBenchmark.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="_2vOYq2-FOZk"
# # **Importing Libraries and Dependencies**
# + colab={"base_uri": "https://localhost:8080/"} id="2E0fhfjVt69e" outputId="e9ee1a3a-e5b8-4d3d-a394-47b46aa0f0c9"
# !python -m spacy download en --quiet
# !git clone "https://github.com/anoopkunchukuttan/indic_nlp_library"
# !git clone https://github.com/anoopkunchukuttan/indic_nlp_resources.git
INDIC_NLP_LIB_HOME=r"/content/indic_nlp_library"
INDIC_NLP_RESOURCES="/content/indic_nlp_resources"
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import torch.nn.functional as F
import spacy
import random
import string
import csv
import sys
sys.path.append(r'{}'.format(INDIC_NLP_LIB_HOME))
from indicnlp import common
common.set_resources_path(INDIC_NLP_RESOURCES)
from indicnlp import common
common.set_resources_path(INDIC_NLP_RESOURCES)
# + [markdown] id="ZI_IDzqeFb8G"
# # **Mounting Google Drive**
# + id="a1bip0Ty4B_b" colab={"base_uri": "https://localhost:8080/"} outputId="b1b9a002-056a-4eef-b69c-5afb9f7baf0f"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="bHiRzQFpGNqP"
# # **Reading data and Preprocessing**
# + id="GlGaeZEQzvD9"
# reading the dataset from google drive
with open('/content/drive/MyDrive/AssignmentNLP/train/train.csv', newline='') as f:
reader = csv.reader(f)
raw_data = list(reader)
# + id="Kfxa_EIeMrNs"
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") #setting device as GPU if available, else device -> CPU
# + id="hxAsplUi7UHR"
train_data=raw_data
# + id="GVMvcgPA6SdZ" colab={"base_uri": "https://localhost:8080/"} outputId="71e3befd-0dcd-4462-ffbb-55495ec7fd56"
for i in range(0,10):
print(train_data[i])
# + id="sK1nsbEy6lY0" colab={"base_uri": "https://localhost:8080/"} outputId="a737c11a-db21-44d7-f9e9-4a8f4eff0efe"
train_data=train_data[1:] #removing header
for i in range(0,10):
print(train_data[i])
for i in range(0, len(train_data)): #extracting the (hindi,english) from the two-dimensional list read from the train.csv file
train_data[i]=train_data[i][1:]
# + colab={"base_uri": "https://localhost:8080/"} id="Cq3gYepc7svb" outputId="05cc8850-9fef-484d-bcfc-5d7731ebc3d5"
for i in range(0,10):
print(train_data[i])
# + colab={"base_uri": "https://localhost:8080/"} id="4mOjwihR7xYe" outputId="cfa59d40-32ae-42d7-99f4-2c7baf9a6856"
print(len(train_data))
# + id="Wh0ttO5f84fp"
def standardize(s): # standardize a string: strip outer whitespace, lowercase, drop punctuation
    """Return `s` stripped of outer whitespace, lowercased, and with every
    ASCII punctuation character (string.punctuation) removed."""
    s = s.strip().lower()
    # str.translate removes all punctuation in one C-level pass instead of
    # the original quadratic character-by-character string concatenation.
    return s.translate(str.maketrans('', '', string.punctuation))
# + [markdown] id="d6rMsqxjFnG5"
# # **Defining Tokenizers for English (spacy) and Hindi (Indic NLP)**
# + id="Wut4cYmXd2n6" colab={"base_uri": "https://localhost:8080/"} outputId="29b4189c-3433-440d-f72a-c0997f26e3fd"
from indicnlp.tokenize import indic_tokenize
indic_string='सुनो, कुछ आवाज़ आ रही है। फोन?'
print('Input String: {}'.format(indic_string))
print('Tokens: ')
for t in indic_tokenize.trivial_tokenize(indic_string): # Inddic NLP tokenizer function()
print(t)
print(indic_tokenize.trivial_tokenize(indic_string))
def tokenize_hindi(text): # tokenizer for Hindi using the Indic NLP library
    # trivial_tokenize returns a list of token strings for the given sentence
    return indic_tokenize.trivial_tokenize(text)
sample_text = 'सुनो, कुछ आवाज़ आ रही है। फोन?' # sample tokenization of a hindi sentence (for testing)
print(tokenize_hindi(sample_text))
# + id="rtn9o6ejefvy" colab={"base_uri": "https://localhost:8080/"} outputId="350d80a6-4308-4036-dc3c-14ff45e9fec1"
spacy_english = spacy.load("en") # importing the spacy pipeline for the English language "en"
def tokenize_english(text): # tokenizer for English using the file-level spaCy pipeline
    # returns the token strings produced by spaCy's tokenizer for this sentence
    return [token.text for token in spacy_english.tokenizer(text)]
sample_text = "I am, going to work" # sample tokenization of an english sentence
print(tokenize_english(sample_text))
# + id="VqnB8H3YB1UH"
for i in train_data: # iterating thorugh the (hindi, english) sentence pair
i[0]=standardize(i[0]) #standardizing the hindi sentence, please refer ti standard() function a few cells above
i[1]=standardize(i[1]) # standardizing the english sentence
# + colab={"base_uri": "https://localhost:8080/"} id="3kPhH0uBCDbC" outputId="a1ac7ad4-8ef6-4861-eae2-cb4ff8feefd1"
for i in range(0,10):
print(train_data[i])
# + [markdown] id="dAP9CaMSF-fP"
# # **Creating the English and Hindi Vocabularies**
# + id="jPuCg2E7ufn1"
class Vocabulary:
    """Word-level vocabulary for one language (Hindi or English).

    Maintains word<->index mappings plus per-word frequency counts. Indices
    0 and 1 are reserved for the <sos> and <eos> tokens, so a fresh
    vocabulary starts with total_words == 2.
    """

    def __init__(self, name):
        self.name = name                               # vocabulary name ("hindi" or "english")
        self.word_count = {}                           # word -> occurrence count
        self.word2index = {"<sos>": 0, "<eos>": 1}     # word -> unique index
        self.index2word = {0: "<sos>", 1: "<eos>"}     # inverse mapping: index -> word
        self.total_words = 2                           # vocabulary size (incl. the two specials)

    def processSentence(self, sentence):
        """Tokenize `sentence` with the language-appropriate tokenizer and
        register every resulting token."""
        tokenizer = tokenize_hindi if self.name == "hindi" else tokenize_english
        for token in tokenizer(sentence):
            self.processWord(token)

    def processWord(self, word):
        """Register one occurrence of `word`, assigning a fresh index when unseen."""
        if word in self.word2index:
            # Known word: only bump its frequency.
            self.word_count[word] += 1
        else:
            # New word: give it the next free index and record both mappings.
            self.word2index[word] = self.total_words
            self.index2word[self.total_words] = word
            self.word_count[word] = 1
            self.total_words += 1
# + id="7Dd2lJYQC8P3"
hindi = Vocabulary("hindi") # instantiating the hind vocabulary
english = Vocabulary("english") # instantiating the english vocabulary
# + id="RyDF4aDdDR1t" colab={"base_uri": "https://localhost:8080/"} outputId="618f40d9-55e0-4ce4-9039-dd2cbdd5cd82"
def checkNoise(x):
    """Return True when the (hindi, english) pair `x` looks like a clean sample.

    Lengths are measured by counting spaces. A pair passes when both sides
    have fewer than 30 and more than 2 spaces, and neither side is at least
    twice as long as the other; anything else is treated as noise.
    """
    hin_spaces = x[0].count(" ")
    eng_spaces = x[1].count(" ")
    return (hin_spaces < 30 and eng_spaces < 30       # neither sentence exceedingly long
            and hin_spaces > 2 and eng_spaces > 2     # both long enough to be meaningful
            and hin_spaces < 2 * eng_spaces           # sizes comparable: each side is
            and eng_spaces < 2 * hin_spaces)          # less than twice the other
clean_data=list()
for i in train_data:
if(checkNoise(i)): # here we de noisify the dataset
clean_data.append(i)
print(len(clean_data)) # printing length of the cleaned dataset
# + colab={"base_uri": "https://localhost:8080/"} id="VZNgU4p_yj4Y" outputId="2146f33d-f195-4876-9422-85456b8cdc67"
for i in range(0,20): # checking clean data set
print(clean_data[i])
# + id="MP4ojOCmzwIW" colab={"base_uri": "https://localhost:8080/", "height": 214} outputId="f3967766-2e47-47f5-cb3f-5b4ddd794254"
for sample in clean_data: # please note executing this cell will take some time (around 45 minutes)
hindi.processSentence(sample[0]) # now I take the cleaned dataset, extract the hindi sentences one at a time and create the hindi vocabulary
english.processSentence(sample[1]) # now I take the cleaned dataset, extract the english sentences one at a time and create the english vocabulary
print('--------------- vocabulary creation done-----------------')
while True:pass
# + colab={"base_uri": "https://localhost:8080/"} id="qu8e8eIiJr3i" outputId="29b6d83c-dae4-4c94-eee3-245f61cd2d3d"
print(english.name) # printing out the attributes of the two objects hindi and english (of Class Vocavulary)
print(hindi.name)
print(english.total_words) # english vocabulary size
print(hindi.total_words) # hindi vocabulary size
# + [markdown] id="ZC7HFWviGU4T"
# # **Defining the Encoder (GRU) architecture**
#
#
# In the following cell the architecture of the Encoder is defined. I have used a GRU as an encoder. Please note I've made the following design choices.
#
# Number of recurrent layers- 1
#
# Number of features in the hidden state-512
#
# Size of the embedding vector created using nn.Embedding() - 512
#
# + id="lDJzTXG639z2"
class Encoder(nn.Module):
    """Single-layer GRU encoder: embeds one source token per call and
    advances the recurrent hidden state.

    The embedding dimension equals the GRU hidden size; `input_size` is the
    source (Hindi) vocabulary size and sets the number of embedding rows.
    """
    def __init__(self, input_size, hidden_size, num_layers=1):
        super(Encoder, self).__init__()
        self.hidden_size = hidden_size   # features in the GRU hidden state
        self.num_layers = num_layers     # number of recurrent layers
        # Lookup table mapping token indices to dense embedding vectors.
        self.embedding = nn.Embedding(input_size, hidden_size)
        self.gru = nn.GRU(hidden_size, hidden_size)
    def forward(self, input, hidden):
        # Embed a single token and shape it as (seq_len=1, batch=1, hidden).
        embedded = self.embedding(input).view(1, 1, -1)
        output, hidden = self.gru(embedded, hidden)
        return output, hidden
    def initializeHiddenState(self):
        # Fresh all-zero hidden state on the globally selected device.
        return torch.zeros(self.num_layers, 1, self.hidden_size, device=device)
# + id="PU95VyZ_GSJv"
# class Encoder(nn.Module):
# def __init__(self, input_size, hidden_size,num_layers=2):
# super(Encoder, self).__init__()
# self.hidden_size = hidden_size # number of features in hidden state
# self.num_layers=num_layers # number of recurrent layers
# self.embedding = nn.Embedding(input_size, hidden_size) #this creates simple lookup table used to store word embeddings and retrieve them, note number of entries is same as size of hindi vocabulary
# # and size of the embedding vector is same as hidden size
# self.lstm = nn.LSTM(hidden_size, hidden_size,num_layers) # applies a GRU to an input sequence.
# def forward(self, input, hidden):
# output = self.embedding(input).view(1, 1, -1) # gives the foward pass, i.e. given input tensors get output tensors
# output, hidden,cell = self.lstm(output, hidden)
# return output, hidden, cell
# def initializeHiddenState(self):
# return torch.zeros(2, 1, self.hidden_size, device=device)
# + [markdown] id="dVqaFfgfGfgG"
# # **Defining the Decoder (GRU + Attention) Architecture**
#
#
# In the following cell the architecture of the Decoder is defined. I have used a GRU as a decoder along with attention. Please note I've made the following design choices.
#
# Number of recurrent layers- 1
#
# Number of features in the hidden state-512
#
# Size of the embedding vector created using nn.Embedding() - 512
#
# Dropout probability- 0.4
#
#
#
#
# + id="qOgxG3EKMK3u"
class Decoder(nn.Module):
    """GRU decoder with attention over the encoder outputs.

    At each step, attention weights over up to `max_length` encoder states
    are computed from the current input embedding and hidden state; the
    attended context is fused with the embedding before the GRU step, and
    the GRU output is projected to log-probabilities over the English
    vocabulary.
    """
    def __init__(self, hidden_size, output_size, num_layers=1, dropout_p=0.4, max_length=50):
        super(Decoder, self).__init__()
        self.hidden_size = hidden_size # number of features in the hidden state
        self.output_size = output_size # same as the English vocabulary size
        self.dropout_p = dropout_p # dropout probability, used for regularization
        self.max_length = max_length # maximum number of decoder steps / attended positions
        self.num_layers=num_layers # number of recurrent layers
        self.embedding = nn.Embedding(self.output_size, self.hidden_size) # English word embeddings; embedding size equals hidden size
        self.attention = nn.Linear(self.hidden_size * 2, self.max_length) # maps [embedding; hidden] to one score per encoder position
        self.attention_combine = nn.Linear(self.hidden_size * 2, self.hidden_size) # fuses [embedding; attended context] back to hidden size
        self.dropout = nn.Dropout(self.dropout_p) # dropout layer (p=0.4 by default)
        self.gru = nn.GRU(self.hidden_size, self.hidden_size) # one-step GRU over the combined vector
        self.out = nn.Linear(self.hidden_size, self.output_size) # projects GRU output to vocabulary logits
    def forward(self, input, hidden, encoder_outputs):
        """One decoding step.

        Args:
            input: tensor holding a single target-token index.
            hidden: previous hidden state, shape (num_layers, 1, hidden_size).
            encoder_outputs: (max_length, hidden_size) encoder-state matrix.

        Returns:
            (log-probabilities over the vocabulary, new hidden state,
             attention weights over encoder positions).
        """
        embedded = self.embedding(input).view(1, 1, -1)
        embedded = self.dropout(embedded)
        # Attention scores from the concatenated embedding and hidden state.
        attention_weights = F.softmax(self.attention(torch.cat((embedded[0], hidden[0]), 1)), dim=1)
        # Attended context: batched matrix product of weights and encoder outputs.
        applied = torch.bmm(attention_weights.unsqueeze(0), encoder_outputs.unsqueeze(0))
        output = torch.cat((embedded[0], applied[0]), 1)
        output= F.relu(self.attention_combine(output).unsqueeze(0))
        output, hidden = self.gru(output, hidden)
        output = F.log_softmax(self.out(output[0]), dim=1)
        return output, hidden, attention_weights
    def InitializaHiddenState(self):
        # NOTE(review): method name is misspelled ("Initializa..."); no code in
        # this file calls it, so it is left unchanged to preserve the interface.
        return torch.zeros(self.num_layers, 1, self.hidden_size, device=device)
# + [markdown] id="alm1A5P8H5sw"
# # **Defining Utility funtions**
# + id="0F9-eazl5bV7"
def createTensor(x):
    """Convert a (hindi, english) sentence pair into a pair of index tensors.

    Each sentence is tokenized, every token is replaced by its vocabulary
    index, the <eos> index (1) is appended, and the result is returned as a
    column tensor of dtype long on the global device.
    """
    def to_column_tensor(indices):
        return torch.tensor(indices, dtype=torch.long, device=device).view(-1, 1)

    hin_indices = [hindi.word2index[w] for w in tokenize_hindi(x[0])]
    hin_indices.append(1)  # terminate with <eos>
    eng_indices = [english.word2index[w] for w in tokenize_english(x[1])]
    eng_indices.append(1)  # terminate with <eos>
    return to_column_tensor(hin_indices), to_column_tensor(eng_indices)
# + id="zgHgrTreQLv4"
def sentence2tensor(x):
    """Convert one Hindi sentence into a column tensor of vocabulary indices,
    terminated by the <eos> index (1)."""
    indices = [hindi.word2index[w] for w in tokenize_hindi(x)]
    indices.append(1)  # terminate with <eos>
    return torch.tensor(indices, dtype=torch.long, device=device).view(-1, 1)
# + id="KNZheTwrJ462"
def train(input_tensor, target_tensor, encoder, decoder, encoder_optimizer, decoder_optimizer, criterion, max_length=50, tfr=0.5):
    """Run one training step on a single (hindi, english) tensor pair.

    Encodes the source sequence, then decodes it — with teacher forcing
    applied with probability `tfr` — accumulating the per-token loss, and
    backpropagates through both the encoder and decoder optimizers.

    Returns:
        The summed loss divided by the number of target tokens.
    """
    hin_len = input_tensor.size(0)
    eng_len= target_tensor.size(0)
    enc_hidden=encoder.initializeHiddenState()
    encoder_optimizer.zero_grad()
    decoder_optimizer.zero_grad()
    # Buffer of encoder outputs, one row per source position (zero-padded).
    # NOTE(review): assumes hin_len <= max_length; longer inputs would index
    # past the buffer — confirm inputs are pre-truncated by the caller.
    enc_ops = torch.zeros(max_length, encoder.hidden_size, device=device)
    loss = 0 # initial loss set to 0
    for x in range(hin_len):
        enc_op, enc_hidden= encoder(input_tensor[x], enc_hidden)
        enc_ops[x] = enc_op[0, 0]
    decoder_input = torch.tensor([[0]], device=device) # start decoding from the <sos> token (index 0)
    decoder_hidden = enc_hidden
    if random.random()<tfr:
        for x in range(eng_len): # teacher forcing: the actual target word is fed as the next input
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, enc_ops)
            loss += criterion(decoder_output, target_tensor[x]) # accumulate the per-token decoder loss
            decoder_input = target_tensor[x] # next input is the ground-truth token
    else:
        for x in range(eng_len): # no teacher forcing: the decoder's own prediction is fed as the next input
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, enc_ops)
            topv, topi = decoder_output.topk(1)
            decoder_input = topi.squeeze().detach() # detach so gradients do not flow through the prediction
            loss += criterion(decoder_output, target_tensor[x])
            if decoder_input.item() == 1:
                break # predicted <eos>: stop decoding early
    loss.backward()
    encoder_optimizer.step()
    decoder_optimizer.step()
    total_loss=loss.item()
    actual_loss=total_loss/eng_len # average loss per target token
    return actual_loss
# + id="YLDVKx--OkHw"
def trainSeq2Seq(encoder, decoder, epochs, learning_rate):
    """Train the encoder/decoder pair for `epochs` single-sample iterations.

    Each iteration draws one random (hindi, english) tensor pair from the
    file-level `clean_data` (stochastic training with batch size 1) and runs
    one train() step with Adam optimizers. Progress is printed every 1000
    iterations.

    Args:
        encoder: the Encoder module to train.
        decoder: the Decoder module to train.
        epochs: number of single-sample training iterations.
        learning_rate: Adam learning rate for both optimizers.
    """
    encoder.train()
    decoder.train()
    encoder_optimizer = optim.Adam(encoder.parameters(), lr=learning_rate) # Adam optimizer for the encoder
    decoder_optimizer = optim.Adam(decoder.parameters(), lr=learning_rate) # Adam optimizer for the decoder
    # Pre-sample one random training pair per iteration.
    samples = [createTensor(random.choice(clean_data)) for _ in range(epochs)]
    # NOTE(review): the decoder already emits log_softmax outputs, so
    # CrossEntropyLoss applies a second log-softmax; NLLLoss would match the
    # decoder's output — left unchanged here to preserve training behavior.
    criterion = nn.CrossEntropyLoss()
    for i in range(epochs):
        input_tensor = samples[i][0]   # index 0 holds the hindi tensor
        target_tensor = samples[i][1]  # index 1 holds the english tensor
        train(input_tensor, target_tensor, encoder, decoder,
              encoder_optimizer, decoder_optimizer, criterion)
        # The original countdown printed every 1001 iterations; this reports
        # exactly every 1000 as the surrounding comments intended.
        if (i + 1) % 1000 == 0:
            print(str(i)," epochs completed.")
# + id="mLicPM7OKW-v"
def translate(encoder, decoder, sentence, max_length=50):
    """Greedily translate one Hindi `sentence` into English (no gradients).

    Encodes the sentence, then decodes step by step, always taking the
    top-scoring word, until <eos> is produced or `max_length` steps elapse.

    Returns:
        The translated English sentence as a single space-joined string.
    """
    with torch.no_grad():
        input_tensor=sentence2tensor(sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden=encoder.initializeHiddenState()
        # Buffer of encoder outputs, one row per source position.
        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
        for x in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[x], encoder_hidden)
            encoder_outputs[x] += encoder_output[0, 0]
        decoder_input = torch.tensor([[0]], device=device) # start from the <sos> token (index 0)
        decoder_hidden = encoder_hidden # the encoder's final hidden state seeds the decoder
        decoded_words = [] # accumulates the predicted English words
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == 1: # decoder predicted the <eos> token
                decoded_words.append('<eos>')
                break
            else:
                decoded_words.append(english.index2word[topi.item()])
                decoder_input = topi.squeeze().detach()
        # NOTE(review): [:-1] strips the trailing '<eos>', but when the loop
        # exhausts max_length without predicting <eos> it drops the last real
        # word instead — confirm this is intended.
        translated=' '.join(decoded_words[:-1])
        return translated
# + id="gmqLfAdXDTVf"
# def translate(encoder, decoder, sentence, max_length=50):
# with torch.no_grad():
# input_tensor=sentence2tensor(sentence)
# input_length = input_tensor.size()[0]
# encoder_hidden=encoder.initializeHiddenState()
# encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
# for x in range(input_length):
# encoder_output, encoder_hidden = encoder(input_tensor[x], encoder_hidden)
# encoder_outputs[x] += encoder_output[0, 0]
# decoder_input = torch.tensor([[0]], device=device) # adding sos token i.e. 0
# decoder_hidden = encoder_hidden # last hidden state of the encoder is passed as the first hidden state of the decoder
# decoded_words = [] # to store the decoded words
# for di in range(max_length):
# decoder_output, decoder_hidden, decoder_attention = decoder(decoder_input, decoder_hidden, encoder_outputs)
# topv, topi = decoder_output.data.topk(2)
# print(10 ** topv.item() )
# if topi.item() == 1: # if word predicted by decder is eos token i.e. 1
# decoded_words.append('<eos>')
# break
# else:
# decoded_words.append(english.index2word[topi.item()])
# decoder_input = topi.squeeze().detach()
# translated=' '.join(decoded_words[:-1])
# return translated
# + [markdown] id="gjqFIF1AH2G5"
# # **Training the Model**
# + id="a62JR8QZLTeK" colab={"base_uri": "https://localhost:8080/"} outputId="fe76eb88-cb3f-4462-cf6c-624cf06845c0"
epochs=40000 # please note training swill take somewhere between 1-2 hrs
hidden_size = 512
learning_rate=0.0001
encoder = Encoder(hindi.total_words, hidden_size).to(device)
decoder = Decoder(hidden_size, english.total_words, dropout_p=0.4).to(device)
print('------------training seq2seq---------------')
trainSeq2Seq(encoder,decoder, epochs,learning_rate)
print('----------------training done------------------')
#while True:pass
# + colab={"base_uri": "https://localhost:8080/"} id="Gp0cWTowRhFQ" outputId="7a588a68-4505-4e30-bc1a-0b5d2588c781"
encoder.eval()
decoder.eval()
sentence="वे कहते हैं कि जहाज पर आप की जरूरत है।"
print(translate(encoder, decoder, sentence))
# + id="TVA6eKkd7zqp"
def showAttention(input_sentence, output_words, attentions):
    """Render the attention matrix as a heatmap: input tokens along the
    x-axis, predicted output words along the y-axis."""
    # Set up figure with colorbar
    fig = plt.figure()
    ax = fig.add_subplot(111)
    cax = ax.matshow(attentions.numpy(), cmap='bone')
    fig.colorbar(cax)
    # Label the axes with the tokens; x labels rotated for readability.
    ax.set_xticklabels([''] + input_sentence.split(' ') +
                       ['<EOS>'], rotation=90)
    ax.set_yticklabels([''] + output_words)
    # Show label at every tick
    ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
    ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
    plt.show()
def evaluateAndShowAttention(input_sentence):
    """Translate `input_sentence` with the file-level encoder/decoder, print
    the input and output, and plot the attention heatmap."""
    output_words, attentions = evaluate(
        encoder, decoder, input_sentence)
    print('input =', input_sentence)
    print('output =', ' '.join(output_words))
    showAttention(input_sentence, output_words, attentions)
# + id="RKUUvLBn8KsD"
def evaluate(encoder, decoder, sentence, max_length=50):
    """Greedily decode `sentence` and collect the per-step attention weights.

    Same decoding loop as translate(), but additionally records each step's
    attention distribution for visualization.

    Returns:
        (decoded_words, attention matrix of shape (steps_taken, max_length)).
    """
    with torch.no_grad():
        input_tensor = sentence2tensor(sentence)
        input_length = input_tensor.size()[0]
        encoder_hidden = encoder.initializeHiddenState()
        # Buffer of encoder outputs, one row per source position.
        encoder_outputs = torch.zeros(max_length, encoder.hidden_size, device=device)
        for ei in range(input_length):
            encoder_output, encoder_hidden = encoder(input_tensor[ei],
                                                     encoder_hidden)
            encoder_outputs[ei] += encoder_output[0, 0]
        decoder_input = torch.tensor([[0]], device=device)  # SOS
        decoder_hidden = encoder_hidden
        decoded_words = []
        decoder_attentions = torch.zeros(max_length, max_length)
        for di in range(max_length):
            decoder_output, decoder_hidden, decoder_attention = decoder(
                decoder_input, decoder_hidden, encoder_outputs)
            # Record this step's attention distribution for later plotting.
            decoder_attentions[di] = decoder_attention.data
            topv, topi = decoder_output.data.topk(1)
            if topi.item() == 1:  # decoder predicted the <eos> token
                decoded_words.append('<EOS>')
                break
            else:
                decoded_words.append(english.index2word[topi.item()])
                decoder_input = topi.squeeze().detach()
        return decoded_words, decoder_attentions[:di + 1]
# + id="tmGySzCT-_1q"
import matplotlib.pyplot as plt
plt.switch_backend('agg')
import matplotlib.ticker as ticker
import numpy as np
def showPlot(points):
    """Plot a sequence of loss values with y-axis ticks every 0.2 units.

    Args:
        points: sequence of numeric values to plot against their index.
    """
    # The original called plt.figure() and then plt.subplots(), leaking one
    # empty, never-used figure per call; subplots() alone creates the figure.
    fig, ax = plt.subplots()
    # this locator puts ticks at regular intervals
    loc = ticker.MultipleLocator(base=0.2)
    ax.yaxis.set_major_locator(loc)
    plt.plot(points)
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="_Zw2u4M57lmR" outputId="899dbde5-09f4-461d-c9c9-be147bbf2a58"
evaluateAndShowAttention("वे कहते हैं कि जहाज पर आप की जरूरत है।")
# + [markdown] id="6lc2CA_8HHW2"
# # **Generating the translated sentences of the development set**
# + id="ltOW4xvolsp-"
with open('/content/drive/MyDrive/AssignmentNLP/week4/hindistatements.csv', newline='') as f1: # reading the input file (dev set) which has the hindi sentences to be translated
reader = csv.reader(f1)
hs = list(reader)
# + colab={"base_uri": "https://localhost:8080/"} id="i9HztRiMqhsy" outputId="2802a33c-4225-43d6-c181-b09470568bbe"
for i in range(0,10):
print(hs[i])
# + id="tdZPA-f-qs3n"
hs_list=hs[1:]
# + colab={"base_uri": "https://localhost:8080/"} id="oM3HzuNcqyCF" outputId="e0cae81c-3ceb-4e10-c98c-6d943ae9ee81"
print(len(hs_list))
# + colab={"base_uri": "https://localhost:8080/"} id="h55AU2IDq4JL" outputId="d451c1af-9b20-46c1-88ed-b212a3b5692f"
hindi_list=[]
c=0
for i in hs_list: # extracting each hindi sentence, along with standardizing it and dealing with out of vocabulary tokens
s=i[2]
s=standardize(s)
word_list=tokenize_hindi(s)
#word_len=len(word_list)
s=''
for wd in word_list:
if wd not in hindi.word2index:
continue
else:
s=s+wd+' '
word_list=tokenize_hindi(s)
word_len=len(word_list)
if word_len>30:
s=''
for j in range(0,30):
s=s+word_list[j]+' '
c=c+1
s.strip()
hindi_list.append(s)
print(c)
# + colab={"base_uri": "https://localhost:8080/"} id="keZkwtqwr5NG" outputId="6743ef96-713c-40a9-b48f-851dbfc9119a"
for i in range(0,10):
print(hindi_list[i])
# + colab={"base_uri": "https://localhost:8080/"} id="p9kr5RfGudsu" outputId="4618ed55-6000-45b9-9631-378213fb790f"
len(hindi_list)
# + colab={"base_uri": "https://localhost:8080/"} id="xnipuL3cuZV_" outputId="8fd56e74-284b-462b-cca8-ba15168d0213"
op=[]
c1=0 # this is a simple white space de-tokenizer implemented by me which is being used for processing the translated english sentence
for i in range(0,len(hindi_list)):
sentence=hindi_list[i]
if len(sentence)==0:
c1=c1+1
op.append('')
continue
translated=translate(encoder,decoder, sentence)
op.append(translated.strip()) # adding the translated sentence to the list of outputs i.e. op
print(c1)
# + colab={"base_uri": "https://localhost:8080/"} id="4QZBsTpw3m8x" outputId="7482e897-4905-42ad-b193-7307382531b7"
for i in range(0,10):
print(op[i])
# + colab={"base_uri": "https://localhost:8080/"} id="QKkruz7M5WC_" outputId="dd4f61f4-4125-41b5-c696-dead6d308315"
print(len(op))
# + id="7nQTHecUvWgl"
with open('/content/drive/MyDrive/AssignmentNLP/answer.txt', 'w') as f: # creating and witing the translated english sentences to the output file
for item in op:
f.write("%s\n" % item)
# + id="MjzqRjpLY3TW"
torch.save(encoder.state_dict(), '/content/drive/MyDrive/AssignmentNLP/model/ph4sub1-enc')
torch.save(decoder.state_dict(), '/content/drive/MyDrive/AssignmentNLP/model/ph4sub1-dec')
# model = TheModelClass(*args, **kwargs)
# model.load_state_dict(torch.load(PATH))
# model.eval()
# + id="F0onoYIoubxW"
while True:pass
# + id="4sYHaNmv5qwz"
| code/phase4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (FAI)
# language: python
# name: fai3
# ---
# 01 SEP 2017
# +
# %matplotlib inline
import importlib
import os, sys; sys.path.insert(1, os.path.join('../utils'))
import utils2; importlib.reload(utils2)
from utils2 import *
from scipy.optimize import fmin_l_bfgs_b
from scipy.misc import imsave
from keras import metrics
from vgg16_avg import VGG16_Avg
# -
from bcolz_array_iterator import BcolzArrayIterator
limit_mem()
path = '../data/'
dpath = path
rn_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
preproc = lambda x: (x - rn_mean)[:, :, :, ::-1]
deproc = lambda x,s: np.clip(x.reshape(s)[:, :, :, ::-1] + rn_mean, 0, 255)
arr_lr = bcolz.open(dpath+'trn_resized_72.bc')
arr_hr = bcolz.open(path+'trn_resized_288.bc')
parms = {'verbose': 0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}
parms = {'verbose': 0, 'callbacks': [TQDMNotebookCallback(leave_inner=True)]}
def conv_block(x, filters, size, stride=(2,2), mode='same', act=True):
    # Conv -> BatchNorm -> (optional) ReLU, using the legacy Keras 1 API
    # (subsample = strides, border_mode = padding, BatchNormalization mode=2).
    x = Convolution2D(filters, size, size, subsample=stride, border_mode=mode)(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x) if act else x
def res_block(ip, nf=64):
    # Residual block: two stride-1 conv blocks (the second without its
    # activation), summed with the input via the legacy Keras 1 `merge`.
    x = conv_block(ip, nf, 3, (1,1))
    x = conv_block(x, nf, 3, (1,1), act=False)
    return merge([x, ip], mode='sum')
def up_block(x, filters, size):
    # 2x upsampling followed by conv -> BatchNorm -> ReLU (legacy Keras 1 API).
    x = keras.layers.UpSampling2D()(x)
    x = Convolution2D(filters, size, size, border_mode='same')(x)
    x = BatchNormalization(mode=2)(x)
    return Activation('relu')(x)
def get_model(arr):
    """Build the super-resolution generator for inputs shaped like `arr`.

    Architecture: 9x9 conv -> 4 residual blocks -> two 2x up-blocks (4x total
    upscaling) -> 9x9 tanh conv, rescaled from [-1, 1] to [0, 255].
    Returns the (input_tensor, output_tensor) pair.
    """
    inp = Input(arr.shape[1:])
    h = conv_block(inp, 64, 9, (1,1))
    for _ in range(4):
        h = res_block(h)
    h = up_block(h, 64, 3)
    h = up_block(h, 64, 3)
    h = Convolution2D(3, 9, 9, activation='tanh', border_mode='same')(h)
    # tanh output is in [-1, 1]; map it onto pixel range [0, 255].
    outp = Lambda(lambda t: (t + 1) * 127.5)(h)
    return inp, outp
inp,outp=get_model(arr_lr)
# +
shp = arr_hr.shape[1:]
vgg_inp=Input(shp)
vgg= VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
for l in vgg.layers: l.trainable=False
# -
def get_outp(m, ln): return m.get_layer(f'block{ln}_conv2').output
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)
def mean_sqr_b(diff):
    """Per-sample RMS of `diff` over all non-batch axes, with a leading axis added."""
    reduce_axes = list(range(1, K.ndim(diff)))
    rms = K.sqrt(K.mean(diff ** 2, reduce_axes))
    return K.expand_dims(rms, 0)
w=[0.1, 0.8, 0.1]
def content_fn(x):
    """Weighted sum of per-layer RMS differences.

    `x` holds the VGG activations of the content image followed by those of
    the generated image (same layer order); layer weights come from global `w`.
    """
    n = len(w)
    loss = 0
    for i, weight in enumerate(w):
        loss += mean_sqr_b(x[i] - x[i + n]) * weight
    return loss
m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1+vgg2))
m_sr.compile('adam', 'mae')
def train(bs, niter=10):
    """Train `m_sr` for `niter` batches of size `bs` drawn from the bcolz arrays.

    Target is all-zeros because the model's output IS the loss value.
    """
    targ = np.zeros((bs, 1))
    batches = BcolzArrayIterator(arr_hr, arr_lr, batch_size=bs)
    for _ in range(niter):
        hr, lr = next(batches)
        m_sr.train_on_batch([lr[:bs], hr[:bs]], targ)
its = len(arr_hr)//16; its
arr_lr.chunklen, arr_hr.chunklen
# %time train(64, 18000)
# Finally starting to understand this problem. So `ResourceExhaustedError` isn't system memory (or at least not only) but graphics memory. The card (obviously) cannot handle a batch size of 64. But batch size must be a multiple of chunk length, which here is 64.. so I have to find a way to reduce the chunk length down to something my system can handle: no more than 8.
arr_lr_c8 = bcolz.carray(arr_lr, chunklen=8, rootdir=path+'trn_resized_72_c8.bc')
arr_lr_c8.flush()
arr_hr_c8 = bcolz.carray(arr_hr, chunklen=8, rootdir=path+'trn_resized_288_c8.bc')
arr_hr_c8.flush()
arr_lr_c8.chunklen, arr_hr_c8.chunklen
# That looks successful, now to redo the whole thing with the `_c8` versions:
arr_lr_c8 = bcolz.open(path+'trn_resized_72_c8.bc')
arr_hr_c8 = bcolz.open(path+'trn_resized_288_c8.bc')
# +
inp,outp=get_model(arr_lr_c8)
shp = arr_hr_c8.shape[1:]
vgg_inp=Input(shp)
vgg= VGG16(include_top=False, input_tensor=Lambda(preproc)(vgg_inp))
for l in vgg.layers: l.trainable=False
vgg_content = Model(vgg_inp, [get_outp(vgg, o) for o in [1,2,3]])
vgg1 = vgg_content(vgg_inp)
vgg2 = vgg_content(outp)
m_sr = Model([inp, vgg_inp], Lambda(content_fn)(vgg1+vgg2))
m_sr.compile('adam', 'mae')
def train(bs, niter=10):
    """Train `m_sr` on the chunklen-8 bcolz arrays, `niter` batches of size `bs`."""
    targ = np.zeros((bs, 1))
    batches = BcolzArrayIterator(arr_hr_c8, arr_lr_c8, batch_size=bs)
    for _ in range(niter):
        hr, lr = next(batches)
        m_sr.train_on_batch([lr[:bs], hr[:bs]], targ)
# -
# %time train(8, 18000) # not sure what exactly the '18000' is for
arr_lr.shape, arr_hr.shape, arr_lr_c8.shape, arr_hr_c8.shape
# 19439//8 = 2429
# %time train(8, 2430)
| FAI02_old/Lesson9/neural_sr_attempt2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/creamcheesesteak/test_python/blob/master/python_language.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="p7CupXoWTi-7"
# + id="wEzt1Vh_VRfy"
# + id="zCH92ktCVRrb"
# + id="oIywJ5GJVR1I"
# + id="1oRTE4ClVR-c"
# + id="PiCuNQJ7VSHT"
# + id="zf4D_VW2VSUi"
# + id="Fdk2L0MnVSfh"
# + id="mmYbzcZkVSmL"
import numpy as np
# + colab={"base_uri": "https://localhost:8080/", "height": 35} id="ID8NUUubVSub" outputId="ed782d0b-7949-44b2-d2cf-674edade38b0"
np.__version__
# + [markdown] id="pho__i5QVxoC"
# `__000__ `는 내장변수
# + id="nR8E4WcI2dXz"
def onehotcylinder(cylinders):
    """One-hot encode a cylinder count into a length-5 vector.

    Slot order: 3, 4, 5, 6, 8 cylinders.  Any value other than 3/4/5/6 falls
    into the last ("8 cylinders") slot, matching the original if/elif chain's
    final `else`.

    Parameters
    ----------
    cylinders : int
        Number of engine cylinders.

    Returns
    -------
    numpy.ndarray
        Length-5 integer one-hot vector.
    """
    # Index lookup replaces the original five-way if/elif chain of
    # hard-coded arrays.
    known = [3, 4, 5, 6]
    idx = known.index(cylinders) if cylinders in known else 4
    onehot = np.zeros(5, dtype=int)
    onehot[idx] = 1
    return onehot
# + id="AvhAz1Q1VSz4"
# input을 받음
displacement = 307.0
horsepower = 130.0
weight = 3504.0
accel = 12.0
cylinders = 8
origin = 1
# if cylinders = 8, origin = 1 --> onehot encoding
import numpy as np
if cylinders == 3:
cylinder = np.array([1,0,0,0,0])
elif cylinders == 4:
cylinder = np.array([0,1,0,0,0])
elif cylinders == 5:
cylinder = np.array([0,0,1,0,0])
elif cylinders == 6:
cylinder = np.array([0,0,0,1,0])
else : # 8
cylinder = np.array([0,0,0,0,1])
x_continus = np.array([displacement, horsepower, weight, accel])
# cylinder = np.array([0,0,0,0,1,]) # 8
cylinder = onehotcylinder(cylinders)
org = np.array([1,0,0]) # 1
# x_continus = [[307.0, 130.0, 3504.0, 12.0]]
# + colab={"base_uri": "https://localhost:8080/"} id="7HalDDEO3Grz" outputId="e6c83f77-3d4d-4ed0-b93f-0b51ccc4a2a3"
onehotcylinder(3), onehotcylinder(6)
# + colab={"base_uri": "https://localhost:8080/"} id="4LROEtLN3Jh8" outputId="15d0f43a-d5e8-4662-a130-dab900ac3424"
cylinder
# + id="GbJgNyjq3Gju"
# + colab={"base_uri": "https://localhost:8080/"} id="UT-_JQelVS5-" outputId="779f7950-1a7a-4d7b-c962-6755d42e8305"
type(x_continus), type(cylinder), type(org)
# + colab={"base_uri": "https://localhost:8080/"} id="HNlOJIfjVTA9" outputId="441ab041-e05e-4d1b-9085-8530bf4b2693"
# np.concatenate((a, b), axis=None)
result = np.concatenate((x_continus, cylinder, org), axis=None)
result.shape, result
# + colab={"base_uri": "https://localhost:8080/"} id="XKF6Z_gmVTGi" outputId="5166be4e-54d6-4b03-b028-1116e79762ec"
# [[307.0, 130.0, 3504.0, 12.0, 0, 0, 0, 0, 1]]
# result.reshape(1, 12)
result = result.reshape(-1,result.size)
result, result.shape, result.size
# + id="b03RoZyjVTNH"
# + id="ISWV57a6icWe"
# input을 받음
displacement = 307.0
horsepower = 130.0
weight = 3504.0
accel = 12.0
cylinders = 8
origin = 1
# + id="XBBH0Wqbicah"
# + [markdown] id="nLHH99jwio5S"
# # function 이해
# + colab={"base_uri": "https://localhost:8080/"} id="z1h44Q_Sicly" outputId="bd962091-0693-4c61-a64b-3bab32085584"
21 + 14
# + colab={"base_uri": "https://localhost:8080/"} id="2YjSIyWeicvq" outputId="d679792b-5f8c-4515-f38b-f4afbb7a22d1"
32 + 45
# + colab={"base_uri": "https://localhost:8080/"} id="9LgKy4Dhic3_" outputId="28a1626d-1958-4473-ded3-d98f7fd980f3"
first = 21
second = 14
result = first + second
result
# + id="21A5Ewt8idBP"
def sum(n1, n2):
    """Return n1 + n2.

    NOTE: shadows the builtin `sum`; the name is kept because later cells
    call it, but in real code pick a non-builtin name.
    """
    return n1 + n2
# + colab={"base_uri": "https://localhost:8080/"} id="zJOu5sAQnAAp" outputId="c5286c6f-9354-46a8-eef6-8bef025a03d2"
sum(32,34)
# + id="dZW-HYmOpG7z"
def mul(n1, n2, n3):
    """Return the product of the three arguments."""
    return n1 * n2 * n3
# + colab={"base_uri": "https://localhost:8080/"} id="alsuCMTRtnsL" outputId="df0d646f-c892-4854-dea1-0a461b0f1561"
mul(21,32,2)
# + colab={"base_uri": "https://localhost:8080/"} id="n4zQwCmGtnxG" outputId="7826da6b-3e86-4a07-c7f8-755c0c497d8f"
mul(31,63,41)
# + id="r2ZQt8iutn3s"
def sum(f01, s02):
    """Return (f01 + s02, s02, f01) -- demonstrates returning multiple values.

    NOTE: shadows the builtin `sum` (kept because later cells call it).
    """
    return f01 + s02, s02, f01
# + id="O_lrPoDWtn8n"
_, r02, r03 = sum(21,564)
# + colab={"base_uri": "https://localhost:8080/"} id="1EVwD2XytoB8" outputId="e0589c01-1b9f-4184-ec7d-4bc194dfdabb"
print(r02,r03)
# + id="jXvd4jifws6f"
# def differenceVars(*var01, **args02)
def diffparam(*var_list, **var_dict):
    """Demonstrate *args / **kwargs parameters.

    `*var_list` collects positional arguments into a tuple and `**var_dict`
    collects keyword arguments into a dict (translated from the original
    Korean note).  Prints the keyword dict and returns the number of
    positional arguments.
    """
    print(var_dict)
    return len(var_list)
# + colab={"base_uri": "https://localhost:8080/"} id="WbztOLl9xSWd" outputId="cd6bac1b-6bff-4ca4-e917-0eac2fd58dbe"
diffparam(1,2,3,a=2, b=3)
# + colab={"base_uri": "https://localhost:8080/"} id="Th_qlTBHzZhd" outputId="e592b936-5c75-4708-96e6-6920d2976dd3"
diffparam(1,2,3,4,5,c=5, d=6, a=2, b=3)
# + id="lYiidiMy0AAZ"
| python_language.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # SVC Test
#
# This is an example use case of a support vector classifier. We will infer a data classification function from a set of training data.
#
# We will use the SVC implementation in the [scikit-learn](http://scikit-learn.org/) toolbox.
# +
from sklearn import svm
import pandas as pd
import pylab as pl
import seaborn as sns
# %matplotlib inline
# -
# We begin by defining a set of training points. This is the set which the classifier will use to infer the data classification function. Each row represents a data point, with x,y coordinates and classification value.
fit_points = [
[2,1,1],
[1,2,1],
[3,2,1],
[4,2,0],
[4,4,0],
[5,1,0]
]
# To understand the data set, we can plot the points from both classes (1 and 0). Points of class 1 are in black, and points from class 0 in red.
sns.set(style="darkgrid")
pl.scatter([point[0] if point[2]==1 else None for point in fit_points],
[point[1] for point in fit_points],
color = 'black')
pl.scatter([point[0] if point[2]==0 else None for point in fit_points],
[point[1] for point in fit_points],
color = 'red')
pl.grid(True)
pl.show()
# The SVC uses [pandas](http://pandas.pydata.org/) data frames to represent data. The [data frame](http://pandas.pydata.org/pandas-docs/dev/generated/pandas.DataFrame.html) is a convenient data structure for tabular data, which enables column labels.
df_fit = pd.DataFrame(fit_points, columns=["x", "y", "value"])
print(df_fit)
# We need to select the set of columns with the data features. In our example, those are the `x` and `y` coordinates.
train_cols = ["x", "y"]
# We are now able to build and train the classifier.
clf = svm.SVC()
clf.fit(df_fit[train_cols], df_fit.value)
# The classifier is now trained with the fit points, and is ready to be evaluated with a set of test points, which have a similar structure to the fit points: `x`, `y` coordinates, and a value.
test_points = [
[5,3],
[4,5],
[2,5],
[2,3],
[1,1]
]
# We separate the features and values to make clear were the data comes from.
test_points_values = [0,0,0,1,1]
# We build the test points dataframe with the features.
df_test = pd.DataFrame(test_points, columns=['x','y'])
print(df_test)
# We can add the values to the dataframe.
df_test['real_value'] = test_points_values
print(df_test)
# Right now we have a dataframe similar to the one with the fit points. We'll use the classifier to add a fourth column with the predicted values. Our goal is to have the same value in both `real_value` and `predicted_value` columns.
df_test['predicted_value'] = clf.predict(test_points)
print(df_test)
# The classifier is pretty successful at predicting values from the `x` and `y` coordinates. We may also apply the classifier to the fit points - it's somewhat pointless, because those are the points used to infer the data classification function.
df_fit[''] = clf.predict([x[0:2] for x in fit_points])
print(df_fit)
# To better understand the data separation between values 1 and 0, we'll plot both the fit points and the test points.
#
# Following the same color code as before, points that belong to class 1 are represented in black, and points that belong to class 0 in red. Fit points are represented as full circles, and the test points are represented by hollow circumferences.
sns.set(style="darkgrid")
for i in range(0,2):
pl.scatter(df_fit[df_fit.value==i].x,
df_fit[df_fit.value==i].y,
color = 'black' if i == 1 else 'red')
pl.scatter(df_test[df_test.predicted_value==i].x,
df_test[df_test.predicted_value==i].y,
marker='o',
facecolor='none',
color='black' if i == 1 else 'red')
pl.grid(True)
pl.show()
| SVM Test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="tpeNMQEgIAo1"
#Parsing Important details of subject such as diet,lifestyle,age,height,width
import os
import re
import pandas as pd
class rparser:
    """Parse per-subject WESAD readme files and merge the extracted
    demographic/lifestyle attributes into the feature dataset.

    Code adapted from
    https://github.com/arsen-movsesyan/springboard_WESAD/blob/master/parsers/readme_parser.py
    """

    # Output column name -> how to find and split the matching readme line.
    VALUE_EXTRACT_KEYS = {
        "age": {
            'search_key': 'Age',
            'delimiter': ':'
        },
        "height": {
            'search_key': 'Height',
            'delimiter': ':'
        },
        "weight": {
            'search_key': 'Weight',
            'delimiter': ':'
        },
        "gender": {
            'search_key': 'Gender',
            'delimiter': ':'
        },
        "dominant_hand": {
            'search_key': 'Dominant',
            'delimiter': ':'
        },
        "coffee_today": {
            'search_key': 'Did you drink coffee today',
            'delimiter': '? '
        },
        "coffee_last_hour": {
            'search_key': 'Did you drink coffee within the last hour',
            'delimiter': '? '
        },
        "sport_today": {
            'search_key': 'Did you do any sports today',
            'delimiter': '? '
        },
        "smoker": {
            'search_key': 'Are you a smoker',
            'delimiter': '? '
        },
        "smoke_last_hour": {
            'search_key': 'Did you smoke within the last hour',
            'delimiter': '? '
        },
        "feel_ill_today": {
            'search_key': 'Do you feel ill today',
            'delimiter': '? '
        }
    }
    # Add your path to Dataset
    DATA_PATH = 'Path to Dataset'
    # Each subject S<n> keeps an S<n>_readme.txt inside its own directory.
    parse_file_suffix = '_readme.txt'

    def __init__(self):
        # Subject directories look like S2, S10, ... directly under DATA_PATH.
        self.readme_locations = {subject_directory: self.DATA_PATH + subject_directory + '/'
                                 for subject_directory in os.listdir(self.DATA_PATH)
                                 if re.match('^S[0-9]{1,2}$', subject_directory)}
        # BUGFIX: the original tested os.path.isfile('data/readmes.csv') here,
        # but parse_all_readmes() writes -- and merge_with_feature_data()
        # reads -- DATA_PATH + 'readmes.csv'.  Test the file we actually
        # produce, otherwise the readmes are re-parsed every run (or the merge
        # crashes on a missing file).
        if not os.path.isfile(self.DATA_PATH + 'readmes.csv'):
            print('Parsing Readme files')
            self.parse_all_readmes()
        else:
            print('Files already parsed.')
        self.merge_with_feature_data()

    def parse_readme(self, subject_id):
        """Return a dict of extracted attribute values for one subject."""
        with open(self.readme_locations[subject_id] + subject_id + self.parse_file_suffix, 'r') as f:
            lines = f.read().split('\n')
        readme_dict = {}
        for item in lines:
            for key, spec in self.VALUE_EXTRACT_KEYS.items():
                if item.startswith(spec['search_key']):
                    # maxsplit=1 so values that themselves contain the
                    # delimiter no longer break the 2-tuple unpacking.
                    _, value = item.split(spec['delimiter'], 1)
                    readme_dict[key] = value
                    break
        return readme_dict

    def parse_all_readmes(self):
        """Parse every subject's readme and save the combined table as CSV."""
        dframes = []
        for subject_id in self.readme_locations:
            readme_dict = self.parse_readme(subject_id)
            dframes.append(pd.DataFrame(readme_dict, index=[subject_id]))
        pd.concat(dframes).to_csv(self.DATA_PATH + 'readmes.csv')

    def merge_with_feature_data(self):
        """Join parsed readme attributes onto data/stress.csv by subject id."""
        # Confirm feature files are available.
        if os.path.isfile('data/stress.csv'):
            feat_df = pd.read_csv('data/stress.csv', index_col=0)
        else:
            print('No feature data available. Exiting...')
            return
        # Combine data and save.
        df = pd.read_csv(f'{self.DATA_PATH}readmes.csv', index_col=0)
        dummy_df = pd.get_dummies(df)
        # Index labels look like 'S10' -> numeric subject id 10.
        dummy_df['subject'] = dummy_df.index.str[1:].astype(int)
        # NOTE(review): 'gender_ female' / 'gender_ male' keep the leading
        # space that get_dummies produces from the raw readme values --
        # presumably intentional; confirm against the parsed CSV.
        dummy_df = dummy_df[['age', 'height', 'weight', 'gender_ female', 'gender_ male',
                             'coffee_today_YES', 'sport_today_YES', 'smoker_NO', 'smoker_YES',
                             'feel_ill_today_YES', 'subject']]
        merged_df = pd.merge(feat_df, dummy_df, on='subject')
        merged_df.to_csv('data/stress.csv')
if __name__ == '__main__':
rp = rparser()
| sub_info_parser.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import datetime
import gc
from sklearn.ensemble import (
BaggingRegressor, RandomForestRegressor, AdaBoostRegressor)
from sklearn.metrics import mean_squared_error
from technical_indicators import * # import all function
from sklearn.model_selection import TimeSeriesSplit
#import parfit as pf
from sklearn.metrics import r2_score
import numpy as np
from sklearn.model_selection import ParameterGrid
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
from sklearn import linear_model
# read in our data
url = "https://raw.githubusercontent.com/meenmo/Stat479_Project/master/Data/IBM.csv"
df_ORIGINAL = pd.read_csv(url)
df_ORIGINAL.drop(['Close'], axis = 1, inplace = True)
df_ORIGINAL.columns = ['Date', 'High', 'Low', 'Open', 'Volume', 'Close']
#df_ORIGINAL = pd.read_csv('IBM.csv')
df_features = df_ORIGINAL
df_features['Date'] = pd.to_datetime(df_features['Date'])
df_features.head()
### Create Lag Features
# this is exactly what the book does for create_lag_features from chapter 29
def create_lag_features(df, lag_days):
    """Append percent-change lag features to a daily price DataFrame.

    Adds:
      * 'Today' -- percent return of 'Close' vs. the previous row's close
      * 'lag1'..'lag<lag_days>' -- percent returns of successively earlier days

    Parameters
    ----------
    df : pandas.DataFrame
        Must contain 'Date' and 'Close' columns, one row per trading day,
        sorted ascending by date.
    lag_days : int
        Number of lag-return columns to generate.

    Returns
    -------
    pandas.DataFrame
        A copy of `df` with the new columns; early rows hold NaN until
        enough history exists.
    """
    out = df.copy()

    # Bring the close from `shift_by` days earlier onto each row by shifting
    # the Date key backwards and left-merging.  We need lag_days + 1 past
    # closes to compute lag_days percent changes.
    for shift_by in range(1, lag_days + 2):
        shifted = out[['Date', 'Close']].copy()
        shifted['Date'] = shifted['Date'].shift(-shift_by)
        shifted.columns = ['Date', 'value_lag' + str(shift_by)]
        out = out.merge(shifted, how='left', on='Date')
        # Free the intermediate frame.
        del shifted

    # Today's percent return relative to yesterday's close.
    out['Today'] = (out['Close'] - out['value_lag1']) / out['value_lag1'] * 100.0

    # lag_i = percent return between the closes i and i+1 days ago.
    for i in range(1, lag_days + 1):
        newer, older = 'value_lag' + str(i), 'value_lag' + str(i + 1)
        out['lag' + str(i)] = (out[newer] - out[older]) / out[older] * 100.0

    # The raw shifted closes were only intermediates; drop them.
    out.drop(['value_lag' + str(i) for i in range(1, lag_days + 2)],
             axis=1, inplace=True)
    return out
df_features = create_lag_features(df_features, 5) # 5 lag features
df_features.head(7)
# drop earlier data with missing lag features
df_features.dropna(inplace=True)
# +
# reset index
df_features.reset_index(drop = True, inplace = True)
#### GENERATE TECHNICAL INDICATORS FEATURES
df_features = standard_deviation(df_features, 14)
df_features = relative_strength_index(df_features, 14) # periods
df_features = average_directional_movement_index(df_features, 14, 13) # n, n_ADX
df_features = moving_average(df_features, 21) # periods
df_features = exponential_moving_average(df_features, 21) # periods
df_features = momentum(df_features, 14) #
df_features = average_true_range(df_features, 14)
df_features = bollinger_bands(df_features, 21)
df_features = ppsr(df_features)
df_features = stochastic_oscillator_k(df_features)
df_features = stochastic_oscillator_d(df_features, 14)
df_features = trix(df_features, 14)
df_features = macd(df_features, 26, 12)
df_features = mass_index(df_features)
df_features = vortex_indicator(df_features, 14)
df_features = kst_oscillator(df_features, 10, 10, 10, 15, 10, 15, 20, 30)
df_features = true_strength_index(df_features, 25, 13)
#df_features = accumulation_distribution(df_features, 14) # Causes Problems, apparently
df_features = chaikin_oscillator(df_features)
df_features = money_flow_index(df_features, 14)
df_features = on_balance_volume(df_features, 14)
df_features = force_index(df_features, 14)
df_features = ease_of_movement(df_features, 14)
df_features = commodity_channel_index(df_features, 14)
df_features = keltner_channel(df_features, 14)
df_features = ultimate_oscillator(df_features)
df_features = donchian_channel(df_features, 14)
# drop earlier data with missing lag features
df_features.dropna(inplace=True)
df_features = df_features.reset_index(drop = True)
# -
display(df_features.head())
# +
# do time series split DOING NUMBER 2
splits = TimeSeriesSplit(n_splits=5)
# stores all labels
y = df_features['Today']
# drop unneded columns
df_features.drop(['Open','Close','Low','High','Today'], axis = 1, inplace = True)
# all training data
X_train_all = df_features.loc[(df_features['Date'] < '2018-01-01')]
y_train_all = y[X_train_all.index]
# creates all test data which is all after January 2018
X_test = df_features.loc[(df_features['Date'] >= '2018-01-01'),:]
y_test = y[X_test.index]
# +
# Accumulators: training-set size per fold, and [MSE, R^2] per model per fold.
trainsamples = []
model1metrics = []
model2metrics = []
model3metrics = []
model4metrics = []
# Expanding-window walk-forward CV: each fold trains on all rows up to the
# fold boundary and validates on the next contiguous slice.
for train_index, test_index in splits.split(X_train_all.loc[:,'Date']):
    # do the split for train, Take row 0 to last element of train index
    X_train = X_train_all.loc[0:train_index[len(train_index) - 1],:]
    y_train = y[X_train.index]
    # do the split for validation, Take first element of test index to last element of test index
    X_val = X_train_all.loc[test_index[0]:test_index[len(test_index) - 1],:]
    y_val = y[X_val.index]
    # print some useful info
    # NOTE(review): these prints use X_test (the held-out 2018+ test set),
    # so 'Testing Observations' is constant across folds -- presumably
    # X_val.shape[0] was intended. TODO confirm.
    print('Observations: ', (X_train.shape[0] + X_test.shape[0]))
    print('Cutoff date, or first date in validation data: ', X_val.iloc[0,0])
    print('Training Observations: ', (X_train.shape[0]))
    print('Testing Observations: ', (X_test.shape[0]))
    # drop date
    # NOTE(review): X_train/X_val are .loc slices of X_train_all, so these
    # inplace drops can trigger pandas SettingWithCopyWarning; dropping on
    # explicit copies would be safer.
    X_train.drop(['Date'], axis = 1, inplace = True)
    X_val.drop(['Date'], axis = 1, inplace = True)
    trainsamples.append(X_train.shape[0])
    # random forest regression based on default parameter
    rf = RandomForestRegressor(random_state = 0)
    rf.fit(X_train, y_train)
    model1metrics.append([mean_squared_error(y_val, rf.predict(X_val)),r2_score(y_val, rf.predict(X_val))])
    # bagging based on default parameter
    # NOTE(review): unlike rf/boost, no random_state here -- results vary
    # between runs. TODO confirm whether that is intentional.
    bag = BaggingRegressor(DecisionTreeRegressor())
    bag.fit(X_train, y_train)
    model2metrics.append([mean_squared_error(y_val, bag.predict(X_val)),r2_score(y_val, bag.predict(X_val))])
    # boosting on default parameter
    boost = AdaBoostRegressor(DecisionTreeRegressor(), random_state = 0, learning_rate=0.01)
    boost.fit(X_train, y_train)
    model3metrics.append([mean_squared_error(y_val, boost.predict(X_val)),r2_score(y_val, boost.predict(X_val))])
    # linear regression on default parameter
    lr = linear_model.LinearRegression()
    lr.fit(X_train, y_train)
    model4metrics.append([mean_squared_error(y_val, lr.predict(X_val)),r2_score(y_val, lr.predict(X_val))])
# +
plt.plot(trainsamples,[item[0] for item in model1metrics], color = 'r', label = 'Random Forest')
plt.plot(trainsamples,[item[0] for item in model2metrics], color = 'g', label = 'Bagging')
plt.plot(trainsamples,[item[0] for item in model3metrics], color = 'b', label = 'AdaBoost')
plt.plot(trainsamples,[item[0] for item in model4metrics], color = 'orange', label = 'Linear Regression')
plt.title('MSE on validation data vs number of training samples for TimeSeriesSplit')
plt.xlabel('training samples')
plt.ylabel('MSE')
plt.legend()
#plt.rcParams['figure.figsize'] = [15,10]
plt.show()
# +
plt.plot(trainsamples,[item[1] for item in model1metrics], color = 'r', label = 'RandomForest')
plt.plot(trainsamples,[item[1] for item in model2metrics], color = 'g', label = 'Bagging')
plt.plot(trainsamples,[item[1] for item in model3metrics], color = 'b', label = 'Boosting')
plt.plot(trainsamples,[item[1] for item in model4metrics], color = 'orange', label = 'Linear Regression')
plt.title('R2 on validation data vs number of training samples for TimeSeriesSplit')
plt.xlabel('training samples')
plt.ylabel('R2')
plt.legend()
plt.rcParams['figure.figsize'] = [15,10]
plt.show()
# -
# ## Hyperparameter Tuning on last validation fold
# +
# Random-forest grid search on the final walk-forward fold.
rf = RandomForestRegressor(random_state = 0)
# hyperparameter tuning on randomforest
# NOTE(review): `pf` is undefined in this file -- `import parfit as pf` is
# commented out near the top, so this cell raises NameError until that import
# (and the parfit dependency) is restored.
paramGrid = ParameterGrid({
    'n_estimators': [10,15,20],
    'max_depth' : [10,15],
    'min_samples_leaf': [3,5,10]})
best_model, best_score, all_models, all_scores = pf.bestFit(rf, paramGrid, X_train, y_train, X_val, y_val, metric=mean_squared_error, greater_is_better=False, scoreLabel='MSE')
print(best_model)
# +
bag = BaggingRegressor(DecisionTreeRegressor(), random_state = 0)
# hyperparameter tuning on bagging
paramGrid = ParameterGrid({
'max_features': [10,15,20],
'n_estimators' : [5,10,15,20],
})
best_model, best_score, all_models, all_scores = pf.bestFit(bag, paramGrid, X_train, y_train, X_val, y_val, metric=mean_squared_error, greater_is_better=False, scoreLabel='MSE')
print(best_model)
# +
boost = AdaBoostRegressor(DecisionTreeRegressor(), random_state = 0)
# hyperparameter tuning on Boosting
paramGrid = ParameterGrid({
'learning_rate': [0.1,0.01],
'n_estimators' : [5,10,15,20],
})
best_model, best_score, all_models, all_scores = pf.bestFit(boost, paramGrid, X_train, y_train, X_val, y_val, metric=mean_squared_error, greater_is_better=False, scoreLabel='MSE')
print(best_model)
# +
rf = RandomForestRegressor(bootstrap=True, criterion='mse', max_depth=15,
max_features='auto', max_leaf_nodes=None,
min_impurity_decrease=0.0, min_impurity_split=None,
min_samples_leaf=3, min_samples_split=2,
min_weight_fraction_leaf=0.0, n_estimators=20, n_jobs=1,
oob_score=False, random_state=0, verbose=0, warm_start=False)
bag = BaggingRegressor(base_estimator=DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=None,
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
presort=False, random_state=None, splitter='best'),
bootstrap=True, bootstrap_features=False, max_features=20,
max_samples=1.0, n_estimators=20, n_jobs=1, oob_score=False,
random_state=0, verbose=0, warm_start=False)
boost = AdaBoostRegressor(base_estimator=DecisionTreeRegressor(criterion='mse', max_depth=None, max_features=None,
max_leaf_nodes=None, min_impurity_decrease=0.0,
min_impurity_split=None, min_samples_leaf=1,
min_samples_split=2, min_weight_fraction_leaf=0.0,
presort=False, random_state=None, splitter='best'),
learning_rate=0.01, loss='linear', n_estimators=15,
random_state=0)
lr = linear_model.LinearRegression(copy_X=True, fit_intercept=True, n_jobs=1, normalize=False)
rf.fit(X_train, y_train)
bag.fit(X_train, y_train)
boost.fit(X_train, y_train)
lr.fit(X_train, y_train)
# -
# mse and r2 on validation fold
print('mse level2: ', mean_squared_error(y_val, lr.predict(X_val)))
print('r2_score: ' , r2_score(y_val, lr.predict(X_val)))
# Linear Regression is the best model
X_test.drop(['Date'], axis = 1, inplace = True)
y_pred = lr.predict(X_test)
# mse and r2 on test fold
print('mse level2: ', mean_squared_error(y_test, y_pred))
print('r2_score: ' , r2_score(y_test, y_pred))
# +
plt.plot(y_test.index, y_pred)
plt.plot(y_test.index, y_test)
plt.xlabel('index')
plt.ylabel('% return')
plt.title('% return on test dataset')
plt.show()
# -
| Code/.ipynb_checkpoints/DT_Revised_v1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/brucebra000/DS-Unit-1-Sprint-1-Dealing-With-Data/blob/master/Copy_of_LS_DS_113_Making_Data_backed_Assertions_Assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Okfr_uhwhS1X" colab_type="text"
# # Lambda School Data Science - Making Data-backed Assertions
#
# This is, for many, the main point of data science - to create and support reasoned arguments based on evidence. It's not a topic to master in a day, but it is worth some focused time thinking about and structuring your approach to it.
# + [markdown] id="lOqaPds9huME" colab_type="text"
# ## Assignment - what's going on here?
#
# Consider the data in `persons.csv` (already prepared for you, in the repo for the week). It has four columns - a unique id, followed by age (in years), weight (in lbs), and exercise time (in minutes/week) of 1200 (hypothetical) people.
#
# Try to figure out which variables are possibly related to each other, and which may be confounding relationships.
#
# Try and isolate the main relationships and then communicate them using crosstabs and graphs. Share any cool graphs that you make with the rest of the class in Slack!
# + id="TGUS79cOhPWj" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 363} outputId="bb6722bc-1464-4b6b-dff9-b5c1226921ed"
# TODO - your code here
# Use what we did live in lecture as an example
# HINT - you can find the raw URL on GitHub and potentially use that
# to load the data with read_csv, or you can upload it yourself
import pandas as pd
import matplotlib.pyplot as plt
persons = "https://raw.githubusercontent.com/brucebra000/DS-Unit-1-Sprint-1-Dealing-With-Data/master/module3-databackedassertions/persons.csv"
df = pd.read_csv(persons)
firstFew = df[:30] #A changable value that will only grab a couple of rows to make the charts a bit more readable.
df.head(10)
# + id="g3YTXX-i0oSC" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="260edbeb-b7e3-4198-be9b-b16c10cedeb2"
weightVsEt = ['weight', 'exercise_time']
weightAndEt = firstFew[weightVsEt]
weightAndEt.plot(kind = 'bar');
print("People who have spent more time exercising tend to be lighter in weight.")
# + id="v1cNKZXT4KnU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="36aa026a-994e-42dc-ef86-08ddd7b1c961"
ageVsWeight = ['age', 'weight']
ageAndWeight = firstFew[ageVsWeight]
ageAndWeight.plot(kind = 'bar');
print("Age does not seem to have any correlation to a person's weight.")
# + id="aJyoiyhYD1j7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 290} outputId="9da2cf6c-175a-4781-adf1-3a83b6258907"
ageVsEt = ['age', 'exercise_time']
ageAndEt = firstFew[ageVsEt]
ageAndEt.plot(kind = 'bar')
print("A persons age tends to affect the time they spend exercising, but this is not always the case.")
# + [markdown] id="BT9gdS7viJZa" colab_type="text"
# ### Assignment questions
#
# After you've worked on some code, answer the following questions in this text block:
#
# 1. What are the variable types in the data?
# -Age is a continuous variable, weight is categorical, and exercise_time is continuous.
# 2. What are the relationships between the variables?
# -Weight seems to go down as the time spent exercising goes up. Age doesn't seem to have any effect on weight, as each person's weight greatly fluctuates no matter their age.
# 3. Which relationships are "real", and which spurious?
# -Time spent exercising and weight have a real relationship. While age can seem to affect the time someone spends exercising, it is probably health issues and motivation that have a more direct effect on the time they spend exercising.
#
# + [markdown] id="_XXg2crAipwP" colab_type="text"
# ## Stretch goals and resources
#
# Following are *optional* things for you to take a look at. Focus on the above assignment first, and make sure to commit and push your changes to GitHub.
#
# - [Spurious Correlations](http://tylervigen.com/spurious-correlations)
# - [NIH on controlling for confounding variables](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4017459/)
#
# Stretch goals:
#
# - Produce your own plot inspired by the Spurious Correlation visualizations (and consider writing a blog post about it - both the content and how you made it)
# - Pick one of the techniques that NIH highlights for confounding variables - we'll be going into many of them later, but see if you can find which Python modules may help (hint - check scikit-learn)
| Copy_of_LS_DS_113_Making_Data_backed_Assertions_Assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#ignore
# Notebook styling boilerplate: load custom CSS from a local config module.
from IPython.core.display import HTML,Image
import sys
sys.path.append('/anaconda/')  # assumes config.py lives in /anaconda/ -- TODO confirm path
import config
HTML('<style>{}</style>'.format(config.CSS))
# -
#
# ### Introduction
#
# If you haven't yet seen the [first post](data_management.html) for this series, please take a minute to read that first... Are you back? Great. Let's enter the wormhole...
#
#
# <img src="images/DataVortex.jpg" width="400">
#
#
# This post is going to delve into the mechanics of _feature engineering_ for the sorts of time series data that you may use as part of a stock price prediction modeling system.
#
# I'll cover the basic concept, then offer some useful python code recipes for transforming your raw source data into features which can be fed directly into a ML algorithm or ML pipeline.
#
# Anyone who has dabbled with any systems-based trading or charting already has experience with simple forms of feature engineering, whether or not they realized it. For instance:
# * Converting a series of asset prices into percent change values is a simple form of feature engineering
# * Charting prices vs. a moving average is an implicit form of feature engineering
# * Any technical indicator (RSI, MACD, etc...) are also forms of feature engineering
#
# The process takes in one or more columns of "raw" input data (e.g., OHLC price data, 10-Q financials, social media sentiment, etc...) and converts it into _many_ columns of engineered features.
#
#
# ### Motivation
#
# I believe (and I don't think I'm alone here!) that feature engineering is the most under-appreciated part of the art of machine learning. It's certainly the most time consuming and tedious, but it's creative and "fun" (for those who like getting their hands dirty with data, anyway...).
#
# Feature engineering is also one of the key areas where those with domain expertise can shine. Those whose expertise in investing is greater than their skill in machine learning will find that feature engineering will allow them to make use of that domain expertise.
#
# _Feature engineering_ is a term of art for data science and machine learning which refers to pre-processing and transforming raw data into a form which is more easily used by machine learning algorithms. Much like industrial processing can extract pure gold from trace elements within raw ore, feature engineering can extract valuable "alpha" from very noisy raw data.
#
#
# <img src="images/gold_mine.jpg" width="400">
#
# You have to dig through a lot of dirt to find gold.
#
# ### Principles and guidelines
#
# Feature engineering is fundamentally a creative process which should not be overly constrained by rules or limits.
#
# However, I do believe there are a few guidelines to be followed:
#
# * __No peeking:__ Peeking (into the future) is the "original sin" of feature engineering (and prediction modeling in general). It refers to using information about the future (or information which would not yet be known by us...) to engineer a piece of data.
#
# This can be obvious, like using next_12_months_returns. However, it's most often quite subtle, like using the mean or standard deviation across the full time period to normalize data points (which implicitly leaks future information into our features. The test is whether you would be able to get __the exact same value__ if you were calculating the data point at that point in time rather than today.
#
#
# * __Only the knowable:__ A corollary to the above, you also need to be honest about what you would have known at the time, not just what had happened at the time. For instance, short borrowing data is reported by exchanges with a considerable time lag. You would want to stamp the feature with the date on which you _would have known_ it.
#
#
# * __Complete the table:__ Many machine learning algorithms expect that every input feature will have a value (of a certain type) for each observation. If you envision a spreadsheet where each feature is a column and each observation is a row, there should be a value in each cell of the table. Quite often, some features in the table will naturally update themselves more frequently than others.
#
# Price data updates almost continuously, while short inventory, analyst estimates, or EBITDA tend to update every few weeks or months. In these cases, we'll use a scheme like last observation carried forward (LOCF) to always have a value for each feature in the naturally lower frequency columns. Of course, we will be careful to avoid inadvertent peeking!
#
#
# * __Avoid false ordinality:__ Finally, it's extremely important to represent features in a way that captures ordinality only if it has meaning. For instance, it's usually a bad idea to represent "day of the week" as an integer 1 to 7 since this implicitly tells the model to treat Friday as very similar to Thursday, but "a little more". It would also say that Sunday and Monday are totally different (if Sunday =7 and Monday =1). We could miss all manner of interesting patterns in the data.
#
# ### Getting Started
# I will begin by extracting some toy data into a dataframe using free data from [quandl](https://www.quandl.com/):
#
# First, we'll make a utility function which downloads one or more symbols from quandl and returns the adjusted OHLC data (I generally find adjusted data to be best).
# +
import numpy as np
import pandas as pd
pd.core.common.is_list_like = pd.api.types.is_list_like # remove once updated pandas-datareader issue is fixed
# https://github.com/pydata/pandas-datareader/issues/534
import pandas_datareader.data as web
# %matplotlib inline
def get_symbols(symbols,data_source, begin_date=None,end_date=None):
    """Download adjusted OHLCV history for several tickers into one frame.

    Each symbol is fetched from `data_source` (e.g. 'quandl'), reduced to its
    adjusted open/high/low/close/volume columns, renamed to lowercase, and
    stacked into a single DataFrame indexed by (date, symbol).
    """
    adjusted_cols = ['AdjOpen', 'AdjHigh', 'AdjLow', 'AdjClose', 'AdjVolume']
    tidy_names = ['date', 'open', 'high', 'low', 'close', 'volume']  # convention: always lowercase
    collected = [pd.DataFrame()]  # seed with an empty frame so an empty symbol list yields an empty result
    for symbol in symbols:
        raw = web.DataReader(symbol, data_source, begin_date, end_date)
        tidy = raw[adjusted_cols].reset_index()
        tidy.columns = tidy_names
        tidy['symbol'] = symbol  # keep multiple symbols distinguishable in one frame
        collected.append(tidy.set_index(['date', 'symbol']))
    return pd.concat(collected, axis=0).sort_index()
prices = get_symbols(['AAPL','CSCO'],data_source='quandl',\
                     begin_date='2015-01-01',end_date='2017-01-01')
# -
# With the data collected, we can create a new dataframe called "features" which will be used to compile all of the features we engineer. Good practice is to create this dataframe with an index from your downloaded data, since you should only have new feature values as you have new primary source data.
#
# As the simple example below illustrates, we can then construct features from the data and store into multiple feature columns. Note that there will often be null values inserted if the formula doesn't produce valid values for each row index.
#
# features shares the (date, symbol) MultiIndex of prices so each engineered column stays aligned with the raw data
features = pd.DataFrame(index=prices.index).sort_index()
features['f01'] = prices.close/prices.open-1 # daily return
features['f02'] = prices.open/prices.groupby(level='symbol').close.shift(1)-1 # overnight gap: open vs. prior close, per symbol
print(features.tail())
# >__Side note__: I favor following a bland naming convention like f01, f02, etc... for each feature (and then documenting what each feature represents...) rather than using descriptive column names. My reasons for this are three-fold:
#
# >1. Descriptive names tend to be long and cumbersome to use,
# 2. They're rarely truly self-describing, and
# 3. It's often useful to create an abstraction to conceal from the modeler (either yourself or someone else) what each represents. Think of it like a blind taste test.
#
# Following this basic code pattern, we can generate infinite variations into our features. This is where your domain expertise and analytical creativity come into play!
#
# My suggestion is to make sure you have a reasonable hypothesis before you create any feature, but don't be afraid to try many variations on a theme. There is much to be learned from trying several flavors of feature transformations out.
#
#
# ### Common Transforms
# In the interest of accelerating your creativity, I've listed below a series of "recipes" for some of the transforms I've found useful - especially when using linear or quasi-linear models - extract meaningful relationships.
#
# #### Logs
# Many times, values like market cap, volume, revenue can map better to the prediction target if put into log space. This is easy to do via pandas+numpy:
features['f03'] = prices.volume.apply(np.log) # log of daily volume
# #### Differencing
# It's often more important to know how a value is changing than to know the value itself. The `diff()` method will calculate the change in value since the prior period (i.e., current minus prior). NOTE: the "groupby" is critically important since if it were omitted we would be comparing the difference in volume between symbols, which is not what we want.
# NOTE(review): the next line overwrites the log-volume 'f03' assigned just above.
features['f03'] = prices.groupby(level='symbol').volume.diff() # change since prior day
features['f04'] = prices.groupby(level='symbol').volume.diff(50) # change since 50 days prior
# #### Rate of Change
# Even more common is to want the rate of change as expressed by percent change. Pandas has the handy `pct_change()` method for this purpose, but beware that you'll get [odd behavior](https://stackoverflow.com/questions/50915213/using-pct-change-with-multiindex-groupby) if you mix this with `groupby()` as shown above. I prefer to create my own lambda wrapper function as shown below.
pct_chg_fxn = lambda x: x.pct_change()
features['f05'] = prices.groupby(level='symbol').volume.apply(pct_chg_fxn) # daily % change in volume, per symbol
# #### Moving Averages
# Sometimes, we'd rather use the moving average of a value as part of a feature. This can be the value itself if you want to minimize how "jittery" a value is. Or, more commonly, you may want to compare a value with a trailing moving average of itself.
#
# Again, we need to use groupby since our dataframe has info on multiple symbols - and again, we need to use a lambda function wrapper to avoid error. There are other patterns which will accomplish the same thing but I find this to be cleanest.
#
#
# +
# log of 5 day moving average of volume
# NOTE: rolling(n) yields NaN until n observations exist for a symbol
ma_5 = lambda x: x.rolling(5).mean()
features['f06'] = prices.volume.groupby(level='symbol').apply(ma_5)\
    .apply(np.log)
# daily volume vs. 200 day moving average
ma_200 = lambda x: x.rolling(200).mean()
features['f07'] = prices.volume/ prices.volume.groupby(level='symbol')\
    .apply(ma_200)-1
# daily closing price vs. 50 day exponential moving avg
ema_50 = lambda x: x.ewm(span=50).mean()
features['f08'] = prices.close/ prices.close.groupby(level='symbol')\
    .apply(ema_50)-1
# -
# >__Side note__: Since the rolling window is an important - and somewhat arbitrary - value, you should try a range of reasonable values. I have found that it's better to use an exponentially increasing range of values rather than a linear range. In other words, use [5,10,20,40,80,160] rather than [10,20,30,40...,100].
#
# >The rationale is that values like 90 and 100 are really quite similar whereas 10 and 20 are quite different. Therefore, if you choose linearly spaced values, you'll effectively be giving the higher end of the value range more chances to succeed than the lower values - and you'll increase the likelihood of overfitting a high value.
# #### Z-Scores
# A very popular/useful transformation for financial time series data is the [z-score](http://stattrek.com/statistics/dictionary.aspx?definition=z-score). We can easily define a generalized lambda function for this, which we can use whenever needed. Importantly, it allows us to mix together very different symbols (some high-beta, some low-beta) in a way that considers the statistical significance of any movement.
#
# NOTE: full-sample mean/std peeks into the future (acknowledged in the text below)
zscore_fxn = lambda x: (x - x.mean()) / x.std()
features['f09'] =prices.groupby(level='symbol').close.apply(zscore_fxn)
features.f09.unstack().plot.kde(title='Z-Scores (not quite accurate)')
# However, the above example has a subtle but important bug. It uses the mean _of the whole time frame_ and the standard deviation _of the whole time frame_ to calculate each datapoint. This means we are peeking ahead into the future and the feature is potentially very danger-prone (it'll work famously well in sample and fail to work out of sample...).
#
# Fixing this is cumbersome, but necessary.
# trailing 200-day window (min 20 obs) avoids using future data in the z-score
zscore_fun_improved = lambda x: (x - x.rolling(window=200, min_periods=20).mean())\
    / x.rolling(window=200, min_periods=20).std()
features['f10'] =prices.groupby(level='symbol').close.apply(zscore_fun_improved)
features.f10.unstack().plot.kde(title='Z-Scores (accurate)')
# #### Percentile
# Less commonly used - but equally useful - is the percentile transformation. Getting this done properly in pandas (with groupby and rolling) is possible but tricky. The below example returns the percentile rank (from 0.00 to 1.00) of traded volume for each value as compared to a trailing 200 day period.
#
# Note that we need to use _a lambda within a lambda_ to make this work properly. We're on the bleeding edge.
#
# NOTE(review): rank(pct=True)[0] ranks the *first* (oldest) value in each window;
# ranking the current observation would use the last element -- confirm intent.
rollrank_fxn = lambda x: x.rolling(200,min_periods=20)\
    .apply(lambda x: pd.Series(x).rank(pct=True)[0])
features['f11'] = prices.groupby(level='symbol').volume.apply(rollrank_fxn)
# Another interesting application of this same pattern is to rank each stock _cross-sectionally_ rather than _longitudinally_ as above. In other words, where does this stock rank within all of the stocks on that day, not for all prior days of that stock. The below example isn't very meaningful with only two stocks, but quite useful when using a realistic universe. In this example, we're also making use of an earlier feature (relative volume) to compare which symbol is most heavily traded _for that stock's normal range_ in a given day. Also note that we need to `dropna()` prior to ranking because `rank` doesn't handle nulls very gracefully.
features['f12'] = features['f07'].dropna().groupby(level='date').rank(pct=True)
# #### Technical Indicators
#
# <img src="images/TA.jpg" width="400">
#
# Those with a taste for technical analysis may find it difficult to let go of your favored TA techniques.
#
# While this is not _my_ favored approach, you'll have no problem engineering features using these methods. From my cursory googling, it looked as though the [open source package `ta`](https://github.com/bukosabino/ta) would be a good place to start.
#
# Very new and only one contributor but it looks fairly comprehensive and well documented. If you find that it's missing your favorite indicators, consider contributing to the package. If you know of better such packages, please post in the comments below...
#
# As an example:
#
import ta # technical analysis library: https://technical-analysis-library-in-python.readthedocs.io/en/latest/
# money flow index (14 day)
features['f13'] = ta.momentum.money_flow_index(prices.high,
                                prices.low, prices.close, \
                                prices.volume, n=14, fillna=False)
# mean-centered money flow index (raw MFI minus its trailing 200-day mean)
features['f14'] = features['f13'] - features['f13']\
    .rolling(200,min_periods=20).mean()
# ### Alternative Representations
# A bit different than transforms are "representations", i.e., other ways to represent continuous values. All of the transforms above returned continuous values rather than "labels", and that's often a good place to start - especally for early prototypes.
#
# However, you may want to represent the data in different ways, especially if using classification-based approaches or worried about the [curse of dimensionality](https://en.wikipedia.org/wiki/Curse_of_dimensionality) due to large numbers of features.
# #### Binning
# We can easily convert a continous variable to discrete "bins" (like 1 to 10). This loses information, of course, but sometimes loss of information is a good thing if you are removing more noise than signal.
#
# The below example shows volumes converted into ten equally sized buckets. In other words, we've converted a continuous variable into a discrete one.
#
# NOTE: this example is not applied in a rolling fashion, so it __does suffer from some data peeking__, which I've described as the original sin. At the moment, I'm failing in my efforts to implement it in a rolling way. I'd be grateful for code snippets if anyone knows how to do this offhand.
n_bins = 10
# qcut assigns each value to one of 10 equal-count buckets, labeled 1..10
bin_fxn = lambda y: pd.qcut(y,q=n_bins,labels = range(1,n_bins+1))
features['f15'] = prices.volume.groupby(level='symbol').apply(bin_fxn)
# #### Signing
# Very simply, you may wish to convert continuous variables into positive or negative (1 or -1) values, depending on input. For instance, was volume increasing or decreasing today?
#
features['f16'] = features['f05'].apply(np.sign) # +1/-1/0: direction of the daily volume change
# #### Plus-Minus
# You may be interested in how many days in a row a value has increased (or decreased). Below is a simple pattern to do just that - it calculates the number of up-days minus the number of down days.
#
plus_minus_fxn = lambda x: x.rolling(20).sum()
features['f17'] = features['f16'].groupby(level='symbol').apply(plus_minus_fxn) # up-days minus down-days over trailing 20 days
# #### One-Hot Encoding
# Possibly the most common alternative representation is "one-hot encoding" where a categorical variable is represented as a binary. For instance, month_of_year would be represented as twelve different columns, each of which was either 0 or 1. January would be [1,0,0,0,...0] etc...
#
# This is absolutely crucial in a few circumstances. The first is where there is false meaning in the "ordinality" of values. If we were looking to test the "santa claus effect" hypothesis, it wouldn't be helpful to use a month_of_year feature where January was "the least" and December was "the most".
#
# The second is in cases where we are representing events or "states". Does the word "lawsuit" appear within the 10-Q footnotes? Is the company in the blackout period for share buybacks?
#
# Finally, the particular machine learning algorithm (tree-based, neural networks) may find it easier to use binary representations than continuous or discrete ones.
#
# The below example creates twelve one-hot features, one for each month, and names them automatically
# +
month_of_year = prices.index.get_level_values(level='date').month
one_hot_frame = pd.DataFrame(pd.get_dummies(month_of_year))
one_hot_frame.index = prices.index # Careful! This is forcing index values without usual pandas alignments!
# create column names
begin_num = int(features.columns[-1][-2:]) + 1 #first available feature
feat_names = ['f'+str(num) for num in list(range(begin_num,begin_num+12,1))]
# rename columns and merge
one_hot_frame.columns = feat_names
features = features.join(one_hot_frame)
print(features.iloc[:,-12:].tail())
# -
# With raw data series `month` transformed into 12 individual one-hot encoding columns, we can train a model which will learn whether the fact that it's July should materially affect our predictions, based on historical patterns.
#
# ### Summary
#
# Whew! We made it.
#
# From this post, you've seen a simple code pattern for creating any number of "features" from raw input data, as well as some suggestions for useful transformations and alternative representations.
#
# If we've done it right, this has resulted in many variations of similar features. Thus, before we start building models, we need a method of selecting only those features which provide greatest insight and robustness.
#
# I'll cover this in the next post: [Feature Selection](feature_selection.html)
# ### One last thing...
#
# If you've found this post useful, please follow [@data2alpha](https://twitter.com/data2alpha) on twitter and forward to a friend or colleague who may also find this topic interesting.
#
# Finally, take a minute to leave a comment below - either to discuss this post or to offer an idea for future posts. Thanks for reading!
| content/02_Feature_Engineering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# cf. [`stackoverflow` How to Implement a Binary Tree?](https://stackoverflow.com/questions/2598437/how-to-implement-a-binary-tree)
# +
# binary tree
class Node:
    """A single binary-search-tree node: a value and left/right child links."""
    def __init__(self,val):
        self.l=None   # left child (values < v)
        self.r=None   # right child (values >= v)
        self.v=val    # payload

class Tree:
    """Unbalanced binary search tree (duplicates go to the right subtree).

    Fixes over the original:
    - _find now *returns* the result of its recursive calls; the original
      dropped the return value, so find() only worked for the root node.
    - print uses call syntax so the code runs on both Python 2 and 3.
    """
    def __init__(self):
        self.root = None

    def getRoot(self):
        return self.root

    def add(self, val):
        # Insert val, creating the root on the first insert.
        if self.root is None:
            self.root = Node(val)
        else:
            self._add(val, self.root)

    def _add(self, val, node):
        if val < node.v:
            if node.l is not None:
                self._add(val, node.l)
            else:
                node.l = Node(val)
        else:
            if node.r is not None:
                self._add(val, node.r)
            else:
                node.r = Node(val)

    def find(self, val):
        # Return the Node holding val, or None if absent.
        if self.root is not None:
            return self._find(val, self.root)
        else:
            return None

    def _find(self, val, node):
        if val == node.v:
            return node
        elif val < node.v and node.l is not None:
            return self._find(val, node.l)  # bug fix: propagate the result
        elif val > node.v and node.r is not None:
            return self._find(val, node.r)  # bug fix: propagate the result

    def deleteTree(self):
        # garbage collector will do this for us
        self.root = None

    def printTree(self):
        if self.root is not None:
            self._printTree(self.root)

    def _printTree(self, node):
        # In-order traversal: prints values in sorted order.
        if node is not None:
            self._printTree(node.l)
            print(str(node.v) + ' ')
            self._printTree(node.r)
# -
# Exercise the BST: build it, print it in order, search it, then clear it.
# NOTE: Python 2 print statements below (this notebook's kernel is python2).
tree = Tree()
tree.add(3)
tree.add(4)
tree.add(0)
tree.add(8)
tree.add(2)
tree.printTree()
print (tree.find(3)).v
print tree.find(10)
tree.deleteTree()
tree.printTree()
# From [Rahul's](https://stackoverflow.com/users/4040998/rahul) answer, I got to this tutorial: [6.13. Search Tree Implementation](http://interactivepython.org/runestone/static/pythonds/Trees/SearchTreeImplementation.html)
#
# I went back to the beginning of [Problem Solving with Algorithms and Data Structures using Python](http://interactivepython.org/runestone/static/pythonds/index.html).
#
# I started here, with [6.4. List of Lists Representation](http://interactivepython.org/runestone/static/pythonds/Trees/ListofListsRepresentation.html)
# list-of-lists representation: [value, left subtree, right subtree]
myTree = ['a', # root
    ['b', # left subtree
    ['d', [], []],
    ['e', [], []] ],
    ['c', # right subtree
    ['f', [], []],
    []]]
myTree
# Nice features of the list-of-lists approach:
# - the structure of a list representing a subtree adheres to the structure defined for a tree; the structure itself is recursive!
# - it generalizes to a tree that has many subtrees. In the case where a tree is more than a binary tree, another subtree is just another list.
# index 0 = root value, index 1 = left subtree, index 2 = right subtree
print(myTree)
print('left subtree = ', myTree[1])
print('root = ', myTree[0])
print('right subtree = ', myTree[2])
def BinaryTree(r):
    """Construct a list-of-lists binary tree: [root value, left subtree, right subtree]."""
    return [r, [], []]
def insertLeft(root, newBranch):
    """Insert newBranch as root's left child.

    Any existing left subtree is pushed down to become the left child of the
    new branch.  Mutates root in place and returns it.
    """
    old_left = root.pop(1)  # detach the current left subtree
    if len(old_left) > 1:   # non-empty subtree: hang it under the new branch
        new_left = [newBranch, old_left, []]
    else:                   # empty slot: the new branch is a leaf
        new_left = [newBranch, [], []]
    root.insert(1, new_left)
    return root
# Miscellaneous
# an empty child list has len 0, which is how insertLeft detects an empty slot
child = []
print(len(child))
def insertRight(root,newBranch):
    """Insert newBranch as root's right child.

    Any existing right subtree is pushed down to become the right child of
    the new branch.  Mutates root in place and returns it.
    """
    old_right = root.pop(2)  # detach the current right subtree
    if len(old_right) > 1:   # non-empty subtree: hang it under the new branch
        new_right = [newBranch, [], old_right]
    else:                    # empty slot: the new branch is a leaf
        new_right = [newBranch, [], []]
    root.insert(2, new_right)
    return root
# To round out this set of tree-making functions, write a couple of access functions for getting and setting the root value, as well as getting the left or right subtrees.
# +
def getRootVal(root):
    """Return the value stored at the root of a list-based tree."""
    return root[0]
def setRootVal(root, newVal):
    """Overwrite the value stored at the root of a list-based tree."""
    root[0] = newVal
def getLeftChild(root):
    """Return the left subtree (a list, possibly empty)."""
    return root[1]
def getRightChild(root):
    """Return the right subtree (a list, possibly empty)."""
    return root[2]
# -
# Exercise the list-of-lists helpers; each insert pushes any existing subtree down.
r = BinaryTree(3)
print(r)
insertLeft(r,4)
print(r)
insertLeft(r,5)
print(r)
insertRight(r,6)
print(r)
insertRight(r,7)
print(r)
l = getLeftChild(r)
print(l)
setRootVal(l,9)
print(r)
insertLeft(l,11)
print(r)
print(getRightChild(getRightChild(r)))
# ** Self Check **
#
# *Q-26*
x = BinaryTree('a')
insertLeft(x,'b')
insertRight(x, 'c')
insertRight(getRightChild(x),'d')
insertLeft(getRightChild(getRightChild(x)),'e')
print(x)
x = BinaryTree('a')
insertLeft(x, 'd')
insertLeft(x, 'b')
insertRight(x,'f')
insertRight(x,'c')
insertLeft( getRightChild(x), 'e')
print(x)
# ## Nodes and References, Sec. 6.5 of Problem Solving with Algorithms and Data Structures
# +
class BinaryTree:
    """Linked-node binary tree: each instance is a node holding a key and
    references to left/right subtrees (themselves BinaryTree instances)."""

    def __init__(self, rootObj):
        self.key = rootObj
        self.leftChild = None
        self.rightChild = None

    def insertLeft(self, newNode):
        # The new node takes over the left slot; any existing left subtree
        # is pushed down to hang off the new node.
        subtree = BinaryTree(newNode)
        if self.leftChild is not None:
            subtree.leftChild = self.leftChild
        self.leftChild = subtree

    def insertRight(self, newNode):
        # Mirror image of insertLeft for the right slot.
        subtree = BinaryTree(newNode)
        if self.rightChild is not None:
            subtree.rightChild = self.rightChild
        self.rightChild = subtree

    def getRightChild(self):
        return self.rightChild

    def getLeftChild(self):
        return self.leftChild

    def setRootVal(self, obj):
        self.key = obj

    def getRootVal(self):
        return self.key
# -
r = BinaryTree('a')
print(r.getRootVal())
print(r.getLeftChild())   # None until a child is inserted
r.insertLeft('b')
print(r.getLeftChild())
print(r.getLeftChild().getRootVal())
r.insertRight('c')
# ## Parse Tree; 6.6 Parse Tree
#
# 1st. tokenize expression string (i.e. breakup each term in an expression string as a token so to create a list of tokens). The different kinds of tokens will say whether to create a new tree, finished an expression, leaf node, or if it'll have left and right child.
#
# Initially, start out with parse tree that consists of empty root node.
# +
from __future__ import print_function
class BinaryTree:
    """ @name BinaryTree
    @brief Recursive implementation of Binary Tree, using links and nodes approach.
    @note Modified to allow for trees to be constructed from other trees rather than
    always creating a new tree in the insertLeft or insertRight.

    Bug fixes relative to the original:
    - insertLeft/insertRight assigned the displaced subtree to the nonexistent
      attributes ``t.left``/``t.right`` instead of ``t.leftChild``/``t.rightChild``,
      silently dropping the old subtree.
    - postordereval called the misspelled ``postoreval`` on the right child.
    - postordereval used ``if res1 and res2``, which misreads a legitimate
      operand of 0 (falsy) as a missing subtree.
    """
    def __init__(self,rootObj):
        self.key = rootObj
        self.leftChild = None
        self.rightChild = None

    def insertLeft(self,newNode):
        # Accept either a raw value or an already-built BinaryTree.
        if isinstance(newNode, BinaryTree):
            t = newNode
        else:
            t = BinaryTree(newNode)
        if self.leftChild is not None:
            t.leftChild = self.leftChild  # fix: was t.left (dropped the old subtree)
        self.leftChild = t

    def insertRight(self, newNode):
        if isinstance(newNode, BinaryTree):
            t = newNode
        else:
            t = BinaryTree(newNode)
        if self.rightChild is not None:
            t.rightChild = self.rightChild  # fix: was t.right (dropped the old subtree)
        self.rightChild = t

    def isLeaf(self):
        # A leaf has neither child.
        return ((not self.leftChild) and (not self.rightChild))

    def getRightChild(self):
        return self.rightChild

    def getLeftChild(self):
        return self.leftChild

    def setRootVal(self,obj):
        self.key = obj

    def getRootVal(self,):
        return self.key

    def inorder(self):
        # left subtree, self, right subtree
        if self.leftChild:
            self.leftChild.inorder()
        print(self.key)
        if self.rightChild:
            self.rightChild.inorder()

    def postorder(self):
        # children first, then self
        if self.leftChild:
            self.leftChild.postorder()
        if self.rightChild:
            self.rightChild.postorder()
        print(self.key)

    def preorder(self):
        # self first, then children
        print(self.key)
        if self.leftChild:
            self.leftChild.preorder()
        if self.rightChild:
            self.rightChild.preorder()

    def printexp(self):
        # Fully parenthesized infix rendering of the expression tree.
        if self.leftChild:
            print('(', end=' ')
            self.leftChild.printexp()
        print(self.key, end=' ')
        if self.rightChild:
            self.rightChild.printexp()
            print(')', end=' ')

    def postordereval(self):
        """Evaluate this node as an expression tree: leaves are operands,
        interior nodes hold one of '+', '-', '*', '/'."""
        opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
        res1 = None
        res2 = None
        if self.leftChild:
            res1 = self.leftChild.postordereval()
        if self.rightChild:
            res2 = self.rightChild.postordereval()  # fix: was postoreval (typo)
        if res1 is not None and res2 is not None:   # fix: 0 is a valid operand
            return opers[self.key](res1,res2)
        else:
            return self.key
# +
# stack.py,
# cf. https://github.com/bnmnetp/pythonds/blob/master/basic/stack.py
class Stack:
    """LIFO stack backed by a Python list (top of stack = end of list)."""

    def __init__(self):
        self.items = []

    def isEmpty(self):
        return not self.items

    def push(self, item):
        self.items.append(item)

    def pop(self):
        return self.items.pop()

    def peek(self):
        # Look at the top item without removing it.
        return self.items[-1]

    def size(self):
        return len(self.items)
# -
def buildParseTree(fpexp):
    """ @name buildParseTree
    @param fpexp - string with a mathematical expression (assumed to be correct)

    Tokens must be whitespace-separated.  '(' opens a new left subtree, an
    operator fills the current node and opens a right subtree, a number fills
    the current node, and ')' climbs back to the parent.
    """
    operators = ['+', '-', '*', '/']
    parents = Stack()            # trail of ancestors back to the root
    expr_tree = BinaryTree('')
    parents.push(expr_tree)
    node = expr_tree             # node currently being filled in
    for token in fpexp.split():
        if token == '(':
            # descend: a new empty left child becomes the current node
            node.insertLeft('')
            parents.push(node)
            node = node.getLeftChild()
        elif token in operators:
            # store the operator, then descend into a new right child
            node.setRootVal(token)
            node.insertRight('')
            parents.push(node)
            node = node.getRightChild()
        elif token == ')':
            # subexpression finished: climb back to the parent
            node = parents.pop()
        else:
            # operand: store the number and climb back to the parent
            # (int() raises ValueError on anything unrecognized)
            node.setRootVal(int(token))
            node = parents.pop()
    return expr_tree
pt = buildParseTree("( ( 10 + 5 ) * 3 )")
pt.postorder() # prints 10 5 + 3 * (operands before their operator)
# As we have done with past recursive algorithms, we'll begin the design for the recursive evaluation function by identifying the base case.
#
# A natural base case for recursive algorithms that operate on trees is to check for a leaf node.
# +
# https://docs.python.org/2/library/operator.html
# Standard operators as functions
import operator
# operator.add(a,b) return a+b
operator.add
# operator.div(a,b) return a/b when __future__.division is not in effect. This is also known as "classic" division
# NOTE: operator.div exists only in Python 2 (this notebook's kernel); it was removed in Python 3.
operator.div
# operator.truediv(a,b) return a/b when __future__.division is in effect. This is also known as "true" division.
operator.truediv
# -
# -
def evaluate(parseTree):
    """Recursively evaluate a parse tree: leaves hold operands, interior
    nodes hold one of '+', '-', '*', '/'."""
    opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
    left_subtree = parseTree.getLeftChild()
    right_subtree = parseTree.getRightChild()
    if left_subtree and right_subtree:
        # interior node: apply the operator to the evaluated children
        # (the recursion walks down toward the leaves)
        apply_op = opers[parseTree.getRootVal()]
        return apply_op(evaluate(left_subtree), evaluate(right_subtree))
    # base case: a leaf node holds an operand
    return parseTree.getRootVal()
evaluate(pt)
# ## Tree Traversals, 6.7 Tree Traversals
# +
def preorder(tree):
    """Print keys root-first: self, then left subtree, then right subtree."""
    if not tree:
        return
    print(tree.getRootVal())
    preorder(tree.getLeftChild())
    preorder(tree.getRightChild())
# -
preorder(r)
def postorder(tree):
    """Print keys children-first: left subtree, right subtree, then self."""
    if tree is None:
        return
    postorder(tree.getLeftChild())
    postorder(tree.getRightChild())
    print(tree.getRootVal())
def postordereval(tree):
    """Evaluate an expression parse tree bottom-up.

    Leaves hold numeric operands; interior nodes hold one of '+', '-', '*', '/'.
    Returns None for an empty tree.

    Fix over the original: child results are tested with ``is not None`` so a
    legitimate operand of 0 (which is falsy) is no longer mistaken for a
    missing subtree.
    """
    opers = {'+':operator.add, '-':operator.sub, '*':operator.mul, '/':operator.truediv}
    res1 = None
    res2 = None
    if tree:
        res1 = postordereval(tree.getLeftChild())
        res2 = postordereval(tree.getRightChild())
        if res1 is not None and res2 is not None:
            # interior node: both children evaluated, apply the operator
            return opers[tree.getRootVal()](res1, res2)
        else:
            # leaf node: return the operand itself
            return tree.getRootVal()
def inorder(tree):
    """Print keys in left-root-right order (sorted order for a BST)."""
    if tree is None:
        return
    inorder(tree.getLeftChild())
    print(tree.getRootVal())
    inorder(tree.getRightChild())
def printexp(tree):
    """Return a fully parenthesized infix string for an expression parse tree.

    Fixes over the original:
    - ``str(Tree.getRootVal())`` referenced the unrelated ``Tree`` class; the
      method must be called on the ``tree`` argument.
    - The empty-tree case fell through and returned the unbound local
      ``sVal`` (UnboundLocalError); it now returns "", which also lets the
      recursion terminate correctly at leaf children.
    """
    sVal = ""
    if tree:
        sVal = '(' + printexp(tree.getLeftChild())
        sVal = sVal + str(tree.getRootVal())
        sVal = sVal + printexp(tree.getRightChild()) + ')'
    return sVal
# +
# delMin() returns item with minimum key value, removing item from the heap
# -
# ### Binary Heap `binheap.py`
#
# cf. [` pythonds/trees/binheap.py`](https://github.com/bnmnetp/pythonds/blob/master/trees/binheap.py)
#
# +
# this heap takes key value pairs, we'll assume that keys are integers
class BinHeap:
    """Min-heap of comparable keys stored in a 1-based list.

    heapList[0] is a dummy entry so the node at index i has children at
    2*i and 2*i + 1 and its parent at i // 2.

    Bug fix: isEmpty referenced the bare name ``currentSize`` (a NameError
    at call time); it now reads ``self.currentSize``.
    """
    def __init__(self):
        self.heapList = [0] # 1-based counting
        self.currentSize = 0

    def buildHeap(self,alist):
        """ @fn buildHeap(self,alist)
        @brief builds a new heap from a list of keys
        """
        i = len(alist) // 2 # integer division is //; last node with a child
        self.currentSize = len(alist)
        self.heapList = [0] + alist[:]
        print(len(self.heapList),i) # debug trace retained from the original
        while (i>0):
            print(self.heapList, i )
            self.percDown(i)
            i=i-1
        print(self.heapList,i)

    def percDown(self,i):
        """ @fn percDown
        @brief sink node i until the heap order property holds below it
        """
        while(i*2) <= self.currentSize: # i*2 <= size means i has at least one child
            mc = self.minChild(i)
            if self.heapList[i] > self.heapList[mc]: # violates heap order property that parent is less than child in value
                tmp = self.heapList[i]
                self.heapList[i] = self.heapList[mc] # do the switch between them
                self.heapList[mc] = tmp
            i = mc # continue with the child we swapped toward

    def minChild(self,i):
        """ @fn minChild
        @brief finding the minimum amongst the children of parent ith node
        """
        if i * 2 + 1 > self.currentSize: # only a left child exists
            return i * 2
        else:
            if self.heapList[i*2] < self.heapList[i * 2 + 1]:
                return i * 2
            else:
                return i * 2 + 1

    def percUp(self,i):
        """ @fn percUp
        @brief bubble node i up while it is smaller than its parent
        """
        while i // 2 > 0: # i // 2 is integer division (node of parent of ith node)
            if self.heapList[i] < self.heapList[i//2]: # violates heap order property
                tmp = self.heapList[i//2] # parent is now tmp
                self.heapList[i//2]= self.heapList[i] # percolate up the ith node to where its parent was
                self.heapList[i] = tmp # put parent in where i was
            i = i // 2 # keep doing this check for heap order property violations

    def insert(self,k):
        """ @fn insert
        @brief adds new item to the heap
        """
        self.heapList.append(k)
        self.currentSize = self.currentSize + 1
        self.percUp(self.currentSize) # check for heap order property violations

    def delMin(self):
        """ @fn delMin
        @brief removes and returns the item with minimum key value
        @note Since heap order property requires root of tree be smallest item in tree, finding the minimum
        item is easy. Hard part is restoring full compliance with heap structure and heap order property.
        """
        retval = self.heapList[1]
        self.heapList[1] = self.heapList[self.currentSize]
        self.currentSize = self.currentSize - 1 # take last item in list and move it to root position
        self.heapList.pop()
        self.percDown(1)
        return retval

    def isEmpty(self):
        """ @fn isEmpty
        @brief returns True if the heap is empty, False otherwise
        """
        return self.currentSize == 0 # fix: was bare 'currentSize' (NameError)
# +
# Demo cell: floor division, then exercise insert() on an empty heap,
# printing the backing list after each insertion.
print( 5 // 2 )
bh = BinHeap()
print(bh.heapList)  # [0] -- dummy slot only
bh.insert(5)
print(bh.heapList)
bh.insert(7)
print(bh.heapList)
bh.insert(3)  # 3 percolates up past 5 to the root
print(bh.heapList)
bh.insert(11)
print(bh.heapList)
# -
# Repeatedly removing the minimum yields the keys in sorted order: 3, 5, 7, 11.
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
print(bh.delMin())
# # Binary Heap Implementation; 6.10. Binary Heap Implementation
# ## The Structure Property 6.10.1 The Structure Property
#
# In order to make our heap work efficiently, take advantage of the logarithmic nature of the binary tree, to represent our heap. In order to guarantee logarithmic performance, keep tree balanced.
#
# A balanced binary tree has roughly the same number of nodes in the left and right subtrees of the root.
#
# In our heap implementation, we keep the tree balanced by creating a **complete binary tree**. A complete binary tree is a tree in which each level has all of its nodes. Exception to this is the bottom level of the tree, which we fill in from left to right.
#
# Another interesting property of a complete tree is that we can represent it using a single list.
#
# cf. https://courses.cs.washington.edu/courses/cse373/06sp/handouts/lecture10.pdf
#
# **heap order property** -
# - every node is less than or equal to its children
# - or every node is greater than or equal to its children
#
# and so the root node is always the smallest node, or the largest, depending on the heap order.
#
# With heap order property, each *path* is sorted, but the subtrees are not sorted (necessarily) relative to each other.
#
# ### Complexity
#
# *Assertion*: build heap in $O(n)$.
#
# Key to understanding the proof is you can build the heap in $O(n)$ because remember, that the $\log{n}$ factor is derived from height of the tree. For most of the work in `buildHeap`, tree is shorter than $\log{n}$.
#
# Using fact that you can build a heap from a list in $O(n)$ time, you'll construct sorting algorithm that uses heap and sorts a list in $O(n\log{n})$ as exercise at the end.
# cf. [Why is Binary Heap Preferred over BST for Priority Queue?](http://www.geeksforgeeks.org/why-is-binary-heap-preferred-over-bst-for-priority-queue/)
#
# Priority Queue wants
# 1. Get top priority element (get minimum or maximum)
# 2. insert an element
# 3. remove top priority element
# 4. decrease key
#
# binary heap's time complexities:
# 1. O(1)
# 2. O($\log{n}$)
# 3. O($\log{n}$)
# 4. O($\log{n}$)
#
# cf. [Time Complexity of building a heap](http://www.geeksforgeeks.org/time-complexity-of-building-a-heap/)
#
# build heap in $O(n)$.
#
# It's not $O(n\log{n})$ even though heapify costs $O(\log{n})$
#
# observe heapify run time depends on height of tree $h$ (which is equal to $\log{n}$), and heights of most subtrees are small.
#
# $T(n) = \sum_{h=0}^{\log{n}} \frac{n}{2^{h+1}} \cdot O(h) = O\Big(n \sum_{h=0}^{\log{n}} \frac{h}{2^h}\Big) = O(n)$
#
#
# # Binary Search Trees, 6.11 Binary Search Trees, 6.13. Search Tree Implementation
#
# From wikipedia, the binary search tree property states that the key in each node must be greater than or equal to any key stored in the left sub-tree, and less than or equal to any key stored in the right sub-tree.
#
# From [6.13. Search Tree Implementation](http://interactivepython.org/runestone/static/pythonds/Trees/SearchTreeImplementation.html), binary search tree property (**bst property**), that keys that are less than the parent are found in the left subtree, and keys that are greater than the parent are found in the right subtree.
#
# Because we must be able to create and work with a binary search tree that's empty, our implementation will use 2 classes.
#
# The `BinarySearchTree` has reference to `TreeNode` that is the root of the binary search tree. "In most cases, the external methods defined in outer class simply check to see if tree is empty". If there are nodes in the tree, the request is just passed on to a private method defined in the `BinarySearchTree` class that takes the root as a parameter.
#
# In the case where the tree is empty or we want to delete the key at the root of the tree, we must take special action.
#
#
import os, sys
print( os.getcwd() )
print(os.listdir( os.getcwd() ))
sys.path.insert(0, os.getcwd() + '/trees_arbres/')
from bst import BinarySearchTree, TreeNode
# +
# Demo: grow a BinarySearchTree via __setitem__ and inspect the root's
# TreeNode fields after each insertion.
mytree = BinarySearchTree()
print( mytree.length() )
print( mytree.root )   # empty tree: no root yet
print( mytree.size)
mytree[3] = "red"      # first insertion becomes the root
print( mytree.length() )
print( mytree.root )
print( mytree.root.key )
print( mytree.root.payload )
print( mytree.root.leftChild )
print( mytree.root.rightChild )
print( mytree.root.parent )
print( mytree.root.balanceFactor )
print( mytree.size)
# -
mytree[4] = "blue"     # 4 > 3, goes to the root's right subtree
print( mytree.length() )
print( mytree.root )
print( mytree.root.key )
print( mytree.root.payload )
print( mytree.root.leftChild )
print( mytree.root.rightChild )
print( mytree.root.parent )
print( mytree.root.balanceFactor )
print( mytree.size)
mytree[6] = "yellow"   # 6 > 3 > 4, descends right twice
print( mytree.length() )
print( mytree.root )
print( mytree.root.key )
print( mytree.root.payload )
print( mytree.root.leftChild )
print( mytree.root.rightChild )
print( mytree.root.parent )
print( mytree.root.balanceFactor )
print( mytree.size)
mytree[2] = "at"       # 2 < 3, goes to the root's left subtree
print( mytree.length() )
print( mytree.root )
print( mytree.root.key )
print( mytree.root.payload )
print( mytree.root.leftChild )
print( mytree.root.rightChild )
print( mytree.root.parent )
print( mytree.root.balanceFactor )
print( mytree.size)
# Lookup by key via __getitem__ returns the stored payload strings.
print(mytree[6])
print(mytree[2])
# cf. [6.14. Search Tree Analysis](http://interactivepython.org/runestone/static/pythonds/Trees/SearchTreeAnalysis.html)
#
# $n = $ number of nodes in the tree
# $h = $ height of tree is going to be, $h=\log_2{n}$
# $2^h = n$
# # AVL Tree, Adelson-Velskii, Landis, cf. 6.15. Balanced Binary Search Trees
# automatically makes sure tree remains balanced.
#
# For each node,
# $$
# \text{balanceFactor} = \text{height}(\text{leftSubTree}) - \text{height}(\text{rightSubTree}) = h(\text{leftSubTree}) - h(\text{rightSubTree})
# $$
#
# For purposes of implementing an AVL tree, and gaining the benefit of having a balanced tree, we'll define a tree to be in balance if the balance factor is $-1,0,1$.
#
#
from bst import BinarySearchTree, TreeNode
# +
class AVLTree(BinarySearchTree):
    """
    @brief Self-balancing (AVL) binary search tree; same public interface
    as BinarySearchTree.
    @details functions:
    __contains__(y) <==> y in x
    __getitem__(y) <==> x[y]
    __init__()
    __len__() <==> len(x)
    __setitem__(k,v) <==> x[k] = v
    clear()
    get(k)
    has_key(k)
    items()
    keys()
    values()
    put(k,v)

    Invariant: every node's balanceFactor = height(left) - height(right)
    is kept within {-1, 0, 1} by the rotations below, performed on insert.
    """
    def _put(self,key,val,currentNode):
        # Standard BST descent; after attaching the new leaf, walk back up
        # adjusting balance factors (and rotating) via updateBalance().
        # NOTE(review): duplicate keys descend right here, unlike a plain
        # BST _put that typically overwrites -- confirm against bst.py.
        if key < currentNode.key:
            if currentNode.hasLeftChild():
                self._put(key,val,currentNode.leftChild)
            else: # current Node doesn't have a left child
                currentNode.leftChild = TreeNode(key,val,parent=currentNode)
                self.updateBalance(currentNode.leftChild)
        else:
            if currentNode.hasRightChild():
                self._put(key,val,currentNode.rightChild)
            else:
                currentNode.rightChild = TreeNode(key,val,parent=currentNode)
                self.updateBalance(currentNode.rightChild)
    def updateBalance(self,node):
        # If node is already out of balance, rebalance it and stop: the
        # rotation restores the subtree to its pre-insert height, so no
        # ancestor's balance factor changes.
        if node.balanceFactor > 1 or node.balanceFactor < -1:
            self.rebalance(node)
            return
        if node.parent != None:
            # A new node in the left subtree raises the parent's balance
            # factor by one; in the right subtree it lowers it by one.
            if node.isLeftChild():
                node.parent.balanceFactor += 1
            elif node.isRightChild():
                node.parent.balanceFactor -= 1
            if node.parent.balanceFactor != 0: # then algorithm continues to work its way up tree toward root
                self.updateBalance(node.parent)
    def rebalance(self,node):
        # The four classic AVL cases, selected by the signs of the node's
        # and its taller child's balance factors.
        if node.balanceFactor <0:
            if node.rightChild.balanceFactor >0:
                # Do an LR Rotation (right-left heavy: rotate child right,
                # then the node left)
                self.rotateRight(node.rightChild)
                self.rotateLeft(node)
            else:
                # single left
                self.rotateLeft(node)
        elif node.balanceFactor > 0:
            if node.leftChild.balanceFactor <0:
                # Do a RL Rotation (left-right heavy: rotate child left,
                # then the node right)
                self.rotateLeft(node.leftChild)
                self.rotateRight(node)
            else:
                # single right
                self.rotateRight(node)
    def rotateLeft(self,rotRoot): # left rotation around the subtree rooted at rotRoot
        """ @fn rotateLeft
        @brief left rotation around subtree rooted at rotRoot
        """
        newRoot = rotRoot.rightChild # promote right child to be root of the subtree
        rotRoot.rightChild = newRoot.leftChild # replace right child of old root with left child of new
        # next step is to adjust the parent pointers of the 2 nodes
        if newRoot.leftChild != None: # if new root already had a left child, re-parent it onto the old root
            newRoot.leftChild.parent = rotRoot
        newRoot.parent = rotRoot.parent
        if rotRoot.isRoot():
            self.root = newRoot
        else:
            if rotRoot.isLeftChild():
                rotRoot.parent.leftChild = newRoot
            else:
                rotRoot.parent.rightChild = newRoot
        newRoot.leftChild = rotRoot # move old root to be leftchild of new root
        rotRoot.parent = newRoot
        # Only these two nodes' balance factors change; the formulas follow
        # from the height-change derivation in the Runestone text.
        rotRoot.balanceFactor = rotRoot.balanceFactor + 1 - min(newRoot.balanceFactor,0)
        newRoot.balanceFactor = newRoot.balanceFactor + 1 - max(rotRoot.balanceFactor, 0)
    def rotateRight(self,rotRoot):
        """ @fn rotateRight
        @brief right rotation around subtree rooted at rotRoot (mirror of rotateLeft)
        """
        newRoot = rotRoot.leftChild # promote left child of rotRoot to be root of the subtree
        rotRoot.leftChild = newRoot.rightChild # replace left child of old root with right child of new
        # next step is to adjust the parent pointers of the 2 nodes
        if newRoot.rightChild != None: # re-parent the transferred child onto the old root
            newRoot.rightChild.parent = rotRoot
        newRoot.parent = rotRoot.parent
        if rotRoot.isRoot():
            self.root = newRoot
        else:
            if rotRoot.isRightChild():
                rotRoot.parent.rightChild = newRoot
            else:
                rotRoot.parent.leftChild = newRoot
        newRoot.rightChild = rotRoot # move old root to be right child of new root
        rotRoot.parent = newRoot
        # Mirror-image balance-factor updates of rotateLeft.
        rotRoot.balanceFactor = rotRoot.balanceFactor - 1 - max(newRoot.balanceFactor,0)
        newRoot.balanceFactor = newRoot.balanceFactor - 1 + min(rotRoot.balanceFactor,0)
# -
import os, sys
print( os.getcwd() )
print(os.listdir( os.getcwd() ))
# # Binary Tree
#
# cf. [Binary Tree | Set 1 (Introduction)](http://www.geeksforgeeks.org/binary-tree-set-1-introduction/)
sys.path.insert(0, os.getcwd() + '/trees_arbres/')
from binaryTree import binaryTree
# Demo 1: build via insertl/insertr (semantics defined in the project
# module binaryTree -- presumably insert-as-left/right child; TODO confirm)
# and run the three depth-first traversals.
root = binaryTree(1)
root.insertl(2)
root.insertr(3)
root.insertl(4)
root.l.insertr(5)
root.inorder()
root.preorder()
root.postorder()
# Demo 2: same shape built by inserting directly on the children.
root=binaryTree(1)
root.insertl(2)
root.insertr(3)
root.l.insertl(4)
root.l.insertr(5)
root.inorder()
root.preorder()
root.postorder()
# Demo 3: same tree assembled by assigning the l/r attributes by hand.
root=binaryTree(1)
root.l=binaryTree(2)
root.r=binaryTree(3)
root.l.l=binaryTree(4)
root.l.r =binaryTree(5)
root.inorder()
root.preorder()
root.postorder()
# ### Time Complexity of Binary Search , O(n)
#
# $O(n)$
#
# Complexity function $T(n)$ - for all problem where tree traversal is involved, can be defined as:
# $T(n) = T(k) + T(n-k+1) + c$
# where $k$ is number of nodes on 1 side of root, and $n-k-1$ on the other side
#
# Auxiliary Space: If we don't consider size of stack for function calls, then $O(1)$, otherwise $O(n)$.
| crack/TreesGraphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Scenario A - Noise Level Variation (multiple runs for init mode)
#
# In this scenario the noise level on a generated dataset is varied in three steps: low/medium/high,
# the rest of the parameters in the dataset is kept constant.
#
# The model used in the inference of the parameters is formulated as follows:
#
# \begin{equation}
# \large y = f(x) = \sum\limits_{m=1}^M \big[A_m \cdot e^{-\frac{(x-\mu_m)^2}{2\cdot\sigma_m^2}}\big] + \epsilon
# \end{equation}
#
#
# This file runs a series of runs for a single sampler init mode. It does not store the traces or plots, only the summary statistics are stored.
# +
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pymc3 as pm
import arviz as az
#az.style.use('arviz-darkgrid')
print('Running on PyMC3 v{}'.format(pm.__version__))
# -
# ## Import local modules
import datetime
import os
import sys
sys.path.append('../../modules')
import datagen as dg
import models as mdl
import results as res
import figures as fig
import settings as cnf
# ## Local configuration
# +
# output for results and images
out_path = './output_mruns_lognormal_adapt'
file_basename = out_path + '/scenario_noise'
# if dir does not exist, create it
if not os.path.exists(out_path):
    os.makedirs(out_path)
# Run configuration dictionary, persisted below via cnf.save().
conf = {}
# scenario name
conf['scenario'] = 'noise variation'
# initialization method for sampler ('jitter+adapt_diag'/'advi+adapt_diag'/'adapt_diag')
conf['init_mode'] = 'adapt_diag'
# probabilistic model (priors)
conf['prior_model'] = 'lognormal'
# provide peak positions to the model as testvalues ('yes'/'no')
conf['peak_info'] = 'yes'
# absolute peak shift (e.g. 2%(4), 5%(10) or 10%(20) of X-min.)
conf['peak_shift'] = 0.0
# dataset directory
conf['dataset_dir'] = './input_datasets'
# number of runs over the dataset
conf['nruns'] = 1
# number of cores to run
conf['ncores'] = 2
# number of samples per chain
conf['nsamples'] = 2000
# -
# Display the configuration in the notebook output.
conf
# ## Save configuration
cnf.save(out_path, conf)
# # Generate data and plot
# +
# list of wavelengths (x-values)
xval = [i for i in range(200, 400, 2)]
ldata = []
lpeaks = []
# number of spectra per noise level
nsets = 10
# noise levels as fractions of the minimal signal amplitude: 5%, 10%, 25%
# (the original comment said "1%, 2% and 5%", which does not match these values)
noise_levels = [0.05, 0.10, 0.25]
# total number of datasets
tsets = nsets * len(noise_levels)
# load pre-generated datasets from disk (third return value is unused here)
ldata, lpeaks, _ = dg.data_load(tsets, conf['dataset_dir'])
# add peakshift (0.0 in this configuration, so a no-op shift)
lpeaks = dg.add_peakshift(lpeaks, conf['peak_shift'])
# +
# plot datasets
#fig.plot_datasets(ldata, lpeaks, dims=(int(tsets/2),2), figure_size=(12,int(tsets*(1.8))),
# savefig='yes', fname=file_basename)
# -
# # Initialize models and run inference
# +
# convert pandas data to numpy arrays
x_val = np.array(xval, dtype='float32')
# store dataset y-values in list (one array per dataset)
cols = ldata[0].columns
y_val = [ldata[i][cols].values for i in range(len(ldata))]
# +
# initialize models and run inference; `models` and `traces` accumulate
# across all nruns x len(ldata) fits, in loop order.
models = []
traces = []
for r in range(conf['nruns']):
    print("running loop {0}/{1} over datasets".format(r+1,conf['nruns']))
    for i in range(len(ldata)):
        if conf['peak_info'] == 'yes':
            # pass sorted peak positions as test values to the model
            # (lpeaks[i] is presumably a 2-D numpy array -- flatten+sort)
            plist = lpeaks[i].flatten()
            plist.sort()
            model_g = mdl.model_pvoigt(xvalues=x_val, observations=y_val[i], npeaks=3,
                                       mu_peaks=plist, pmodel=conf['prior_model'])
        else:
            model_g = mdl.model_pvoigt(xvalues=x_val, observations=y_val[i], npeaks=3,
                                       pmodel=conf['prior_model'])
        models.append(model_g)
        with model_g:
            print("({0}:{1}) running inference on dataset #{2}/{3}".format(r+1,conf['nruns'],i+1,len(ldata)))
            trace_g = pm.sample(conf['nsamples'], init=conf['init_mode'], cores=conf['ncores'])
            traces.append(trace_g)
# -
# # Model visualization
pm.model_to_graphviz(models[0])
# save model figure as image
img = pm.model_to_graphviz(models[0])
img.render(filename=file_basename + '_model', format='png');
# # Collect results and save
# posterior predictive traces (500 draws per fitted model)
ppc = [pm.sample_posterior_predictive(traces[i], samples=500, model=models[i]) for i in range(len(traces))]
# +
varnames = ['amp', 'mu', 'sigma', 'epsilon']
nruns = conf['nruns']
# Per-fit bookkeeping lists, in the same order as `traces`:
# dataset y-values, true noise level, and run number for every fit.
ly_val = [val for run in range(nruns) for idx, val in enumerate(y_val)]
lnoise = [nl for run in range(nruns) for nl in noise_levels for i in range(nsets)]
lruns = ['{0}'.format(run+1) for run in range(nruns) for i in range(tsets)]
# collect the results and display
df = res.get_results_summary(varnames, traces, ppc, ly_val, epsilon_real=lnoise, runlist=lruns)
df
# -
# save results to .csv
df.to_csv(file_basename + '.csv', index=False)
cnf.close(out_path)
| code/scenarios/scenario_a/scenario_noise_mruns.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pickle
import numpy as np
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
# All result pickle files in ./data
onlyfiles = [f for f in listdir('./data') if isfile(join('./data', f))]
# Index in farfield arrays for 8 degrees
# 8 degrees from normal = pi/2 - 0.1396
# What fraction of pi :
# NOTE(review): this evaluates to ~455.56; downstream code truncates with
# int(ind) -> 455, while the comment below says "pick 456" -- verify which
# index was intended.
ind = np.radians(90-8) / np.pi * 1000
ind
# sanity check: map the index back to degrees from normal (should be ~8)
90 - np.degrees(np.angle(np.cos(np.pi*(ind/1000)) + 1j*np.sin(np.pi*(ind/1000))))
# So pick 456
# +
# Collect per-pickle scalars from each sweep result: grating period, fill
# factor, back-reflection S11, waveguide mode amplitudes, and the far-field
# power at the 8-degree index.
periods = []
FFs = []
S11s = []
wav_0 = []
wav_1 = []
Prs = []
Pnorms = []
for file in onlyfiles:
    # Context manager guarantees the file handle is closed even if
    # pickle.load raises (the original left it open on error).
    with open('./data/' + file, 'rb') as dbfile:
        db = pickle.load(dbfile)
    # Parameters
    params = db['params']
    periods.append(params['a'])
    FFs.append(params['FF'])
    # Waveguide port: alpha[0,0,0]/alpha[0,0,1] are presumably the
    # forward/backward mode coefficients, so |b|^2/|f|^2 is S11 -- TODO confirm
    res_waveguide = db['res_waveguide']
    wav_0.append(res_waveguide.alpha[0,0,0])
    wav_1.append(res_waveguide.alpha[0,0,1])
    S11s.append(np.abs(res_waveguide.alpha[0,0,1])**2/np.abs(res_waveguide.alpha[0,0,0])**2)
    # Far field at 8 degrees.
    # BUG FIX: the original rebound the accumulator list `farfield_powers`
    # to the per-file array inside the loop, silently discarding the list
    # it had initialized above; use a loop-local name instead.
    ff = db['farfield_power']
    # columns are presumably Ex,Ey,Ez,Hx,Hy,Hz -- TODO confirm layout
    Ex = np.conj(ff[:,0])
    Ey = np.conj(ff[:,1])
    Ez = np.conj(ff[:,2])
    Hx = ff[:,3]
    Hy = ff[:,4]
    Hz = ff[:,5]
    # Time-averaged Poynting vector components, S = Re(E* x H)
    Px = np.real(np.multiply(Ey,Hz)-np.multiply(Ez,Hy))
    Py = np.real(np.multiply(Ez,Hx)-np.multiply(Ex,Hz))
    Pz = np.real(np.multiply(Ex,Hy)-np.multiply(Ey,Hx))
    Pr = np.sqrt(np.square(Px)+np.square(Py))  # in-plane power magnitude
    Pnorm = Pr/np.max(Pr)
    # int(ind) truncates ~455.56 to 455, not the 456 suggested earlier
    Prs.append(Pr[int(ind)])
    Pnorms.append(Pnorm[int(ind)])
# +
import pandas as pd
dict_arr = {'period': periods,
'FF': FFs,
'S11': S11s,
'wav_0':wav_0,
'wav_1':wav_1,
'Prs':Prs,
'Pnorms':Pnorms
}
df = pd.DataFrame(dict_arr)
df
# +
def neff(FF):
    """Effective index of the grating region: linear mix of the tooth
    index (1.93) and the gap index (1.0), weighted by fill factor FF."""
    return 1.93 * FF + (1 - FF)


def angle(a, neff):
    """Diffraction-angle helper (degrees) for period *a* and effective
    index *neff* at 635 nm.

    NOTE(review): this returns degrees of (neff - arcsin(0.635/a)); the
    usual grating equation would place the arcsin outermost -- confirm
    the intended formula before relying on the result.
    """
    return np.degrees(neff - np.arcsin(0.635 / a))
# -
# Peak absolute 8-degree far-field power across the sweep, and the row achieving it.
np.max(np.abs(df['Prs'].to_numpy()))
df.loc[df['Prs'] == np.max(df['Prs'].to_numpy())]
# +
# Polar far-field plot for the best-Prs sweep point (a=0.84, FF=0.75).
file = '06082021_05:55:59_a_0.8400_FF_0.7500_theta_0.0000_x_0.0000_source_1.0000_.pickle'
dbfile = open('./data/' + file, 'rb')
db = pickle.load(dbfile)
dbfile.close()
farfield_power = db['farfield_power']
farfield_angles = db['farfield_angles']
# Same Poynting-vector computation as the sweep loop above.
Ex=farfield_power[:,0]
Ey=farfield_power[:,1]
Ez=farfield_power[:,2]
Hx=farfield_power[:,3]
Hy=farfield_power[:,4]
Hz=farfield_power[:,5]
Ex=np.conj(Ex)
Ey=np.conj(Ey)
Ez=np.conj(Ez)
Px=np.real(np.multiply(Ey,Hz)-np.multiply(Ez,Hy))
Py=np.real(np.multiply(Ez,Hx)-np.multiply(Ex,Hz))
Pz=np.real(np.multiply(Ex,Hy)-np.multiply(Ey,Hx))
Pr=np.sqrt(np.square(Px)+np.square(Py))
Pnorm = Pr/np.max(Pr)
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(farfield_angles, Pnorm)
ax.set_rmax(1)
ax.set_rticks([0.25, 0.5, 0.75, 1])  # Less radial ticks
ax.set_rlabel_position(-22.5)  # Move radial labels away from plotted line
ax.grid(True)
# ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
# -
# Peak normalized 8-degree far-field power (1.0 if any sweep point peaks
# exactly at the 8-degree index), and the row(s) achieving it.
np.max(np.abs(df['Pnorms'].to_numpy()))
df.loc[df['Pnorms'] == np.max(df['Pnorms'].to_numpy())]
# +
# Polar far-field plot for the best-Pnorms sweep point (a=0.42, FF=0.75).
# Near-duplicate of the plotting cell above; only the input file differs.
file = '06082021_03:48:05_a_0.4200_FF_0.7500_theta_0.0000_x_0.0000_source_1.0000_.pickle'
dbfile = open('./data/' + file, 'rb')
db = pickle.load(dbfile)
dbfile.close()
farfield_power = db['farfield_power']
farfield_angles = db['farfield_angles']
Ex=farfield_power[:,0]
Ey=farfield_power[:,1]
Ez=farfield_power[:,2]
Hx=farfield_power[:,3]
Hy=farfield_power[:,4]
Hz=farfield_power[:,5]
Ex=np.conj(Ex)
Ey=np.conj(Ey)
Ez=np.conj(Ez)
Px=np.real(np.multiply(Ey,Hz)-np.multiply(Ez,Hy))
Py=np.real(np.multiply(Ez,Hx)-np.multiply(Ex,Hz))
Pz=np.real(np.multiply(Ex,Hy)-np.multiply(Ey,Hx))
Pr=np.sqrt(np.square(Px)+np.square(Py))
Pnorm = Pr/np.max(Pr)
fig, ax = plt.subplots(subplot_kw={'projection': 'polar'})
ax.plot(farfield_angles, Pnorm)
ax.set_rmax(1)
ax.set_rticks([0.25, 0.5, 0.75, 1])  # Less radial ticks
ax.set_rlabel_position(-22.5)  # Move radial labels away from plotted line
ax.grid(True)
# ax.set_title("A line plot on a polar axis", va='bottom')
plt.show()
# +
# MEEP 2-D grating-coupler setup, excited from the FIBER port.
# Assumes `mp` (meep) and `math` were imported in an earlier cell -- TODO confirm.
# Lengths are in um; the MEEP unit length is 1 um.
resolution = 32 # pixels/unit length (1 um)
hSiN = 0.44
hSiO2 = 3.2
hSi = 1
hair = 4
dgrat = 0.42 * 0.75     # grating tooth width (period * fill factor)
dgap = 0.42 * (1-0.75)  # grating gap width
a = dgrat + dgap        # grating period
N = 20
N = N+1                 # number of grating periods (20 + 1)
dtaper = 12
dbuffer = 0.5
dpml = 1
# Fiber parameters, from SMF-633-4/125-1-L or PMF-633-4/125-0.25-L
fiber_core = 4
fiber_clad = 120
fiber_angle = 8
fiber_angle = np.radians(fiber_angle)  # stored in radians from here on
fiber_xposition = 2
fiber_air_gap = 1
hfiber = 3
haircore = 2
hfiber_geom = 100 # Some large number to make fiber extend into PML
# Index from 0.10 @ 633 nm numerical aperture
# cladding 635 nm real index, From NA, n=1.4535642400664652
nClad = 1.4535642400664652
Clad = mp.Medium(index=nClad)
# Pure fused silica core core 635 nm real index (will be SiO2 below)
# MEEP's computational cell is always centered at (0,0), but code has beginning of grating at (0,0)
sxy = 2*dpml + dtaper + a*N + 2*dbuffer # sx here
sz = 2*dbuffer + hSiO2 + hSiN + hair + hSi + 2*dpml # sy here
# Offsets translate from the code's origin (start of grating, top of SiN)
# to MEEP's cell-centered origin.
comp_origin_x = dpml + dbuffer + dtaper
# comp_origin_x = 0
meep_origin_x = sxy/2
x_offset = meep_origin_x - comp_origin_x
# x_offset = 0
comp_origin_y = dpml + hSi + hSiO2 + hSiN/2
# comp_origin_y = 0
meep_origin_y = sz/2
y_offset = meep_origin_y - comp_origin_y
# y_offset = 0
x_offset_vector = mp.Vector3(x_offset,0)
offset_vector = mp.Vector3(x_offset, y_offset)
# offset_vector = mp.Vector3(0,0,0)
# Si3N4 635 nm real index
nSiN = 2.0102
SiN = mp.Medium(index=nSiN)
# SiO2 635 nm real index
nSiO2 = 1.4569
SiO2 = mp.Medium(index=nSiO2)
# Si substrate 635 nm complex index, following https://meep.readthedocs.io/en/latest/Materials/#conductivity-and-complex
# eps = 15.044 + i*0.14910
Si = mp.Medium(epsilon=15.044, D_conductivity=2*math.pi*0.635*0.14910/15.044)
# We will do x-z plane simulation
cell_size = mp.Vector3(sxy,sz)
geometry = []
# Fiber (defined first to be overridden)
# Core
# fiber_offset = mp.Vector3(fiber_xposition + extrax, hSiN/2 + hair + haircore + extray) - offset_vector
# Tilted fiber cladding block (e1/e2 rotate the block by -fiber_angle).
geometry.append(mp.Block(material=Clad,
                         center=mp.Vector3(x=fiber_xposition) - offset_vector,
                         size=mp.Vector3(fiber_clad, hfiber_geom),
                         e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         )
                )
# Tilted fiber core block (SiO2).
geometry.append(mp.Block(material=SiO2,
                         center=mp.Vector3(x=fiber_xposition) - offset_vector,
                         size=mp.Vector3(fiber_core, hfiber_geom),
                         e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         )
                )
# air
geometry.append(mp.Block(material=mp.air, center=mp.Vector3(0,haircore/2)-offset_vector, size=mp.Vector3(mp.inf,haircore)))
# waveguide
geometry.append(mp.Block(material=SiN, center=mp.Vector3(0,0)-offset_vector, size=mp.Vector3(mp.inf,hSiN)))
# grating etch (N fully-etched gaps, one per period)
for n in range(0,N):
    geometry.append(mp.Block(material=mp.air, center=mp.Vector3(n*a+dgap/2,0)-offset_vector, size=mp.Vector3(dgap,hSiN)))
# air past the end of the grating (into buffer + PML)
geometry.append(mp.Block(material=mp.air, center=mp.Vector3(sxy-comp_origin_x-0.5*(dpml + dbuffer),0)-offset_vector, size=mp.Vector3(dpml + dbuffer,hSiN)))
# BOX
geometry.append(mp.Block(material=SiO2, center=mp.Vector3(0,-0.5*(hSiN + hSiO2))-offset_vector, size=mp.Vector3(mp.inf,hSiO2)))
# Substrate
geometry.append(mp.Block(material=Si, center=mp.Vector3(0,-0.5*(hSiN + hSi + dpml + dbuffer) - hSiO2)-offset_vector, size=mp.Vector3(mp.inf,hSi+dpml+dbuffer)))
# PMLs
boundary_layers = [ mp.PML(dpml) ]
# Source
# mode frequency (MEEP frequency units: 1/wavelength in um)
fcen = 1/0.635
waveguide_port_center = mp.Vector3(-1*dtaper,0)-offset_vector
waveguide_port_size = mp.Vector3(0,2*haircore-0.1)
fiber_port_center = mp.Vector3((0.5*sz-dpml+y_offset - 1)*np.sin(fiber_angle) + fiber_xposition, 0.5*sz-dpml+y_offset - 1)-offset_vector
fiber_port_size = mp.Vector3(sxy*3/5-2*dpml - 2,0)
# Waveguide source (disabled in this cell; the fiber source below is active)
# sources = [mp.EigenModeSource(src=mp.GaussianSource(fcen, fwidth=0.1*fcen),
#                               size=waveguide_port_size,
#                               center=waveguide_port_center,
#                               eig_band=1,
#                               direction=mp.X,
#                               eig_match_freq=True,
#                               eig_parity=mp.ODD_Z)]
# Fiber source (launched downward along the tilted fiber axis)
sources = [mp.EigenModeSource(src=mp.GaussianSource(fcen, fwidth=0.1*fcen),
                              size=fiber_port_size,
                              center=fiber_port_center,
                              eig_band=1,
                              direction=mp.NO_DIRECTION,
                              eig_kpoint=mp.Vector3(y=-1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                              eig_match_freq=True,
                              eig_parity=mp.ODD_Z)]
#symmetries = [mp.Mirror(mp.Y,-1)]
symmetries = []
sim = mp.Simulation(resolution=resolution,
                    cell_size=cell_size,
                    boundary_layers=boundary_layers,
                    geometry=geometry,
                    #geometry_center=mp.Vector3(x_offset, y_offset),
                    sources=sources,
                    dimensions=2,
                    symmetries=symmetries,
                    eps_averaging=False)
# Ports (monitors slightly inset from the source planes)
waveguide_monitor_port = mp.ModeRegion(center=waveguide_port_center+mp.Vector3(x=0.2), size=waveguide_port_size)
waveguide_monitor = sim.add_mode_monitor(fcen, 0, 1, waveguide_monitor_port, yee_grid=True)
fiber_monitor_port = mp.ModeRegion(center=fiber_port_center-mp.Vector3(y=0.2), size=fiber_port_size, direction=mp.NO_DIRECTION)
fiber_monitor = sim.add_mode_monitor(fcen, 0, 1, fiber_monitor_port)
# nearfield = sim.add_near2far(fcen, 0, 1, mp.Near2FarRegion(mp.Vector3(x_offset,0.5*sz-dpml+y_offset)-offset_vector, size=mp.Vector3(sxy-2*dpml,0)))
# -
# %%capture
# Run the fiber-excited simulation for 100 MEEP time units, animating Ez,
# then extract mode coefficients at both ports.
sim.init_sim()
f = plt.figure(dpi=100)
sim.plot2D(ax=f.gca())
plt.show()
f = plt.figure(dpi=100)
animate = mp.Animate2D(sim,mp.Ez,f=f,normalize=True)
sim.run(mp.at_every(1,animate), until=100)
plt.close()
animate.to_jshtml(10)
res_waveguide = sim.get_eigenmode_coefficients(waveguide_monitor,
                                               [1],
                                               eig_parity=mp.ODD_Z,
                                               direction=mp.X)
print(res_waveguide.alpha[0,0,0], res_waveguide.alpha[0,0,1])
# Fiber mode extracted along the tilted fiber axis via kpoint_func.
kpoint = mp.Vector3(y=-1).rotate(mp.Vector3(z=1), -1*fiber_angle)
res_fiber = sim.get_eigenmode_coefficients(fiber_monitor,
                                           [1],
                                           direction=mp.NO_DIRECTION,
                                           eig_parity=mp.ODD_Z,
                                           kpoint_func=lambda f,n: kpoint,
                                           )
# Coupling ratio: waveguide backward power over fiber forward power
# (alpha index semantics per MEEP docs -- TODO confirm orientation).
np.abs(res_waveguide.alpha[0,0,1])**2 / np.abs(res_fiber.alpha[0,0,0])**2
res_fiber.alpha
# +
# Near-duplicate of the setup cell above with three changes: the fiber is
# centered at x=0, all origin offsets are forced to 0, and the excitation
# is switched from the fiber port to the WAVEGUIDE port.
# Assumes `mp` (meep) and `math` were imported in an earlier cell -- TODO confirm.
resolution = 32 # pixels/unit length (1 um)
hSiN = 0.44
hSiO2 = 3.2
hSi = 1
hair = 4
dgrat = 0.42 * 0.75
dgap = 0.42 * (1-0.75)
a = dgrat + dgap
N = 20
N = N+1
dtaper = 12
dbuffer = 0.5
dpml = 1
# Fiber parameters, from SMF-633-4/125-1-L or PMF-633-4/125-0.25-L
fiber_core = 4
fiber_clad = 120
fiber_angle = 8
fiber_angle = np.radians(fiber_angle)
fiber_xposition = 0
fiber_air_gap = 1
hfiber = 3
haircore = 2
hfiber_geom = 100 # Some large number to make fiber extend into PML
# Index from 0.10 @ 633 nm numerical aperture
# cladding 635 nm real index, From NA, n=1.4535642400664652
nClad = 1.4535642400664652
Clad = mp.Medium(index=nClad)
# Pure fused silica core core 635 nm real index (will be SiO2 below)
# MEEP's computational cell is always centered at (0,0), but code has beginning of grating at (0,0)
sxy = 2*dpml + dtaper + a*N + 2*dbuffer # sx here
sz = 2*dbuffer + hSiO2 + hSiN + hair + hSi + 2*dpml # sy here
# All offsets zeroed in this cell (geometry is cell-centered).
# comp_origin_x = dpml + dbuffer + dtaper
comp_origin_x = 0
# meep_origin_x = sxy/2
# x_offset = meep_origin_x - comp_origin_x
x_offset = 0
# comp_origin_y = dpml + hSi + hSiO2 + hSiN/2
comp_origin_y = 0
# meep_origin_y = sz/2
# y_offset = meep_origin_y - comp_origin_y
y_offset = 0
# x_offset_vector = mp.Vector3(x_offset,0)
# offset_vector = mp.Vector3(x_offset, y_offset)
offset_vector = mp.Vector3(0,0,0)
# Si3N4 635 nm real index
nSiN = 2.0102
SiN = mp.Medium(index=nSiN)
# SiO2 635 nm real index
nSiO2 = 1.4569
SiO2 = mp.Medium(index=nSiO2)
# Si substrate 635 nm complex index, following https://meep.readthedocs.io/en/latest/Materials/#conductivity-and-complex
# eps = 15.044 + i*0.14910
Si = mp.Medium(epsilon=15.044, D_conductivity=2*math.pi*0.635*0.14910/15.044)
# We will do x-z plane simulation
cell_size = mp.Vector3(sxy,sz)
geometry = []
# Fiber (defined first to be overridden)
# Core
# fiber_offset = mp.Vector3(fiber_xposition + extrax, hSiN/2 + hair + haircore + extray) - offset_vector
geometry.append(mp.Block(material=Clad,
                         center=mp.Vector3(x=fiber_xposition) - offset_vector,
                         size=mp.Vector3(fiber_clad, hfiber_geom),
                         e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         )
                )
geometry.append(mp.Block(material=SiO2,
                         center=mp.Vector3(x=fiber_xposition) - offset_vector,
                         size=mp.Vector3(fiber_core, hfiber_geom),
                         e1=mp.Vector3(x=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         e2=mp.Vector3(y=1).rotate(mp.Vector3(z=1), -1*fiber_angle),
                         )
                )
# air
geometry.append(mp.Block(material=mp.air, center=mp.Vector3(0,haircore/2)-offset_vector, size=mp.Vector3(mp.inf,haircore)))
# waveguide
geometry.append(mp.Block(material=SiN, center=mp.Vector3(0,0)-offset_vector, size=mp.Vector3(mp.inf,hSiN)))
# grating etch
for n in range(0,N):
    geometry.append(mp.Block(material=mp.air, center=mp.Vector3(n*a+dgap/2,0)-offset_vector, size=mp.Vector3(dgap,hSiN)))
geometry.append(mp.Block(material=mp.air, center=mp.Vector3(sxy-comp_origin_x-0.5*(dpml + dbuffer),0)-offset_vector, size=mp.Vector3(dpml + dbuffer,hSiN)))
# BOX
geometry.append(mp.Block(material=SiO2, center=mp.Vector3(0,-0.5*(hSiN + hSiO2))-offset_vector, size=mp.Vector3(mp.inf,hSiO2)))
# Substrate
geometry.append(mp.Block(material=Si, center=mp.Vector3(0,-0.5*(hSiN + hSi + dpml + dbuffer) - hSiO2)-offset_vector, size=mp.Vector3(mp.inf,hSi+dpml+dbuffer)))
# PMLs
boundary_layers = [ mp.PML(dpml) ]
# Source
# mode frequency
fcen = 1/0.635
waveguide_port_center = mp.Vector3(-1*dtaper,0)-offset_vector
waveguide_port_size = mp.Vector3(0,2*haircore-0.1)
fiber_port_center = mp.Vector3((0.5*sz-dpml+y_offset - 1)*np.sin(fiber_angle) + fiber_xposition, 0.5*sz-dpml+y_offset - 1)-offset_vector
fiber_port_size = mp.Vector3(sxy*3/5-2*dpml - 2,0)
# Waveguide source (active in this cell)
sources = [mp.EigenModeSource(src=mp.GaussianSource(fcen, fwidth=0.1*fcen),
                              size=waveguide_port_size,
                              center=waveguide_port_center,
                              eig_band=1,
                              direction=mp.X,
                              eig_match_freq=True,
                              eig_parity=mp.ODD_Z)]
# Fiber source (disabled in this cell)
# sources = [mp.EigenModeSource(src=mp.GaussianSource(fcen, fwidth=0.1*fcen),
#                               size=fiber_port_size,
#                               center=fiber_port_center,
#                               eig_band=1,
#                               direction=mp.NO_DIRECTION,
#                               eig_kpoint=mp.Vector3(y=-1).rotate(mp.Vector3(z=1), -1*fiber_angle),
#                               eig_match_freq=True,
#                               eig_parity=mp.ODD_Z)]
#symmetries = [mp.Mirror(mp.Y,-1)]
symmetries = []
sim = mp.Simulation(resolution=resolution,
                    cell_size=cell_size,
                    boundary_layers=boundary_layers,
                    geometry=geometry,
                    #geometry_center=mp.Vector3(x_offset, y_offset),
                    sources=sources,
                    dimensions=2,
                    symmetries=symmetries,
                    eps_averaging=False)
# Ports
waveguide_monitor_port = mp.ModeRegion(center=waveguide_port_center+mp.Vector3(x=0.2), size=waveguide_port_size)
waveguide_monitor = sim.add_mode_monitor(fcen, 0, 1, waveguide_monitor_port, yee_grid=True)
fiber_monitor_port = mp.ModeRegion(center=fiber_port_center-mp.Vector3(y=0.2), size=fiber_port_size, direction=mp.NO_DIRECTION)
fiber_monitor = sim.add_mode_monitor(fcen, 0, 1, fiber_monitor_port)
# nearfield = sim.add_near2far(fcen, 0, 1, mp.Near2FarRegion(mp.Vector3(x_offset,0.5*sz-dpml+y_offset)-offset_vector, size=mp.Vector3(sxy-2*dpml,0)))
# -
# %%capture
sim.init_sim()
f = plt.figure(dpi=100)
sim.plot2D(ax=f.gca())
plt.show()
f = plt.figure(dpi=100)
animate = mp.Animate2D(sim,mp.Ez,f=f,normalize=True)
sim.run(mp.at_every(1,animate), until=100)
plt.close()
635 / (neff - np.sin(8 * np.pi/180))
| grating_coupler_meep/farfield_monitor/analysis_8deg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: trainingAI
# language: python
# name: trainingai
# ---
# # Introduction
#
# **What?** `__repr__` vs. `__str__` methods
#
#
# # What is the difference?
#
# - They both represent the object but the `__repr__` method tells you more.
# - So much more that you can also reconstruct the object based on that information.
# - Also Python interpreter sessions use `__repr__` to inspect objects.
# - `__str__` should have a readable output.
#
#
# # Example #1
# +
import numpy as np
class Tensor(object):
    """Minimal numpy-backed tensor used to demonstrate __repr__ vs __str__.

    FIX: in the original, the intended docstrings of __repr__/__str__ were
    placed AFTER a print() call, making them dead string statements rather
    than docstrings; they are now real docstrings (the printed demo messages
    are unchanged). Several typos were also corrected.
    """

    def __init__(self, data):
        """Wrap *data* (any array-like, e.g. a list) in a numpy array."""
        self.data = np.array(data)

    def __add__(self, other):
        """Return a new Tensor holding the elementwise sum of two tensors."""
        return Tensor(self.data + other.data)

    def __repr__(self):
        """Return the official string representation of the object.

        The representation is detailed enough that the underlying array
        could be reconstructed from it; interactive interpreter sessions
        use this method to inspect objects.
        """
        print("calling __repr__")
        return repr(self.data)

    def __str__(self):
        """Return a human-readable string representation.

        Called when print() or str() is invoked on the object.
        """
        print("calling __str__")
        return str(self.data)
# -
# Demo: a bare `x` in a notebook cell displays repr(x) (invokes __repr__),
# while print(x) goes through __str__.
x = Tensor([1,2,3,4,5])
x
repr(x)
str(x)
print(x)
# # References
#
# - https://www.journaldev.com/22460/python-str-repr-functions
#
#
| GitHub_MD_rendering/__repr__ vs. __str__.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [Root]
# language: python
# name: Python [Root]
# ---
# + nbpresent={"id": "f999d3ea-acb8-4aa6-a8ec-09ae1b22120f"}
from itertools import islice, izip
from server import *
# Capture the first 1000 market events; _plot_orders below unpacks each
# event as (timestamp, price, spread).
hist = list(islice(market(), 0, 1000))
def bucket(tdelta, series):
    """Group (time, value) pairs into buckets of width tdelta.

    Emits (bucket_start, total) each time a new bucket opens; the total
    emitted alongside a bucket start is the sum accumulated for the
    *previous* bucket (so the first yield carries 0 and the final
    bucket's total is never emitted).
    """
    start = None
    total = 0
    for ts, val in series:
        opens_new_bucket = (not start) or (start + tdelta < ts)
        if opens_new_bucket:
            start = ts
            yield start, total
            total = 0
        total += val
def top(bids, asks):
    """Yield the best (first) bid and ask from each pair of book sides.

    An empty side yields the empty container itself (the ``x and x[0]``
    idiom), matching the original behaviour.

    FIX: the original iterated with the Python-2-only ``itertools.izip``;
    the builtin ``zip`` works on both Python 2 and 3 (on Python 2 it is
    eager rather than lazy, which is fine for the finite sequences used
    here).
    """
    for bid, ask in zip(bids, asks):
        yield bid and bid[0], ask and ask[0]
def _plot_orders(hist):
    """Plot price/spread, order flow and top-of-book bands for a market history.

    NOTE(review): Python 2 only -- the tuple-unpacking lambda parameters
    below (``lambda (idx, px): ...``) are a syntax error on Python 3, and
    the file imports the Python-2-only ``izip``.
    """
    # %matplotlib inline
    import matplotlib.pyplot as plt
    # hist events are (timestamp, price, spread)
    ts, pxs, spd = zip(*hist)
    # orders() / order_book() come from the `server` module imported above
    ts, sides, ords, sizes = zip(*list(orders(hist)))
    ts = list(ts)
    ts, bids, asks = zip(*order_book(zip(ts, sides, ords, sizes), dict()))
    top_buy, top_sell = zip(*top(bids, asks))
    fig, ax1 = plt.subplots(figsize = (15, 5))
    # mid price in magenta, bid/ask band (price +/- half spread) in cyan
    ax1.plot(ts, pxs, 'm')
    bids = map(lambda (idx, px): px - spd[idx] / 2, enumerate(pxs))
    asks = map(lambda (idx, px): px + spd[idx] / 2, enumerate(pxs))
    ax1.fill_between(ts, bids, asks, color = 'c', alpha = 0.2)
    # individual orders: blue = buy, green = sell; red band = top of book
    # (falls back to 120 when one side of the book is empty)
    ax1.scatter(ts, ords, c = map(lambda x: 'b' if x == 'buy' else 'g', sides), alpha = 0.15)
    ax1.fill_between(ts, map(lambda x: x and x[0] or 120, top_buy), map(lambda x: x and x[0] or 120, top_sell), color='r', alpha = 0.2)
    # traded size bucketed into 5-second bins on a secondary axis
    ts, sizes = zip(*bucket(timedelta(seconds = 5), zip(ts, sizes)))
    ax2 = ax1.twinx()
    ax2.fill_between(ts, sizes, alpha = 0.1, color = 'darkblue')
    ax1.axis([ts[0], ts[-1], None, None])
    ax2.axis([ts[0], ts[-1], 0, 16000])
    plt.show()
_plot_orders(hist)
# -
| Task 1/market_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Content Based Recommendation System
# +
import pandas as pd
df = pd.read_csv('./movies_metadata.csv')
# -
df
# NOTE(review): the result of fillna is discarded (not assigned back), so this
# line has no effect -- rows whose tagline is NaN get a NaN description and
# are dropped by the dropna below.
df['tagline'].fillna('')
df['description'] = df['overview'] + df['tagline']
# df['description'] = df['description'].fillna('')
df.shape
df.dropna(subset=['description'], inplace=True)
# NOTE(review): drop_duplicates on the title *column* does not remove the
# corresponding rows from df -- df.shape below is unchanged by it.
df['title'].drop_duplicates(inplace=True)
df.shape
# NOTE(review): reset_index() returns a new frame that is discarded here
# (display only); df keeps its gapped index after the dropna above.
df.reset_index()
# +
from sklearn.feature_extraction.text import TfidfVectorizer
# TF-IDF over 1- to 3-grams of the description text, English stop words removed.
tf = TfidfVectorizer(analyzer='word', ngram_range=(1, 3), min_df=0, stop_words='english')
tfidf_matrix = tf.fit_transform(df['description'])
print(tfidf_matrix)
# -
tfidf_matrix.shape
# +
from sklearn.metrics.pairwise import linear_kernel
# Dot products of the (L2-normalised) tf-idf rows, i.e. cosine similarities.
cosine_similarities = linear_kernel(tfidf_matrix, tfidf_matrix)
# -
cosine_similarities.shape
cosine_similarities[0].shape
# df = df.reset_index()
titles = df['title']
# FIX: the original built `indices` from df.index, but after the earlier
# dropna (without an index reset) those labels no longer match the 0..n-1
# positional rows of tfidf_matrix / cosine_similarities, so recommend()
# looked up the wrong (or an out-of-range) row. Map titles to *positions*
# instead, consistent with titles.iloc[...] in recommend().
indices = pd.Series(range(len(df)), index=df['title'])
def recommend(title):
    """Return the titles of the 30 movies most similar to *title*.

    Uses the precomputed cosine-similarity matrix; the top hit (the movie
    itself) is skipped.
    """
    movie_idx = indices[title]
    ranked = sorted(enumerate(cosine_similarities[movie_idx]),
                    key=lambda pair: pair[1], reverse=True)
    best_matches = ranked[1:31]
    positions = [pos for pos, _score in best_matches]
    return titles.iloc[positions]
# Sanity-check the recommender on two well-known titles.
recommend('The Godfather').head(10)
recommend('The Dark Knight Rises').head(10)
| CourseContent/09-Recommendation.Systems/Week1/ContentBasedRecommendation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Pandas dataframe
import pandas as pd
from pandas import DataFrame
# Scikit-Learn
import sklearn
from sklearn import datasets, linear_model, metrics, tree
from sklearn.model_selection import train_test_split, LeaveOneOut, KFold, cross_validate, RandomizedSearchCV
from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error, accuracy_score
# Models
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge
from sklearn.ensemble import (BaggingRegressor, ExtraTreesRegressor, GradientBoostingRegressor,
RandomForestRegressor, AdaBoostRegressor)
from sklearn.svm import SVR
from sklearn.tree import DecisionTreeRegressor
import joblib
from matplotlib import pyplot as plt
import numpy as np
import os
from tqdm import tqdm
# -
# Load the scaled nanoparticle-diameter dataset.
df_diam = pd.read_csv("dataset_scaled_diam.csv")
# Strip leading/trailing whitespace from the column names.
df_diam.columns = df_diam.columns.str.strip()
# +
# Input for ML models
input_col = ['in_amount_mmol', 'p_amount_mmol', 'ligand_amount_mmol', 'first_sol_amount_ml', 'second_sol_amount_ml',
'third_sol_amount_ml', 'other_1_amount_mmol', 'other_2_amount_mmol', 'total_volume_ml',
'temp_c', 'time_min', 'x0_chloroindium oxalate', 'x0_indium acetate', 'x0_indium bromide',
'x0_indium chloride', 'x0_indium iodide', 'x0_indium myristate', 'x0_indium oxalate',
'x0_indium palmitate', 'x0_indium trifluoroacetate', 'x0_indium tris(N,N-diisopropylacetamidinato)',
'x1_bis(trimethylsilyl)phosphine', 'x1_phosphine gas', 'x1_phosphorus trichloride', 'x1_sodium phosphide',
'x1_tris(diethylamino)phosphine', 'x1_tris(dimethylamino)phosphine', 'x1_tris(trimethylgermyl)phosphine',
'x1_tris(trimethylsilyl)phosphine', 'x1_white phosphorus', 'x2_None', 'x2_dodecanethiol',
'x2_lauric acid', 'x2_myristic acid', 'x2_oleic acid', 'x2_palmitic acid', 'x2_stearic acid',
'x3_4-ethylpyridine', 'x3_None', 'x3_dimethylformamide', 'x3_dodecylamine', 'x3_mesitylene',
'x3_octadecene', 'x3_oleylamine', 'x3_trioctylamine', 'x3_trioctylphosphine', 'x3_trioctylphosphine oxide',
'x4_None', 'x4_dioctyl ether', 'x4_dioctylamine', 'x4_hexadecylamine', 'x4_hexadecylamine',
'x4_octylamine', 'x4_oleylamine', 'x4_toluene', 'x4_trioctylphosphine', 'x4_trioctylphosphine oxide',
'x5_None', 'x5_trioctylphosphine', 'x6_None', 'x6_acetic acid', 'x6_superhydride',
'x6_tetrabutylammonium myristate', 'x6_zinc acetate', 'x6_zinc bromide', 'x6_zinc chloride',
'x6_zinc iodide', 'x6_zinc octanoate', 'x6_zinc oleate', 'x6_zinc stearate', 'x6_zinc undecylenate',
'x7_None', 'x7_copper bromide', 'x7_oleic acid', 'x7_water', 'x7_zinc iodide']
output_col = ['diameter_nm']
X = df_diam[input_col]
Y = df_diam[output_col]
# -
# Splitting dataset for training
# Hold out 20% of rows for testing; fixed random_state keeps the split reproducible.
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=45, shuffle=True)
# # Hyper parameter tuning
# ## Ensemble regressors
# ### Bagging
# +
# %%time
#sklearn random search over n iterations
#define ranges of parameters to tune
#the internet tunes these parameters for bagging
params = {'n_estimators': range(10,1001)}
#n_jobs runs jobs in parallel, verbose prints updates in command line
# NOTE(review): the search is fit on the full (X, Y) rather than the training
# split only, so the held-out test rows influence the chosen hyper-parameters.
rs_bag = RandomizedSearchCV(BaggingRegressor(), param_distributions=params, n_iter=200, n_jobs=-1, verbose=10,
                           scoring='neg_mean_absolute_error')
rs_bag.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_bag.best_score_)
print('Best Parameters : ',rs_bag.best_params_)
# +
#using random search parameters
# Refit on the training split with the search winner, then report test MAE
# and plot predicted vs. observed diameters (red line = perfect prediction).
B_regr = BaggingRegressor(n_estimators=126)
B_regr.fit(X_train, np.ravel(Y_train))
B_Y_pred = B_regr.predict(X_test)
B_mae = mean_absolute_error(Y_test, B_Y_pred)
print("Mean absolute error =", round(B_mae,3), 'nm')
plt.figure()
plt.title("Bagging")
plt.plot(Y_test, B_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(B_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Random Forest
# +
# %%time
#sklearn random search
params = {'n_estimators': range(100,1001)}
#n_jobs runs jobs in parallel, verbose prints updates
# NOTE(review): fit on the full (X, Y) -- test rows leak into tuning.
rs_rf = RandomizedSearchCV(RandomForestRegressor(), param_distributions=params, n_iter=200, n_jobs=-1, verbose=10,
                          scoring='neg_mean_absolute_error')
rs_rf.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_rf.best_score_)
print('Best Parameters : ',rs_rf.best_params_)
# +
#using grid search parameters
# Refit on the training split with the tuned n_estimators; report MAE and plot.
RF_regr = RandomForestRegressor(n_estimators=186)
RF_regr.fit(X_train, np.ravel(Y_train))
RF_Y_pred = RF_regr.predict(X_test)
RF_mae = mean_absolute_error(Y_test, RF_Y_pred)
print("Mean absolute error =", round(RF_mae,3), 'nm')
plt.figure()
plt.title("Random Forest")
plt.plot(Y_test, RF_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(RF_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Decision Tree
# +
#using grid search parameters
# Plain decision tree with default hyper-parameters; report test MAE and plot.
DT_regr = DecisionTreeRegressor()
DT_regr.fit(X_train, np.ravel(Y_train))
DT_Y_pred = DT_regr.predict(X_test)
DT_mae = mean_absolute_error(Y_test, DT_Y_pred)
print("Mean absolute error =", round(DT_mae,3), 'nm')
plt.figure()
plt.title("Decision Tree")
plt.plot(Y_test, DT_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(DT_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Extra Trees
# +
# Extra Trees with default hyper-parameters; report test MAE and plot.
ET_regr = ExtraTreesRegressor()
ET_regr.fit(X_train, np.ravel(Y_train))
ET_Y_pred = ET_regr.predict(X_test)
ET_mae = mean_absolute_error(Y_test, ET_Y_pred)
print("Mean absolute error =", round(ET_mae,3), 'nm')
plt.figure()
plt.title("Extra Trees")
plt.plot(Y_test, ET_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(ET_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Ada Boost
# +
# %%time
#sklearn random search
# NOTE(review): learning_rate is sampled from integers 1..10 -- fractional
# learning rates (< 1) are never tried.
params = {'n_estimators': range(50,501),
          'learning_rate':range(1,11),
          'loss':['linear', 'square', 'exponential']}
#n_jobs runs jobs in parallel, verbose prints updates
rs_ab = RandomizedSearchCV(AdaBoostRegressor(), param_distributions=params, n_iter=200, n_jobs=-1, verbose=10,
                          scoring='neg_mean_absolute_error')
rs_ab.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_ab.best_score_)
print('Best Parameters : ',rs_ab.best_params_)
# +
# Refit on the training split with the tuned parameters; report MAE and plot.
AB_regr = AdaBoostRegressor(n_estimators=129, loss='linear', learning_rate=1)
AB_regr.fit(X_train, np.ravel(Y_train))
AB_Y_pred = AB_regr.predict(X_test)
AB_mae = mean_absolute_error(Y_test, AB_Y_pred)
print("Mean absolute error =", round(AB_mae,3), 'nm')
plt.figure()
plt.title("Ada Boost")
plt.plot(Y_test, AB_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(AB_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Gradient Boosting
# Three-stage tuning: (1) loss/max_features, (2) tree-shape parameters with
# the stage-1 winners fixed, (3) random_state with everything else fixed.
# +
# %%time
#sklearn random search
params = {'loss':['ls', 'lad', 'huber', 'quantile'],
          'max_features':['auto', 'sqrt', 'log2']}
#n_jobs runs jobs in parallel, verbose prints updates
rs_gb = RandomizedSearchCV(GradientBoostingRegressor(), param_distributions=params, n_iter=200, n_jobs=-1, verbose=10,
                          scoring='neg_mean_absolute_error')
rs_gb.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_gb.best_score_)
print('Best Parameters : ',rs_gb.best_params_)
# +
# %%time
#sklearn random search
params = {'n_estimators': range(100,1001),
          'min_samples_split':range(2,21),
          'min_samples_leaf': range(1,11),
          'max_depth':range(3,31)}
#n_jobs runs jobs in parallel, verbose prints updates
rs_gb = RandomizedSearchCV(GradientBoostingRegressor(max_features='log2', loss='lad'), param_distributions=params, n_iter=200, n_jobs=-1, verbose=10,
                          scoring='neg_mean_absolute_error')
rs_gb.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_gb.best_score_)
print('Best Parameters : ',rs_gb.best_params_)
# +
# %%time
#sklearn random search
params = {'random_state':range(1,1001)}
#n_jobs runs jobs in parallel, verbose prints updates
rs_gb = RandomizedSearchCV(GradientBoostingRegressor(max_features='log2', loss='lad',
                                                     n_estimators=801, min_samples_split=18,
                                                     min_samples_leaf=1, max_depth=17),
                           param_distributions=params, n_iter=100, n_jobs=-1, verbose=10, scoring='neg_mean_absolute_error')
rs_gb.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_gb.best_score_)
print('Best Parameters : ',rs_gb.best_params_)
# +
# Final GB model refit on the training split with the random-search winners.
# FIX: the searches above fixed max_features='log2' and loss='lad' while
# tuning the remaining parameters, but the final model omitted them and
# silently fell back to the defaults; they are restored for consistency.
GB_regr = GradientBoostingRegressor(loss='lad', max_features='log2',
                                    n_estimators=801, min_samples_split=18,
                                    min_samples_leaf=1, max_depth=17, random_state=174)
GB_regr.fit(X_train, np.ravel(Y_train))
GB_Y_pred = GB_regr.predict(X_test)
GB_mae = mean_absolute_error(Y_test, GB_Y_pred)
print("Mean absolute error =", round(GB_mae,3), 'nm')
plt.figure()
plt.title("Gradient Boosting")
plt.plot(Y_test, GB_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(GB_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ## Others
# ### Linear Regression
# +
#using random search parameters
# Ordinary least squares baseline; report test MAE and plot.
Linear_regr = LinearRegression()
Linear_regr.fit(X_train, np.ravel(Y_train))
Linear_Y_pred = Linear_regr.predict(X_test)
Linear_mae = mean_absolute_error(Y_test, Linear_Y_pred)
print("Mean absolute error =", round(Linear_mae,3), 'nm')
plt.figure()
plt.title("Linear")
plt.plot(Y_test, Linear_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(Linear_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Lasso
# +
# %%time
#sklearn random search over n iterations
params = {'max_iter': range(1000,10001),
          'selection': ['cyclic', 'random']}
#n_jobs runs jobs in parallel, verbose prints updates
rs_lasso = RandomizedSearchCV(Lasso(), param_distributions=params, n_iter=1000, n_jobs=-1, verbose=10,
                             scoring='neg_mean_absolute_error')
rs_lasso.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_lasso.best_score_)
print('Best Parameters : ',rs_lasso.best_params_)
# +
#using random search parameters
# NOTE(review): the tuned max_iter/selection values are not passed here;
# Lasso() is fit with defaults.
Lasso_regr = Lasso()
Lasso_regr.fit(X_train, np.ravel(Y_train))
Lasso_Y_pred = Lasso_regr.predict(X_test)
Lasso_mae = mean_absolute_error(Y_test, Lasso_Y_pred)
print("Mean absolute error =", round(Lasso_mae,3), 'nm')
plt.figure()
plt.title("Lasso")
plt.plot(Y_test, Lasso_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(Lasso_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### ElasticNet
# +
# %%time
#sklearn random search over n iterations
params = {'max_iter': range(1000,10001),
          'selection': ['cyclic', 'random']}
#n_jobs runs jobs in parallel, verbose prints updates
rs_en = RandomizedSearchCV(ElasticNet(), param_distributions=params, n_iter=1000, n_jobs=-1, verbose=10,
                          scoring='neg_mean_absolute_error')
rs_en.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_en.best_score_)
print('Best Parameters : ',rs_en.best_params_)
# +
#using random search parameters
# NOTE(review): tuned values are not passed; ElasticNet() is fit with defaults.
EN_regr = ElasticNet()
EN_regr.fit(X_train, np.ravel(Y_train))
EN_Y_pred = EN_regr.predict(X_test)
EN_mae = mean_absolute_error(Y_test, EN_Y_pred)
print("Mean absolute error =", round(EN_mae,3), 'nm')
plt.figure()
plt.title("ElasticNet")
plt.plot(Y_test, EN_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(EN_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### Ridge
# +
# %%time
#sklearn random search over n iterations
params = {'max_iter':range(1000,10001),
          'random_state':range(1,1001)}
#n_jobs runs jobs in parallel, verbose prints updates
rs_ridge = RandomizedSearchCV(Ridge(solver='saga'), param_distributions=params, n_iter=1000, n_jobs=-1, verbose=10,
                             scoring='neg_mean_absolute_error')
rs_ridge.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_ridge.best_score_)
print('Best Parameters : ',rs_ridge.best_params_)
# +
#using random search parameters
# NOTE(review): tuned values (and solver='saga') are not passed; Ridge() defaults.
Ridge_regr = Ridge()
Ridge_regr.fit(X_train, np.ravel(Y_train))
Ridge_Y_pred = Ridge_regr.predict(X_test)
Ridge_mae = mean_absolute_error(Y_test, Ridge_Y_pred)
print("Mean absolute error =", round(Ridge_mae,3), 'nm')
plt.figure()
plt.title("Ridge")
plt.plot(Y_test, Ridge_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(Ridge_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### SVR kernel='linear'
# +
# %%time
#sklearn random search over n iterations
# NOTE(review): degree only affects the 'poly' kernel and cache_size only
# affects speed, so this search cannot change linear-SVR accuracy.
params = {'degree':range(1,10),
          'cache_size':range(200,2001)}
#n_jobs runs jobs in parallel, verbose prints updates
rs_svrlin = RandomizedSearchCV(SVR(kernel='linear'), param_distributions=params, n_iter=1000, n_jobs=-1, verbose=10,
                              scoring='neg_mean_absolute_error')
rs_svrlin.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_svrlin.best_score_)
print('Best Parameters : ',rs_svrlin.best_params_)
# +
#using random search parameters
SVRlin_regr = SVR(kernel='linear')
SVRlin_regr.fit(X_train, np.ravel(Y_train))
SVRlin_Y_pred = SVRlin_regr.predict(X_test)
SVRlin_mae = mean_absolute_error(Y_test, SVRlin_Y_pred)
print("Mean absolute error =", round(SVRlin_mae,3), 'nm')
plt.figure()
plt.title("SVR (kernel='linear')")
plt.plot(Y_test, SVRlin_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(SVRlin_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ### SVR kernel='rbf'
# +
# %%time
#sklearn random search over n iterations
params = {'degree':range(1,10),
          'cache_size':range(200,2001)}
#n_jobs runs jobs in parallel, verbose prints updates
rs_svrrbf = RandomizedSearchCV(SVR(kernel='rbf'), param_distributions=params, n_iter=1000, n_jobs=-1, verbose=10,
                              scoring='neg_mean_absolute_error')
rs_svrrbf.fit(X, np.ravel(Y))
print('Best MAE Score Through Random Search : %.3f'%rs_svrrbf.best_score_)
print('Best Parameters : ',rs_svrrbf.best_params_)
# +
#using random search parameters
SVRrbf_regr = SVR(kernel='rbf')
SVRrbf_regr.fit(X_train, np.ravel(Y_train))
SVRrbf_Y_pred = SVRrbf_regr.predict(X_test)
SVRrbf_mae = mean_absolute_error(Y_test, SVRrbf_Y_pred)
print("Mean absolute error =", round(SVRrbf_mae,3), 'nm')
plt.figure()
plt.title("SVR (kernel='rbf')")
plt.plot(Y_test, SVRrbf_Y_pred, 'o')
plt.xlabel('Observed Values (nm)')
plt.ylabel('Predicted Values (nm)')
plt.plot([1,6],[1,6], color = 'r')
plt.text(1, 5, 'MAE=' , fontdict=None)
plt.text(1.49, 5, round(SVRrbf_mae,3) , fontdict=None)
plt.text(1.94, 5, 'nm', fontdict=None)
plt.show()
# -
# ## Conclusion
# Extra Trees gave the best performance
# ## Saving model
# +
# Refit a fresh Extra Trees model on the training split and persist it.
# NOTE(review): ET_Y_pred is computed but unused here.
ET_regr = ExtraTreesRegressor()
ET_regr.fit(X_train, np.ravel(Y_train))
ET_Y_pred = ET_regr.predict(X_test)
joblib.dump(ET_regr, "./model_aug_diam_ExtraTrees.joblib")
# -
| notebook_85/flo/2. Flo diameter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: CS5228
# language: python
# name: cs5228
# ---
# ## Use Weighted MRT + Distance to Nearest Amenties
# ## Train Random Forest & Predict on Test Data
# +
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, StandardScaler, OneHotEncoder
import category_encoders as ce
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
from geopy.distance import geodesic
import geopy
from sklearn.model_selection import GridSearchCV
from sklearn import tree
# -
# load pre-processed data
train_df = pd.read_csv('./data/full-train.csv', index_col=0)
test_df = pd.read_csv('./data/full-test.csv', index_col=0)
train_df
# +
# x y split
# Target column is 'price'; the test frame has no target.
X_train = train_df.drop(columns = ['price'])
y_train = train_df['price']
X_test = test_df
X_test
# -
# Load the precomputed weighted-MRT-distance auxiliary features (train/test).
weighted_distance = pd.read_csv('./data/auxiliary-mrt-distance-weighted.csv', index_col=0)
weighted_distance_test = pd.read_csv('./data/auxiliary-mrt-distance-weighted-test.csv', index_col=0)
# +
# standardize auxiliary data
# Fit the scaler on the TRAIN auxiliary features only; the test cell below
# should reuse this fitted scaler via transform().
weighted_betweenness_distance = weighted_distance.drop(columns = ['closeness_closest_mrt', 'closest_mrt'])
num_scaler = StandardScaler()
weighted_betweenness_distance[weighted_betweenness_distance.columns] = num_scaler.fit_transform(weighted_betweenness_distance)
# -
# transform standardization to test data
# FIX: the original called fit_transform here, which re-fits the scaler on the
# test set -- that makes test features inconsistent with the train-time scaling
# (and leaks test statistics). Use the scaler already fitted on the train data.
weighted_betweenness_distance_test = weighted_distance_test.drop(columns = ['closeness_closest_mrt', 'closest_mrt'])
weighted_betweenness_distance_test[weighted_betweenness_distance_test.columns] = num_scaler.transform(weighted_betweenness_distance_test)
# Join the scaled auxiliary features onto the main feature frames by index.
x_train_distance_betweenness = X_train.join(weighted_betweenness_distance)
x_train_distance_betweenness
x_test_distance_betweenness = X_test.join(weighted_betweenness_distance_test)
x_test_distance_betweenness
# Hyper-parameters found previously (presumably via a grid search in another
# notebook -- confirm).
best_param = {'bootstrap': True, 'max_depth': 50, 'max_features': None, 'min_samples_split': 5, 'n_estimators': 100}
rfr = RandomForestRegressor()
rfr.set_params(**best_param)
rfr.fit(x_train_distance_betweenness, y_train)
result = rfr.predict(x_test_distance_betweenness)
result
# Build the submission frame: sequential Id plus the predicted price.
ID = range(0, len(result))
df = pd.DataFrame(ID, columns = ['Id'])
# df['Predicted'] = np.round(result, 1)
df['Predicted'] = result
df
# save predicted value
path = './data/random-forest-betweenness-distance-predict.csv'
df.to_csv(path, index = False)
| Auxiliary-data-nearest-distance-betweenness-test-result.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: algo
# language: python
# name: algo
# ---
import numpy as np
import pandas as pd
import requests
import xlsxwriter
import os
# # Value Investing
#
# Value investing is an investment strategy that involves picking stocks that appear to be trading for less than their intrinsic or book value. Value investors actively ferret out stocks they think the stock market is underestimating. ([Source: Investopedia](https://www.investopedia.com/terms/v/valueinvesting.asp))
# change the directory to root to allow importing of py files saved in algorithmictrading
# NOTE(review): os.chdir with a relative path depends on the current working
# directory -- re-running this cell climbs one level further each time.
os.chdir(r'..\\')
from algorithmictrading.secrets import IEX_CLOUD_API_TOKEN
# # Data Loading - S&P 500 Index
# The S&P 500 Index is one of the most common benchmarks for US Large Cap stocks. It tracks the performance of 500 of the largest companies in the United States.
#
# You can substitute any list of tickers for this equal weight walk-through. The list of stocks should be saved in the `\data` folder.
# Load the S&P 500 ticker list (one 'Ticker' column).
stocks = pd.read_csv(r'.\data\sp_500_stocks.csv')
stocks.head()
# # Connecting to the IEX API
# We will be using the free IEX Cloud API for the market data. Data is purposefully scrambled and is NOT meant for production!
#
# [Documentation can be found here.](https://iexcloud.io/docs/api/#testing-sandbox)
#
# We can use the base URL and concatenate a string from the API IEX documentation in order to pull the data.
# We can pass the following into the string for a specific data request:
# - `symbol`
# - `token`
# +
# Single-symbol request against the IEX sandbox (scrambled data, not for
# production) to inspect the response shape.
BASE_URL = 'https://sandbox.iexapis.com/stable'
symbol = 'AAPL'
stats = f'/stock/{symbol}/stats?token={IEX_CLOUD_API_TOKEN}'
data = requests.get(BASE_URL+stats).json()
data
# -
data['peRatio']
# # Making Batch API Calls
# Making a single http request is really slow. We are much better served breaking up our security list into small batches. The IEX API limits 100 symbols per batch, so we will make 6 http requests.
#
# For our first **value** factor, we will use:
# - `peRatio` - is the ratio for valuing a company that measures its current share price relative to its per-share earnings; also sometimes referred to as price multiple or the earnings multiple; it's used by investors and analysts to determine the relative value of a company's shares in an apples-to-apples comparison ([source: Investopedia](https://www.investopedia.com/terms/p/price-earningsratio.asp))
#
# +
def make_chunks(df):
    """Split the Ticker column into batches of at most 100 symbols (IEX limit)."""
    tickers = df['Ticker'].to_list()
    n_batches = np.ceil(len(df) / 100)
    return np.array_split(tickers, n_batches)
def get_data_batch(df):
    """Fetch latest price and P/E ratio for every ticker via batched IEX calls.

    Returns one DataFrame with columns ticker / latest_price / pe_ratio,
    concatenated across the <=100-symbol batches.
    """
    df_list = []
    chunks = make_chunks(df)
    for chunk in chunks:
        ticker_strings = ','.join(chunk)
        batch_api_call_url = f'https://sandbox.iexapis.com/stable/stock/market/batch/?types=stats,quote&symbols={ticker_strings}&token={IEX_CLOUD_API_TOKEN}'
        data = requests.get(batch_api_call_url).json()
        tickers = [k for k in data.keys()]
        latestprices = [data[k]['quote']['latestPrice'] for k in data.keys()]
        pe_ratios = [data[k]['quote']['peRatio'] for k in data.keys()]
        df = pd.DataFrame({'ticker': tickers, 'latest_price': latestprices, 'pe_ratio': pe_ratios})
        df_list.append(df)
    return pd.concat(df_list, ignore_index=True)
# -
# Pull the value data for the whole S&P 500 universe (network call).
value_df = get_data_batch(stocks)
value_df.head()
# # Filtering for Relative Value and Calculating Share Amounts
# The next step is to drop the securities that are "over-priced" (they have a high p/e ratio)
# - `sort_values()` by `pe_ratio`
# - take the top 50 stocks
# - Calculate the shares needed to buy, given a portfolio amount (assuming equal weighting)
# +
def transform_value_df(df, stock_cutoff=50):
    """Rank by P/E (cheapest first), drop non-positive earnings, keep the top picks."""
    ranked = df.copy().sort_values('pe_ratio')
    has_earnings = df['pe_ratio'] > 0  # filter out companies with negative earnings
    cheap = ranked.loc[has_earnings].reset_index(drop=True)
    return cheap.iloc[:stock_cutoff]
def get_share_amounts(df, portfolio_size=50000000):
    """Assuming equal weighting, add floor(position_size / price) as recommended_trades."""
    position_size = portfolio_size / len(df.index)
    return df.assign(recommended_trades= lambda frame: np.floor(position_size / frame['latest_price']))
# -
# Keep the 50 cheapest positive-P/E stocks and size equal-weight positions.
final_df = (transform_value_df(value_df)
            .pipe(get_share_amounts)
           )
final_df.head()
# # Improving On Our Value Strategy
# We will introduce additional ways to infer a company's intrinsic valuation.
#
# The following factors will be extracted from IEX CLOUD:
# - `priceToBook`
# - Is calculated by dividing the company's stock price per share by its book value per share (BVPS). An asset's book value is equal to its carrying value on the balance sheet, and companies calculate it netting the asset against its accumulated depreciation
# - `priceToSales`
# - Is a valuation ratio that compares a company’s stock price to its revenues. It is an indicator of the value that financial markets have placed on each dollar of a company’s sales or revenues
# - The P/S ratio can be calculated either by dividing the company’s market capitalization by its total sales over a designated period (usually twelve months) or on a per-share basis by dividing the stock price by sales per share. The P/S ratio is also known as a sales multiple or revenue multiple
# - `enterpriseValue`
# - Is a measure of a company's total value, often used as a more comprehensive alternative to equity market capitalization
# - EV includes in its calculation the market capitalization of a company but also short-term and long-term debt as well as any cash on the company's balance sheet
# - We will use this in conjuction with `grossProfit` and `EBITDA` to calculate two additional ratios
# - `EBITDA` (earnings before interest, taxes, depreciation, and amortization)
# - Is a measure of a company's overall financial performance and is used as an alternative to net income in some circumstances
# - We will use this in conjuction with `enterpriseValue` to calculate a ratio
# - `grossProfit`
# - Is the profit a company makes after deducting the costs associated with making and selling its products, or the costs associated with providing its services
# - Gross profit will appear on a company's income statement and can be calculated by subtracting the cost of goods sold (COGS) from revenue (sales)
# - We will use this in conjuction with `enterpriseValue` to calculate a ratio
# - `enterpriseValue/EBITDA` (derived ratio)
# - Is a financial valuation ratio that measures a company's return on investment (ROI)
# - The EBITDA/EV ratio may be preferred over other measures of return because it is normalized for differences between companies
# - `enterpriseValue/grossProfit` (derived ratio)
# - Is a measure of the value of a stock that compares a company's enterprise value to its revenue
#
# [Source: Investopedia](https://www.investopedia.com/)
# +
def value_df_transform(df):
    """Derive EV/EBITDA and EV/grossProfit ratios, then drop the raw inputs."""
    out = df.copy()
    out['enterpriseValue/EBITDA'] = out['enterpriseValue'] / out['EBITDA']
    out['enterpriseValue/grossProfit'] = out['enterpriseValue'] / out['grossProfit']
    return out.drop(columns=['enterpriseValue', 'EBITDA', 'grossProfit'])
def get_value_data_batch(df):
    """Fetch value-strategy metrics for every ticker in `df` from the IEX
    sandbox batch API and return them as one DataFrame.

    Tickers are batched via make_chunks() to respect the API's per-request
    symbol limit. Each batch becomes one frame, transformed by
    value_df_transform(), and the batches are concatenated at the end.
    """
    columns = ['ticker', 'latest_price', 'peRatio', 'priceToBook',
               'priceToSales', 'enterpriseValue', 'EBITDA', 'grossProfit']
    df_list = []
    for chunk in make_chunks(df):
        ticker_strings = ','.join(chunk)
        batch_api_call_url = (
            'https://sandbox.iexapis.com/stable/stock/market/batch/'
            f'?types=stats,quote,advanced-stats&symbols={ticker_strings}&token={IEX_CLOUD_API_TOKEN}'
        )
        data = requests.get(batch_api_call_url).json()
        # Build one row per ticker in a single pass instead of seven
        # parallel list comprehensions that each re-iterate data.keys().
        rows = []
        for ticker, payload in data.items():
            quote = payload['quote']
            advanced = payload['advanced-stats']
            rows.append({
                'ticker': ticker,
                'latest_price': quote['latestPrice'],
                'peRatio': quote['peRatio'],
                'priceToBook': advanced['priceToBook'],
                'priceToSales': advanced['priceToSales'],
                'enterpriseValue': advanced['enterpriseValue'],
                'EBITDA': advanced['EBITDA'],
                'grossProfit': advanced['grossProfit'],
            })
        # Explicit columns keep the schema stable even for an empty batch.
        batch_df = pd.DataFrame(rows, columns=columns).pipe(value_df_transform)
        df_list.append(batch_df)
    return pd.concat(df_list, ignore_index=True)
# -
val_df2 = get_value_data_batch(stocks)
val_df2.head()
# # Solving for Missing Data
# Some values are not available - we can `fillna()` using the column average by leveraging `apply()` and `lambda` functionality.
# missing data
val_df2.loc[val_df2.isna().any(axis='columns')]
def fill_missing_vals(df):
    """Fill NaNs in each numeric column with that column's mean.

    'ticker' is moved to the index first so the label column is excluded
    from the imputation, then restored as a regular column.
    """
    # set_index(...) (without inplace) already returns a new frame, so the
    # original defensive df.copy() was redundant and has been removed.
    return (df.set_index('ticker')
              .apply(lambda col: col.fillna(col.mean()))
              .reset_index()
           )
val_df2_fill = fill_missing_vals(val_df2)
val_df2_fill.loc[val_df2.isna().any(axis='columns')]
# # Calculating Robust Value Scores
# - Normalize rankings by converting to percentile `rank(pct=True)`
# - Sorts for the lowest scoring stocks
# - Take the top 50 to create our stock list
# - Apply the `generate_robust_value` function introduced earlier to get the exact share amounts needed, given a portfolio size
def generate_robust_value_score(df, stock_cutoff=50):
    """Rank each valuation metric as a percentile, average the percentiles
    into an rv_score, and return the `stock_cutoff` lowest-scoring stocks.
    """
    # Park the identifying columns in the index so only metrics get ranked.
    ranked = df.copy().set_index(['ticker', 'latest_price']).rank(pct=True)
    # The robust value score is the mean percentile across all metrics.
    ranked['rv_score'] = ranked.mean(axis='columns')
    # Lowest combined percentile first; keep only the top candidates.
    return ranked.sort_values('rv_score').reset_index().head(stock_cutoff)
final_df2 = (generate_robust_value_score(val_df2_fill)
.pipe(get_share_amounts)
)
final_df2.head()
# # Exporting Data to Excel
#
# Pandas can easily output to a csv file or xlsx file natively. However, if we want to output to a styled xlsx file, we can use `xlsxwriter` to customize the output to a much greater degree.
# +
# Export the recommended trades to a styled xlsx file via xlsxwriter.
writer = pd.ExcelWriter(r'.\data\value_recommended_trades.xlsx', engine='xlsxwriter')
final_df2.to_excel(writer, sheet_name='Recommended Trades', index = False)
background_color = '#0a0a23'
font_color = '#ffffff'

def make_format(num_format=None):
    """Build a workbook cell format with the shared colour scheme.

    num_format: optional xlsxwriter number-format string (e.g. '$0.00');
    omit it for plain string cells.
    """
    spec = {
        'font_color': font_color,
        'bg_color': background_color,
        'border': 1,
        'border_color': font_color
    }
    if num_format is not None:
        spec['num_format'] = num_format
    return writer.book.add_format(spec)

# One helper call per style instead of five near-identical dict literals.
string_format = make_format()
dollar_format = make_format('$0.00')
pct_format = make_format('0.00%')
float_format = make_format('0.00')
integer_format = make_format('0')

# Map each spreadsheet column to its header text and cell format.
column_formats = {
    'A': ['ticker', string_format],
    'B': ['latest_price', dollar_format],
    'C': ['peRatio', pct_format],
    'D': ['priceToBook', pct_format],
    'E': ['priceToSales', pct_format],
    'F': ['enterpriseValue/EBITDA', pct_format],
    'G': ['enterpriseValue/grossProfit', pct_format],
    'H': ['rv_score', float_format],
    'I': ['recommended_trades', integer_format]
}
sheet = writer.sheets['Recommended Trades']
for column, (header, cell_format) in column_formats.items():
    # Fixed width 20; re-write the header cell so it uses our string
    # format instead of pandas' default header style.
    sheet.set_column(f'{column}:{column}', 20, cell_format)
    sheet.write(f'{column}1', header, string_format)
sheet.hide_gridlines(2)
# NOTE(review): writer.save() is deprecated in newer pandas in favour of
# writer.close() — keep save() only while the pinned pandas supports it.
writer.save()
# -
| notebooks/quant-value-strategy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # Time-dependent FRH symmetry
import numpy as np
from frh_fx import mpl,frh,uts,blk
from matplotlib import pyplot as plt
% matplotlib inline
mpl.config(scale=1.5,print_keys=False)
# Time-dependent model parameters: constant volatility and correlation,
# with a square-root-of-time term for γ.
def σ(t): return 0.10
def ρ(t): return -0.25
def γ(t): return t**0.5
θ = σ,ρ,γ
# Maturities in years: one day, one month, one year.
T = np.array([1/252,1/12,1])
k = uts.get_logstrikes(T)
p = frh.price(k,T,θ)
# Implied-vol surface recovered from the model prices.
BS = blk.surface(k,T,p)
# Smile in log-strike space, one curve per maturity.
plot,axes = plt.subplots()
for i in range(len(T)):
    axes.plot(k[i,:],100*BS[i,:])
axes.legend([r'$\mathrm{1D}$',
             r'$\mathrm{1M}$',
             r'$\mathrm{1Y}$',])
axes.set_xlabel(r'$k$')
# NOTE(review): this runs after the loop, so %T[i] formats the *last*
# maturity (T[-1]) into the label — confirm that is intended.
axes.set_ylabel(r'$\bar{\sigma}(k,t=%.2f)$'%T[i])
uts.save_plot()
# Same smiles re-plotted against delta instead of log-strike.
Δ = uts.get_deltas(BS,k,T,ρ=ρ(0))
plot,axes = plt.subplots()
for i in range(len(T)):
    axes.plot(Δ[i,:],100*BS[i,:])
axes.legend([r'$\mathrm{1D}$',
             r'$\mathrm{1M}$',
             r'$\mathrm{1Y}$'])
axes.set_xlabel(r'$\Delta$')
# NOTE(review): as above, T[i] here is the last loop value (T[-1]).
axes.set_ylabel(r'$\bar{\sigma}(\Delta,t=%.2f)$'%T[i])
uts.save_plot()
| 5-symmetry.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 0.4.5
# language: julia
# name: julia-0.4
# ---
# # Cauchy Distribution
#
# The Cauchy distribution is defined by the probability density function:
#
# $f(x) = \dfrac{1}{\pi \, (1+x^2)}$
#
# I was intrigued by reading that this is a "pathological" distribution in that its mean and variance do not exist. This notebook is a little numerical investigation of that fact.
#
# First, let us plot the distribution around 0:
# +
using PyPlot
# Cauchy pdf: f(x) = 1/(pi*(1+x^2)).
cauchy(x) = 1/(pi*(1+x^2))
# NOTE(review): linspace was removed in Julia 1.0 (use range) — fine on
# the Julia 0.4 kernel this notebook targets.
x = linspace(-30, 30, 1000)
y = map(cauchy, x)
plot(x, y)
# -
# At first glance it looks similar to many other distributions like the Gaussian. Surely this has a mean? It's probably zero, right?
#
# Let's numerically compute the mean of a sample drawn from this distribution. To generate a sample, we use the fact that a random variable $X$ with a cumulative distribution function $F$ can be derived from a uniformly distributed variable $Y$ via:
#
# $X = F^{-1}(Y)$
#
# where $F^{-1}$ is the inverse of the CDF. For the Cauchy distribution, this works out to:
#
# $x = tan(\pi (y - \frac{1}{2}))$
# Draw 1000 Cauchy samples via the inverse-CDF transform of Uniform(0,1)
# draws, x = tan(pi*(u - 1/2)), and return their sample mean.
function sample_mean()
    draws = [tan(pi * (rand() - 0.5)) for i in 1:1000]
    mean(draws)
end
# Let's run this a few times. For a well-behaved distribution we'd expect the means of repeated samples to be fairly close together.
for i in 1:10
println(sample_mean())
end
# We see that the mean fluctuates wildly.
| Cauchy Distribution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:snakes]
# language: python
# name: conda-env-snakes-py
# ---
import modapsclient
# Create a ModapsClient object.
a = modapsclient.ModapsClient()
# ### Some examples
# List satellite instruments
sats = a.listSatelliteInstruments()
sats
a.getMaxSearchResults()
# Find products, as full list, or grouped by Groups or Instruments.
a.listProductsByInstrument('NPP')
a.listProductGroups('NPP')
a.listProductGroups('PM1M')
a.listProductsByInstrument('NPP', group='NL1_A')
a.listProductsByInstrument('PM1M', group='L1_D')
# LAADS stores files in Collections, which correspond sometimes to algorithm versions.
a.getCollections('MYD021KM')
# ### Example how to search files, find their URLs and inspect a browse image (if available)
# Search by product, date range, then four bounding coordinates (a single
# point here) and collection number — presumably (N, S, E, W); verify
# argument order against the modapsclient documentation.
a.searchForFiles('MYD021KM', '2018-03-10', '2018-03-11', 65.5, 65.5, -147.7, -147.7, collection=6)
# We can also limit by time.
a.searchForFiles('MYD021KM', '2018-03-11 12:00', '2018-03-11 23:59', 65.5, 65.5, -147.7, -147.7, collection=6)
# Inspect a single file by the LAADS file ID returned from the search.
a.getFileProperties('3095418117')
a.getFileUrls('3095418117')
a.getFileOnlineStatuses('3095418117')
a.getBrowse('3095418117')
browseurl = a.getFileUrls('3095675990')
browseurl
from IPython.display import Image
from IPython.core.display import HTML
# Display the browse image inline from its URL.
Image(url=browseurl[0], width=500)
| ipynb/modapsclient_basic_usage.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] tags=["remove_cell"]
# # Multiple Qubits and Entangled States
# -
# Single qubits are interesting, but individually they offer no computational advantage. We will now look at how we represent multiple qubits, and how these qubits can interact with each other. We have seen how we can represent the state of a qubit using a 2D-vector, now we will see how we can represent the state of multiple qubits.
# ## Contents
# 1. [Representing Multi-Qubit States](#represent)
# 1.1 [Exercises](#ex1)
# 2. [Single Qubit Gates on Multi-Qubit Statevectors](#single-qubit-gates)
# 2.1 [Exercises](#ex2)
# 3. [Multi-Qubit Gates](#multi-qubit-gates)
# 3.1 [The CNOT-gate](#cnot)
# 3.2 [Entangled States](#entangled)
# 3.3 [Visualizing Entangled States](#visual)
# 3.4 [Exercises](#ex3)
#
#
# ## 1. Representing Multi-Qubit States <a id="represent"></a>
#
# We saw that a single bit has two possible states, and a qubit state has two complex amplitudes. Similarly, two bits have four possible states:
#
# `00` `01` `10` `11`
#
# And to describe the state of two qubits requires four complex amplitudes. We store these amplitudes in a 4D-vector like so:
#
# $$ |a\rangle = a_{00}|00\rangle + a_{01}|01\rangle + a_{10}|10\rangle + a_{11}|11\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix} $$
#
# The rules of measurement still work in the same way:
#
# $$ p(|00\rangle) = |\langle 00 | a \rangle |^2 = |a_{00}|^2$$
#
# And the same implications hold, such as the normalisation condition:
#
# $$ |a_{00}|^2 + |a_{01}|^2 + |a_{10}|^2 + |a_{11}|^2 = 1$$
#
# If we have two separated qubits, we can describe their collective state using the tensor product:
#
# $$ |a\rangle = \begin{bmatrix} a_0 \\ a_1 \end{bmatrix}, \quad |b\rangle = \begin{bmatrix} b_0 \\ b_1 \end{bmatrix} $$
#
# $$
# |ba\rangle = |b\rangle \otimes |a\rangle = \begin{bmatrix} b_0 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \\ b_1 \times \begin{bmatrix} a_0 \\ a_1 \end{bmatrix} \end{bmatrix} = \begin{bmatrix} b_0 a_0 \\ b_0 a_1 \\ b_1 a_0 \\ b_1 a_1 \end{bmatrix}
# $$
#
# And following the same rules, we can use the tensor product to describe the collective state of any number of qubits. Here is an example with three qubits:
#
# $$
# |cba\rangle = \begin{bmatrix} c_0 b_0 a_0 \\ c_0 b_0 a_1 \\ c_0 b_1 a_0 \\ c_0 b_1 a_1 \\
# c_1 b_0 a_0 \\ c_1 b_0 a_1 \\ c_1 b_1 a_0 \\ c_1 b_1 a_1 \\
# \end{bmatrix}
# $$
#
# If we have $n$ qubits, we will need to keep track of $2^n$ complex amplitudes. As we can see, these vectors grow exponentially with the number of qubits. This is the reason quantum computers with large numbers of qubits are so difficult to simulate. A modern laptop can easily simulate a general quantum state of around 20 qubits, but simulating 100 qubits is too difficult for the largest supercomputers.
#
# Let's look at an example circuit:
# + tags=["thebelab-init"]
from qiskit import QuantumCircuit, Aer, assemble
from math import pi
import numpy as np
from qiskit.visualization import plot_histogram, plot_bloch_multivector
# -
# Build a 3-qubit circuit with a Hadamard on every qubit, preparing |+++>.
qc = QuantumCircuit(3)
# Apply H-gate to each qubit:
for qubit in range(3):
    qc.h(qubit)
# See the circuit:
qc.draw()
# Each qubit is in the state $|+\rangle$, so we should see the vector:
#
# $$
# |{+++}\rangle = \frac{1}{\sqrt{8}}\begin{bmatrix} 1 \\ 1 \\ 1 \\ 1 \\
# 1 \\ 1 \\ 1 \\ 1 \\
# \end{bmatrix}
# $$
# +
# Let's see the result
svsim = Aer.get_backend('statevector_simulator')
qobj = assemble(qc)
# Expect all eight amplitudes equal to 1/sqrt(8).
final_state = svsim.run(qobj).result().get_statevector()
# In Jupyter Notebooks we can display this nicely using Latex.
# If not using Jupyter Notebooks you may need to remove the
# array_to_latex function and use print(final_state) instead.
from qiskit_textbook.tools import array_to_latex
array_to_latex(final_state, pretext="\\text{Statevector} = ")
# -
# And we have our expected result.
# ### 1.1 Quick Exercises: <a id="ex1"></a>
#
# 1. Write down the tensor product of the qubits:
# a) $|0\rangle|1\rangle$
# b) $|0\rangle|+\rangle$
# c) $|+\rangle|1\rangle$
# d) $|-\rangle|+\rangle$
# 2. Write the state:
# $|\psi\rangle = \tfrac{1}{\sqrt{2}}|00\rangle + \tfrac{i}{\sqrt{2}}|01\rangle $
# as two separate qubits.
#
#
# ## 2. Single Qubit Gates on Multi-Qubit Statevectors <a id="single-qubit-gates"></a>
#
# We have seen that an X-gate is represented by the matrix:
#
# $$
# X = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}
# $$
#
# And that it acts on the state $|0\rangle$ as so:
#
# $$
# X|0\rangle = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix}\begin{bmatrix} 1 \\ 0 \end{bmatrix} = \begin{bmatrix} 0 \\ 1\end{bmatrix}
# $$
#
# but it may not be clear how an X-gate would act on a qubit in a multi-qubit vector. Fortunately, the rule is quite simple; just as we used the tensor product to calculate multi-qubit statevectors, we use the tensor product to calculate matrices that act on these statevectors. For example, in the circuit below:
# Two-qubit circuit: H on qubit 0 and X on qubit 1, applied simultaneously.
qc = QuantumCircuit(2)
qc.h(0)
qc.x(1)
qc.draw()
# we can represent the simultaneous operations (H & X) using their tensor product:
#
# $$
# X|q_1\rangle \otimes H|q_0\rangle = (X\otimes H)|q_1 q_0\rangle
# $$
#
# The operation looks like this:
#
# $$
# X\otimes H = \begin{bmatrix} 0 & 1 \\ 1 & 0 \end{bmatrix} \otimes \tfrac{1}{\sqrt{2}}\begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix} = \frac{1}{\sqrt{2}}
# \begin{bmatrix} 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
# & 1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
# \\
# 1 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
# & 0 \times \begin{bmatrix} 1 & 1 \\ 1 & -1 \end{bmatrix}
# \end{bmatrix} = \frac{1}{\sqrt{2}}
# \begin{bmatrix} 0 & 0 & 1 & 1 \\
# 0 & 0 & 1 & -1 \\
# 1 & 1 & 0 & 0 \\
# 1 & -1 & 0 & 0 \\
# \end{bmatrix}
# $$
#
# Which we can then apply to our 4D statevector $|q_1 q_0\rangle$. This can become quite messy, you will often see the clearer notation:
#
# $$
# X\otimes H =
# \begin{bmatrix} 0 & H \\
# H & 0\\
# \end{bmatrix}
# $$
#
# Instead of calculating this by hand, we can use Qiskit’s `unitary_simulator` to calculate this for us. The unitary simulator multiplies all the gates in our circuit together to compile a single unitary matrix that performs the whole quantum circuit:
# Multiply all gates in the circuit into one 4x4 unitary matrix (X ⊗ H).
usim = Aer.get_backend('unitary_simulator')
qobj = assemble(qc)
unitary = usim.run(qobj).result().get_unitary()
# and view the results:
# In Jupyter Notebooks we can display this nicely using Latex.
# If not using Jupyter Notebooks you may need to remove the
# array_to_latex function and use print(unitary) instead.
from qiskit_textbook.tools import array_to_latex
array_to_latex(unitary, pretext="\\text{Circuit = }\n")
# If we want to apply a gate to only one qubit at a time (such as in the circuit below), we describe this using tensor product with the identity matrix, e.g.:
#
# $$ X \otimes I $$
# X on qubit 1 only; qubit 0 implicitly gets the identity (X ⊗ I).
qc = QuantumCircuit(2)
qc.x(1)
qc.draw()
# Simulate the unitary
usim = Aer.get_backend('unitary_simulator')
qobj = assemble(qc)
unitary = usim.run(qobj).result().get_unitary()
# Display the results:
array_to_latex(unitary, pretext="\\text{Circuit = } ")
# We can see Qiskit has performed the tensor product:
# $$
# X \otimes I =
# \begin{bmatrix} 0 & I \\
# I & 0\\
# \end{bmatrix} =
# \begin{bmatrix} 0 & 0 & 1 & 0 \\
# 0 & 0 & 0 & 1 \\
# 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 \\
# \end{bmatrix}
# $$
#
# ### 2.1 Quick Exercises: <a id="ex2"></a>
#
# 1. Calculate the single qubit unitary ($U$) created by the sequence of gates: $U = XZH$. Use Qiskit's unitary simulator to check your results.
# 2. Try changing the gates in the circuit above. Calculate their tensor product, and then check your answer using the unitary simulator.
#
# **Note:** Different books, softwares and websites order their qubits differently. This means the tensor product of the same circuit can look very different. Try to bear this in mind when consulting other sources.
#
# ## 3. Multi-Qubit Gates <a id="multi-qubit-gates"></a>
#
# Now we know how to represent the state of multiple qubits, we are now ready to learn how qubits interact with each other. An important two-qubit gate is the CNOT-gate.
#
# ### 3.1 The CNOT-Gate <a id="cnot"></a>
#
# You have come across this gate before in _[The Atoms of Computation](../ch-states/the-atoms-of-computation)._ This gate is a conditional gate that performs an X-gate on the second qubit (target), if the state of the first qubit (control) is $|1\rangle$. The gate is drawn on a circuit like this, with `q0` as the control and `q1` as the target:
# CNOT with qubit 0 as control and qubit 1 as target.
qc = QuantumCircuit(2)
# Apply CNOT
qc.cx(0,1)
# See the circuit:
qc.draw()
# When our qubits are not in superposition of $|0\rangle$ or $|1\rangle$ (behaving as classical bits), this gate is very simple and intuitive to understand. We can use the classical truth table:
#
# | Input (t,c) | Output (t,c) |
# |:-----------:|:------------:|
# | 00 | 00 |
# | 01 | 11 |
# | 10 | 10 |
# | 11 | 01 |
#
# And acting on our 4D-statevector, it has one of the two matrices:
#
# $$
# \text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\
# 0 & 0 & 0 & 1 \\
# 0 & 0 & 1 & 0 \\
# 0 & 1 & 0 & 0 \\
# \end{bmatrix}, \quad
# \text{CNOT} = \begin{bmatrix} 1 & 0 & 0 & 0 \\
# 0 & 1 & 0 & 0 \\
# 0 & 0 & 0 & 1 \\
# 0 & 0 & 1 & 0 \\
# \end{bmatrix}
# $$
#
# depending on which qubit is the control and which is the target. Different books, simulators and papers order their qubits differently. In our case, the left matrix corresponds to the CNOT in the circuit above. This matrix swaps the amplitudes of $|01\rangle$ and $|11\rangle$ in our statevector:
#
# $$
# |a\rangle = \begin{bmatrix} a_{00} \\ a_{01} \\ a_{10} \\ a_{11} \end{bmatrix}, \quad \text{CNOT}|a\rangle = \begin{bmatrix} a_{00} \\ a_{11} \\ a_{10} \\ a_{01} \end{bmatrix} \begin{matrix} \\ \leftarrow \\ \\ \leftarrow \end{matrix}
# $$
#
# We have seen how this acts on classical states, but let’s now see how it acts on a qubit in superposition. We will put one qubit in the state $|+\rangle$:
# Put qubit 0 into |+> to see how CNOT will act on a superposition.
qc = QuantumCircuit(2)
# Apply H-gate to the first:
qc.h(0)
qc.draw()
# Let's see the result:
svsim = Aer.get_backend('statevector_simulator')
qobj = assemble(qc)
final_state = svsim.run(qobj).result().get_statevector()
# Print the statevector neatly:
array_to_latex(final_state, pretext="\\text{Statevector = }")
# As expected, this produces the state $|0\rangle \otimes |{+}\rangle = |0{+}\rangle$:
#
# $$
# |0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |01\rangle)
# $$
#
# And let’s see what happens when we apply the CNOT gate:
# H on the control then CNOT: produces the Bell state (|00> + |11>)/sqrt(2).
qc = QuantumCircuit(2)
# Apply H-gate to the first:
qc.h(0)
# Apply a CNOT:
qc.cx(0,1)
qc.draw()
# Let's get the result:
qobj = assemble(qc)
result = svsim.run(qobj).result()
# Print the statevector neatly:
final_state = result.get_statevector()
array_to_latex(final_state, pretext="\\text{Statevector = }")
# We see we have the state:
#
# $$
# \text{CNOT}|0{+}\rangle = \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)
# $$
#
# This state is very interesting to us, because it is _entangled._ This leads us neatly on to the next section.
# ### 3.2 Entangled States <a id="entangled"></a>
#
# We saw in the previous section we could create the state:
#
# $$
# \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)
# $$
#
# This is known as a _Bell_ state. We can see that this state has 50% probability of being measured in the state $|00\rangle$, and 50% chance of being measured in the state $|11\rangle$. Most interestingly, it has a **0%** chance of being measured in the states $|01\rangle$ or $|10\rangle$. We can see this in Qiskit:
# Measurement statistics of the Bell state: only 00 and 11 should appear.
plot_histogram(result.get_counts())
# This combined state cannot be written as two separate qubit states, which has interesting implications. Although our qubits are in superposition, measuring one will tell us the state of the other and collapse its superposition. For example, if we measured the top qubit and got the state $|1\rangle$, the collective state of our qubits changes like so:
#
# $$
# \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle) \quad \xrightarrow[]{\text{measure}} \quad |11\rangle
# $$
#
# Even if we separated these qubits light-years away, measuring one qubit collapses the superposition and appears to have an immediate effect on the other. This is the [‘spooky action at a distance’](https://en.wikipedia.org/wiki/Quantum_nonlocality) that upset so many physicists in the early 20th century.
#
# It’s important to note that the measurement result is random, and the measurement statistics of one qubit are **not** affected by any operation on the other qubit. Because of this, there is **no way** to use shared quantum states to communicate. This is known as the no-communication theorem.[1]
# ### 3.3 Visualizing Entangled States<a id="visual"></a>
#
# We have seen that this state cannot be written as two separate qubit states, this also means we lose information when we try to plot our state on separate Bloch spheres:
# Per-qubit Bloch vectors — entanglement information is lost in this view.
plot_bloch_multivector(final_state)
# Given how we defined the Bloch sphere in the earlier chapters, it may not be clear how Qiskit even calculates the Bloch vectors with entangled qubits like this. In the single-qubit case, the position of the Bloch vector along an axis nicely corresponds to the expectation value of measuring in that basis. If we take this as _the_ rule of plotting Bloch vectors, we arrive at this conclusion above. This shows us there is _no_ single-qubit measurement basis for which a specific measurement is guaranteed. This contrasts with our single qubit states, in which we could always pick a single-qubit basis. Looking at the individual qubits in this way, we miss the important effect of correlation between the qubits. We cannot distinguish between different entangled states. For example, the two states:
#
# $$\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle) \quad \text{and} \quad \tfrac{1}{\sqrt{2}}(|00\rangle + |11\rangle)$$
#
# will both look the same on these separate Bloch spheres, despite being very different states with different measurement outcomes.
#
# How else could we visualize this statevector? This statevector is simply a collection of four amplitudes (complex numbers), and there are endless ways we can map this to an image. One such visualization is the _Q-sphere,_ here each amplitude is represented by a blob on the surface of a sphere. The size of the blob is proportional to the magnitude of the amplitude, and the colour is proportional to the phase of the amplitude. The amplitudes for $|00\rangle$ and $|11\rangle$ are equal, and all other amplitudes are 0:
# Q-sphere view: blob size shows amplitude magnitude, colour shows phase.
from qiskit.visualization import plot_state_qsphere
plot_state_qsphere(final_state)
# Here we can clearly see the correlation between the qubits. The Q-sphere's shape has no significance, it is simply a nice way of arranging our blobs; the number of `0`s in the state is proportional to the states position on the Z-axis, so here we can see the amplitude of $|00\rangle$ is at the top pole of the sphere, and the amplitude of $|11\rangle$ is at the bottom pole of the sphere.
# ### 3.4 Exercise: <a id="ex3"></a>
# 1. Create a quantum circuit that produces the Bell state: $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$.
# Use the statevector simulator to verify your result.
#
# 2. The circuit you created in question 1 transforms the state $|00\rangle$ to $\tfrac{1}{\sqrt{2}}(|01\rangle + |10\rangle)$, calculate the unitary of this circuit using Qiskit's simulator. Verify this unitary does in fact perform the correct transformation.
#
# 3. Think about other ways you could represent a statevector visually. Can you design an interesting visualization from which you can read the magnitude and phase of each amplitude?
# ## 4. References
#
# [1] <NAME>, <NAME>, _Quantum Information and Relativity Theory,_ 2004, https://arxiv.org/abs/quant-ph/0212023
# Record the Qiskit component versions used to run this notebook.
import qiskit
qiskit.__qiskit_version__
| notebooks/ch-gates/multiple-qubits-entangled-states.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
os.chdir("C:/Users/kennedy/Desktop/data")
X_train = pd.read_csv("4910797b-ee55-40a7-8668-10efd5c1b960.csv")
X_train.set_index('id', inplace = True)
X_train.info()
# NOTE(review): X_train is immediately overwritten below — the first CSV is
# only inspected via .info() and never used afterwards. Confirm intended.
X_train = pd.read_csv("702ddfc5-68cd-4d1d-a0de-f5f566f76d91.csv")
X_train.set_index('id', inplace = True)
X_train.info()
Y_train = pd.read_csv("0bf8bc6e-30d0-4c50-956a-603fc693d966.csv")
#X_train.set_index('id', inplace = True)
import seaborn as sns
# Class balance of the target labels.
sns.countplot(Y_train['status_group'])
Y_train.info()
| Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
data = np.genfromtxt('train.csv', delimiter = ',')
data
# Train on the first column only, reshaped to the (n_samples, 1)
# 2-D shape sklearn expects.
xtrain=data[:,0].reshape(-1,1)
xtrain.shape
# Column 10 holds the target variable.
ytrain=data[:,10]
from sklearn import model_selection
from sklearn.linear_model import LinearRegression
alg1=LinearRegression()
alg1.fit(xtrain,ytrain)
# Extract the fitted slope and intercept for manual prediction below.
m=alg1.coef_
c=alg1.intercept_
def pred(x, m, c):
    """Return linear predictions m*x + c, rounded to 5 decimal places.

    Works element-wise for both 1-D and 2-D inputs. The original
    per-element round() loop crashed on 2-D arrays, because Python's
    round() is undefined for multi-element ndarrays; np.round is
    vectorized and handles any shape.
    """
    return np.round(x * m + c, 5)
# m
data2 = np.genfromtxt('test.csv', delimiter = ',')
# NOTE(review): the model was fitted on a single feature (column 0), but
# prediction uses 10 columns here — confirm whether xtest should be
# data2[:,0].reshape(-1,1) to match training.
xtest=data2[:,0:10]
ypred=pred(xtest,m,c)
ypred
np.savetxt('predictions.csv',ypred)
| Pandas and Numpy/notebook1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# ### An Introduction to the Simplified Molecular Input Line Entry System (SMILES)
# Install the necessary Python packages
# !pip install rdkit-pypi
# Load the necessary Python libraries
# +
from rdkit.Chem.Draw import IPythonConsole
from rdkit.Chem import rdDepictor
from rdkit import Chem
IPythonConsole.ipython_useSVG = True
IPythonConsole.molSize = 300, 300
rdDepictor.SetPreferCoordGen(True)
# -
# SMILES provides a simple, compact, somewhat human-readable notation for representing molecules. SMILES, which is an acronym for **S**implified **M**olecular **I**nput **L**ine **E**ntry **S**ystem, was developed by [<NAME>](https://pubs.acs.org/doi/10.1021/ci00057a005) in the 1980s, a time when software like ChemDraw or Marvin Sketch for drawing chemical structures was unheard of. While we now have easy access to chemical sketchers and don't typically have to create SMILES by hand, the format is still widely used. Over time, SMILES has become one of a small number of generally accepted formats for representing chemical structures. SMILES are also often referred to as "SMILES strings". While it's fine to refer to a SMILES as a SMILES string, one should **never** refer to a single SMILES as "SMILE". Remember, "S" stands for "system". Please don't refer to a "SMILE" or a "SMILE string". If you do this, I will roll my eyes. You have been warned!
#
# The canonical reference for SMILES is the [Daylight Theory Manual](https://www.daylight.com/dayhtml/doc/theory/), I've borrowed liberally from that here. The [Daylight SMILES Tutorial](https://www.daylight.com/dayhtml_tutorials/languages/smiles/index.html) is another essential reference. For a bit more information on the history and folklore around SMILES, please consult this article from [Chemistry World](https://www.chemistryworld.com/opinion/weiningers-smiles/4014639.article).
# ### Atoms
#
# SMILES are a combination of text symbols that represent atoms, bonds, and ring closures. Common organic elements are represented by their atomic symbols (B,C,N,O,S,F,Cl,Br,I) while less usual atomic symbols with two characters are represented in square brackets ([Au],[Ag],[Se]).
#
# ### Bonds
#
# Single bonds between adjacent atoms are implicit. For instance, "CCC" will represent "n-propane". Double bonds are indicated by "=" and triple bonds are indicated by "#". Let's take a look at how we can depict the SMILES for a few molecules. In order to do this, we'll use the RDKit Python library. We will start by importing the RDKit Chemistry library.
# We can use the RDKit function **Chem.MolFromSmiles** to covert a SMILES to a molecule object that can be depicted. There are two ways that we can display a molecule's structure in the RDKit. As shown below, we can create a molecule object and assign that object to a variable, here the variable is named "propane". If we put a variable name on the final line of a cell in a Jupyter notebook, the value of that variable is displayed. When that variable is a molecule, its structure is shown.
propane = Chem.MolFromSmiles("CCC")
propane
# We can also simply display the results of **Chem.MolFromSmiles**.
Chem.MolFromSmiles("CCC")
# Let's use a double bond to construct the SMILES for 2-pentene. You may ask why I didn't call the variable in the cell below **2-pentene**. In Python, variable names cannot begin with a number. In Python, the string "2-pentene" would be interpreted as "two minus pentene".
pentene = Chem.MolFromSmiles("CC=CCC")
pentene
# In a similar fashion, we can incorporate a triple bond and write the SMILES for 2-pentyne.
pentyne = Chem.MolFromSmiles("CC#CCC")
pentyne
# Of course, we can go beyond simple hydrocarbons, let's look at some more interesting examples.
ethanol = Chem.MolFromSmiles("CCO")
ethanol
# The ethanol example above brings up another interesting point about SMILES, hydrogens are **usually** implicit. There are a few exceptions that we'll get to later. Note that when we wrote the SMILES for ethanol, we didn't have to specify the hydrogen on the oxygen. Oxygen normally has a valence of two, the oxygen in ethanol only has one attached hydrogen, therefore we assume it has one hydrogen attached. Similarly, the first carbon in ethanol has one other carbon attached, since carbon typically has a valence of four we assume that it has three hydrogens attached. Neither the hydrogen on the alcohol oxygen nor the carbons needs to be explicitly specified.
#
# This brings up another point. Hydrogen atoms can be used in SMILES, but the hydrogens must be contained within square brackets and associated with a heavy atom. For instance, it is valid to write the SMILES for ethanol as CC[OH], it's equally valid to write ethanol as [CH3][CH2][OH]. As an exercise, depict the two representations above for ethanol. Note that if you specify the wrong number of explicit hydrogens, the molecule will be considered to be a radical. Try depicting [CH2][CH2][OH].
# +
#Write your code here
# -
# ### Charges
#
# Positive and negative charges are included by adding "+" or "-" in square brackets with the atom type. In addition, hydrogens attached to the atom must be explicitly specified.
# Here's ethylamine
ethylamine = Chem.MolFromSmiles("CC[NH3+]")
ethylamine
# Here's acetate, more on parentheses and branching below.
acetate = Chem.MolFromSmiles("CC(=O)[O-]")
acetate
# ### Disconnected Structures
#
# In some cases, a molecule may contain multiple entities that are not connected by a covalent bond. In SMILES, the two entities are represented as two SMILES separated by a ".". For instance, let's look at acetate with a sodium salt. The positioning of the sodium salt in the depiction isn't ideal but the SMILES is valid.
acetate_salt = Chem.MolFromSmiles("CC(=O)[O-].[Na+]")
acetate_salt
# ### Branching
#
# Branching in SMILES is indicated with parentheses. For instance, one can depict acetone using the SMILES "CC(=O)C". Note how the parentheses are used to indicate the branching in the carbonyl.
acetone = Chem.MolFromSmiles("CC(=O)C")
acetone
# Let's take a look at a few more examples. In order to look at more than one example, we'll put the SMILES and the names of the associated molecules into a text buffer.
# define a text buffer for our examples
examples = """C(C)(C)O isopropanol
C(Cl)(Cl)(Cl)Cl carbon tetrachloride
CC(=O)O acetic acid"""
# note that we use the second argument to split so each line yields only two tokens
# (SMILES and name) even when the name itself contains spaces
smiles_list = [x.split(" ",1) for x in examples.split("\n")]
smiles_list
# In order to make our lives a little easier, we're going to define a function that will use the MolsToGridImage function from the RDKit to display multiple structures.
# +
from rdkit.Chem.Draw import MolsToGridImage
def display_structures(smiles_list):
    """Render a grid image of structures from a list of (SMILES, name) pairs.

    Each pair is parsed with Chem.MolFromSmiles and drawn with the name
    as its legend, four molecules per row.
    """
    mols = [Chem.MolFromSmiles(smi) for smi, _ in smiles_list]
    legends = [label for _, label in smiles_list]
    return MolsToGridImage(mols, legends=legends, molsPerRow=4)
# -
display_structures(smiles_list)
# ### Exercise
#
# Write the SMILES and display a table of chemical structures for the following. If you don't know the structures, google the names.
#
# 1. 2-methylpentene
# 2. isopropanal
# 3. 2-pentyn-1-ol
# 4. 1,2,2,3-tetrafluorobutane
# 5. propanoic acid
# 6. 2-t-butyl-3-hydroxy-propane
# +
#Write your code here
# -
# ### Rings
# In SMILES, rings are indicated by specifying a "closure digit" indicating atoms that are connected. For instance, the SMILES for cyclohexane is C1CCCCC1.
Chem.MolFromSmiles("C1CCCCC1")
# Double bonds and heteroatoms can be put into rings in the same way they were in acyclic structures.
mol = Chem.MolFromSmiles("C1=CCOC1")
mol
# define a text buffer for our examples
# (fixed: C1CC1 is a three-membered ring, cyclopropane — it was mislabeled
# as cyclohexane, which would be C1CCCCC1)
examples = """C1CC1 cyclopropane
C1CCC1 cyclobutane
C1CCC=C1 cyclopentene"""
# note that we use the second argument to split to only return two tokens
ring_smiles_list = [x.split(" ",1) for x in examples.split("\n")]
display_structures(ring_smiles_list)
# In the case of fused ring systems, a single atom can have multiple ring closure digits.
mol = Chem.MolFromSmiles("C12CCCCC1CCCC2")
mol
# Also, note that a ring closure digit can be "reused". In the SMILES below the digit "1" is used to indicate the closure of two different rings. Note that the SMILES could also be expressed as "C1CCCCC1C2CCCCO2". Go ahead, try it.
Chem.MolFromSmiles("C1CCCCC1C1CCCCO1")
# ### Exercise
# Write the SMILES and display a table of chemical structures for the following.
# 1. 1,2-dimethylcyclopropane
# 2. 1-methyl-3,3-dimethylcyclohexane
# 3. piperazine
# 4. hydrindane
# 5. norbornane
# 6. cyclopentanol
# +
#Write your code here
# -
# ### Aromaticity
#
# Aromatic atoms are indicated by lower case characters. According to Daylight C,N,P,O,S,As, and Se can be considered aromatic. As an example, benzene can be expressed as c1ccccc1 and pyridine as c1ccccn1. Note that in the same way there are implicit single bonds between uppercase characters (e.g. CCC) there are implicit aromatic bonds between lowercase characters.
# define a text buffer for our examples
examples = """c1ccccc1 benzene
c1ccccn1 pyridine"""
aromatic_smiles_list = [x.split(" ",1) for x in examples.split("\n")]
display_structures(aromatic_smiles_list)
# When an aromatic nitrogen has an attached hydrogen, this hydrogen must be explicitly specified. Note that if we don't specify an explicit hydrogen on the nitrogen in pyrrole, we get an error.
mol = Chem.MolFromSmiles("c1cccn1")
# If we include the attached hydrogen, the valid SMILES is processed.
mol = Chem.MolFromSmiles("c1ccc[nH]1")
mol
# We also need to include explicit hydrogens when we have an aromatic carbon with a charge. For instance, we must include an explicit hydrogen on the anionic carbon in cyclopentadienyl anion.
mol = Chem.MolFromSmiles("c1ccc[cH-]1")
mol
# ### Exercise
#
# Write the SMILES and display a table of chemical structures for the following.
#
# 1. furan
# 2. benzimidazole
# 3. toluene
# 4. indole
# 5. tropylium cation
# 6. naphthalene
# +
#Write your code here
# -
# ### Stereochemistry
#
# In SMILES, tetrahedral stereochemistry is indicated by "@" indicating anticlockwise and "@@" indicating clockwise. Quoting from the Daylight SMILES tutorial.
# <pre>
# Looking FROM the 1st neighbor listed in the SMILES TO the chiral atom, the other three neighbors appear anticlockwise or clockwise in the order listed.
# </pre>
# Also, note that a chiral atom will be listed in square brackets and any attached hydrogens must be explicitly specified.
Chem.MolFromSmiles("[C@H](C)(Cl)(Br)")
Chem.MolFromSmiles("[C@@H](C)(Cl)(Br)")
# E,Z isomerism in double bonds is shown with "/" and "\\". In the cells below, we can see the visual mnemonic with trans-2-butene as C/C=C/C and cis-2-butene as C/C=C\C.
Chem.MolFromSmiles("C/C=C/C")
Chem.MolFromSmiles("C/C=C\C")
# ### Canonical SMILES
# There are many ways to write a SMILES for the same molecule. For instance, in the cell below, we show five different ways to write the SMILES for pentane.
pentane_smiles_list = ["CCCCC","C(CCCC)","C-C-C-C-C","[CH3][CH2][CH2][CH2][CH3]","CC(CCC)"]
# If the line below doesn't make sense to you, take a look at list comprehensions in Python
pentane_mol_list = [Chem.MolFromSmiles(x) for x in pentane_smiles_list]
Chem.Draw.MolsToGridImage(pentane_mol_list)
# It's convenient to be able to write many SMILES for the same molecule, but this can also be a problem. Let's say we're trying to keep track of the number of unique molecules in a set. If the same molecule can have several SMILES, it can be difficult to keep track of which molecules are the same. Fortunately, [the RDKit](https://www.rdkit.org/docs/source/rdkit.Chem.rdmolfiles.html) and many other Cheminformatics toolkits have methods for creating what is called a **canonical** representation. These methods should always create the same SMILES for a molecule. Here's an example where we create a canonical SMILES for the pentane_smiles_list above. Note that all the input SMILES are different but all the output SMILES are the same. It's important to know that all Cheminformatics toolkits do not generate the same canonical SMILES.
pentane_smiles_list = ["CCCCC","C(CCCC)","C-C-C-C-C","[CH3][CH2][CH2][CH2][CH3]","CC(CCC)"]
# If the line below doesn't make sense to you, take a look at list comprehensions in Python
pentane_mol_list = [Chem.MolFromSmiles(x) for x in pentane_smiles_list]
for mol in pentane_mol_list:
pentane_smiles = Chem.MolToSmiles(mol)
print(pentane_smiles)
# Canonical SMILES typically don't resolve tautomers to the same structure. Specialized methods that are beyond the scope of this discussion are often required to create the same SMILES from Tautomers. In the example below we look at the SMILES for the keto and enol tautomers of pyridone.
tautomer_smiles_list = ["C1C=CC(=O)NC=1","C1=CC=C(O)N=C1"]
tautomer_mol_list = [Chem.MolFromSmiles(x) for x in tautomer_smiles_list]
Chem.Draw.MolsToGridImage(tautomer_mol_list)
# Now let's generate canonical SMILES for each of these species. In looking at the SMILES below, please notice two things.
# - The SMILES for the ring atoms have been converted to lowercase, indicating that this is an aromatic system.
# - The two tautomers still have different SMILES
for mol in tautomer_mol_list:
print(Chem.MolToSmiles(mol))
# ### Exercise
#
# Complete the code below to determine the number of unique molecules represented by **smiles_list**
buff = """C(=N)(N)N.Cl
Cn1c(=O)c2[nH]cnc2n(C)c1=O.Cn1c(=O)c2[nH]cnc2n(C)c1=O.NCCN
Nc1ccc(S(=O)(=O)Nc2ccccn2)cc1
Cl.N=C(N)N
CCC(C)C1(C(=O)NC(=O)[N-]C1=O)CC.[Na+]
C[C@]12CCC(=O)C=C1CC[C@@H]3[C@@H]2CC[C@]4([C@H]3CC[C@]4(C)O)C
CCC(C)C1(CC)C(=O)[N-]C(=O)NC1=O.[Na+]
Cn1c2c(c(=O)n(c1=O)C)[nH]cn2.Cn1c2c(c(=O)n(c1=O)C)[nH]cn2.C(CN)N
C[C@]1(O)CC[C@H]2[C@@H]3CCC4=CC(=O)CC[C@]4(C)[C@H]3CC[C@@]21C
c1ccnc(c1)NS(=O)(=O)c2ccc(cc2)N"""
smiles_list = buff.split("\n")
| fundamentals/SMILES_tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Artificial Intelligence, Machine Learning and Deep Learning - Are they same?
#
# 
#
# The most common confusion that beginners have when they start to learn Machine Learning is- “Are Artificial Intelligence(AI), Machine Learning(ML) and Deep Learning(DL) the same thing?”
#
# And that’s a question which I personally hear a lot from so many people. So, I thought why not make a blog post to explain it?
#
# ## Artificial Intelligence (AI)
#
# Artificial intelligence refers to the capability of a machines to imitate intelligent human behaviour. In other words, any device that is able to perform tasks that normally require human intelligence such as making decisions or giving (meaningful) responses in a conversation can be said to have some kind of artificial intelligence.
#
# Particularly, this is a broad area of computer science, which deals with the simulation of intelligent behavior in computers.
#
# Virtual assistants are very good examples of implementation of AI in real life. With the power of AI, virtual assistants such as Google Assistant or Siri (by Apple) can nowadays imitate human behaviours with an exceptional perfection.
#
#
# AI can be implemented through huge collection of explicitly programmed if-else statements, or a complex statistical model with advanced parameters. But, the end goal of AI remains same- “Simulation of human intelligence in machines”.
#
# The practice of explicitly programming if-else statements for implementing AI models is known as “Good Old-Fashioned Artificial Intelligence” (GOFAI), which was the dominant paradigm of AI research from the mid-1950s until the late 1980s.
#
# Some AI models might be termed as “weak AI” models since they only focus on getting the work done without figuring out how human reasoning works. One such example could be- bots created using Artificial Intelligence Markup Language (AIML). These bots try to give responses by simply matching some basic patterns against the input using an XML-like structure of conditions without trying to “understand” the conversation.
#
# On the other hand, AI systems which pretend to think exactly like humans do, are termed as “strong AI” models. We have already seen an example of this while discussing about Google Assistant.
#
# ## Machine Learning (ML)
#
# While AI is a broad area of cognitive computing, Machine Learning is a particular subset of AI which mainly deals with the study of how machines learn.
#
# Doesn’t make sense? Let me clear that for you.
#
# Early AI researchers found some problems way much harder to solve with the early available techniques of AI (GOFAI). Hard coded algorithms were simply not useful for solving those problems.
#
# For example, how would you make a computer to distinguish between pictures of cats and dogs? Will you write hard-coded algorithms for identifying each picture individually? What happens if it sees a picture that it has never seen before? What happens if it sees a partial picture? Will it still be able to recognize it?
#
# 
#
# It turns out that for solving these kinds of problems, instead of just imitating human behaviour, researchers started to find ways to imitate how humans learn. And that’s what gave rise to the field of Machine Learning. It is the field of study that gives computers the ability to learn without being explicitly programmed.
#
# More specifically, in case of machine learning problems, you need to give the computer some data (lots of cat and dog images in this case), tell it to “learn” from that data, and then predict the correct output for new, unseen data.
#
# Practically, this is nothing but a mathematical function (model) which maps inputs to its correct outputs. The task of Machine Learning engineers is to figure out and implement this “mathematical function” in such a way that programmers won’t need to write “hard-coded” algorithms for solving each individual problem.
#
# ## Deep Learning (DL)
#
# Before we discuss anything about this, I want you to have a look at the following image:
#
# 
#
# Now look at this image:
#
# 
#
# What did you see? 4?
#
# Easy, right? It took you just fraction of a second to figure out that both of these images represent the number four.
#
# Even though these images don’t look exactly alike (in terms of pixel values), you still were able to recognize these images and label them as the number four. Even poor resolution was not a hurdle for you. How amazing! Let’s take a moment to appreciate our brain for doing this.
#
# But, can you explain the process, exactly how you were able to identify these images?
#
# This is one thing that you did intuitively. Using your natural instincts, you “felt” that these images are of the number four, without following any particular process for identifying them. And it goes without saying that you did a great job at doing it.
#
# So, can we implement a similar mechanism in computers to make them perform tasks that we do intuitively?
#
# Yeah, you guessed it right! This paradigm of machine learning is called Deep Learning (DL).
#
# Deep Learning is a subfield of machine learning which is concerned with algorithms inspired by the structure and function of the brain.
#
# It consists of a particular type of mathematical model that can be thought of as a collection of simple blocks, distributed across multiple layers. Each of these blocks are specialized to perform a particular task and can be adjusted to better predict the final outcome.
#
# 
#
# Human brain consists of billions of neurons interconnected to each other. Each neuron receives a signal, processes the signal, and passes it on to the other neurons. This is how the information is processed in our brain.
#
# These blocks try to simulate the behaviour of neuron cells present in human brain for performing a particular task. Hence, the collection of these blocks can be referred to as Artificial Neural Network (ANN).
#
# Deep Learning is called “deep” because the composition of these blocks are stacked on top of each other forming multiple layers. The deeper you go, the more complex features are extracted.
#
# ## Conclusion
#
# Although terms like artificial intelligence, machine learning and deep learning are used interchangeably but, they are not the same thing. Machine learning is the subset of artificial intelligence and deep learning is a subset of machine learning.
#
# 
#
# Here, the key takeaway is:
#
# - Artificial Intelligence deals with imitating human intelligence.
# - Machine Learning deals with imitating how humans learn.
# - Deep learning deals with imitating how human brain processes data with the help of neurons.
#
#
# Follow our [Medium Publication](https://medium.com/code-heroku) to get regular updates on these kind of posts.
#
# >If this article was helpful to you, check out our [Introduction to Machine Learning](http://www.codeheroku.com/course?course_id=1) Course at [Code Heroku](http://www.codeheroku.com/) for a complete guide to Machine Learning.
#
# <br><br>
# <p align="center"><a href="http://www.codeheroku.com/">
# <img src="http://www.codeheroku.com/static/images/logo5.png"></a>
# </p>
#
# <br>
| Blog Posts/AI, ML and Deep Learning - Are they same?/AI_vs_ML_vs_DL_Difference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## A New Experimental Dataset for Clustering!
# +
import os
import requests
WALKING_DATASET = (
"https://archive.ics.uci.edu/ml/machine-learning-databases/00286/User%20Identification%20From%20Walking%20Activity.zip",
)
def download_data(path='data', urls=WALKING_DATASET):
    """Download each dataset archive in *urls* into directory *path*.

    Creates *path* (including any missing parents) if needed and saves each
    file under its URL basename. Raises requests.HTTPError on a failed
    download instead of silently writing an HTML error page to disk as if
    it were the archive.
    """
    # makedirs(exist_ok=True) handles both the missing-dir and nested-path
    # cases; the original os.mkdir failed on nested paths.
    os.makedirs(path, exist_ok=True)

    for url in urls:
        response = requests.get(url)
        # Fail loudly on 4xx/5xx responses rather than saving the error body.
        response.raise_for_status()
        name = os.path.basename(url)
        with open(os.path.join(path, name), 'wb') as f:
            f.write(response.content)
download_data()
# +
import zipfile
# Use a context manager so the archive's file handle is closed even if
# extraction raises (the original left the ZipFile open).
with zipfile.ZipFile(os.path.join('data', 'User%20Identification%20From%20Walking%20Activity.zip')) as z:
    z.extractall(os.path.join('data', 'walking'))
# +
import pandas as pd

# NOTE(review): the original draft contained a dangling `df = pd.` fragment,
# which is a SyntaxError and prevented the whole cell from running; it has
# been removed. TODO: actually assemble the per-user CSVs into a DataFrame.
# Each CSV in the extracted archive is named "<user_id>.csv".
for root, dirs, files in os.walk(os.path.join('data', 'walking','User Identification From Walking Activity')):
    for file in files:
        if file.endswith(".csv"):
            # Print the numeric user id parsed from the filename stem.
            print(int(os.path.splitext(file)[0]))
# -
| _drafts/walking-data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="MhoQ0WE77laV"
# ##### Copyright 2018 The TensorFlow Authors.
# + cellView="form" colab_type="code" id="_ckMIh7O7s6D" colab={}
#@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# + cellView="form" colab_type="code" id="vasWnqRgy1H4" colab={}
#@title MIT License
#
# Copyright (c) 2017 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# # copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# + [markdown] colab_type="text" id="jYysdyb-CaWM"
# # Classificazione base: Classifica immagini di capi d'abbigliamento
# + [markdown] colab_type="text" id="S5Uhzt6vVIB2"
# <table class="tfo-notebook-buttons" align="left">
# <td>
# <a target="_blank" href="https://www.tensorflow.org/tutorials/keras/classification"><img src="https://www.tensorflow.org/images/tf_logo_32px.png" />Visualizza su TensorFlow.org</a>
# </td>
# <td>
# <a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs/blob/master/site/it/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png" />Esegui in Google Colab</a>
# </td>
# <td>
# <a target="_blank" href="https://github.com/tensorflow/docs/blob/master/site/it/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png" />Visualizza il sorgente su GitHub</a>
# <td>
# <a href="https://storage.googleapis.com/tensorflow_docs/docs/site/en/tutorials/keras/classification.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png" />Scarica il notebook</a>
# </td>
# </table>
# + [markdown] id="BYzaKBe8YXg0" colab_type="text"
# Note: La nostra comunità di Tensorflow ha tradotto questi documenti. Poichè queste traduzioni sono *best-effort*, non è garantito che rispecchino in maniera precisa e aggiornata la [documentazione ufficiale in inglese](https://www.tensorflow.org/?hl=en).
# Se avete suggerimenti per migliorare questa traduzione, mandate per favore una pull request al repository Github [tensorflow/docs](https://github.com/tensorflow/docs).
# Per proporsi come volontari alla scrittura o alla review delle traduzioni della comunità contattate la
# [mailing list <EMAIL>](https://groups.google.com/a/tensorflow.org/forum/#!forum/docs).
# + [markdown] colab_type="text" id="FbVhjPpzn6BM"
# Questa guida allena un modello di rete neurale a classificare immagini di capi d'abbigliamento, come scarpe da ginnastica e magliette. E' normale che tu non comprenda tutti i dettagli; questa è una panoramica rapidissima di un programma completo TensorFlow con i dettagli spiegati durante il percorso.
#
# Questa guida usa [tf.keras](https://www.tensorflow.org/guide/keras), un'API di alto livello per costruire ed addestrare modelli in TensorFlow.
# + colab_type="code" id="jL3OqFKZ9dFg" colab={}
try:
# # %tensorflow_version only exists in Colab.
# %tensorflow_version 2.x
except Exception:
pass
# + colab_type="code" id="dzLKpmZICaWN" colab={}
from __future__ import absolute_import, division, print_function, unicode_literals
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
# + [markdown] colab_type="text" id="yR0EdgrLCaWR"
# ## Importare il dataset Fashion MNIST
# + [markdown] colab_type="text" id="DLdCchMdCaWQ"
# Questa guida usa il dataset [Fashion MNIST](https://github.com/zalandoresearch/fashion-mnist) che contiene 70,000 immagini in toni di grigio di 10 categorie. Le immagini mostrano singoli articoli di abbigliamento a bassa risoluzione (28 per 28 pixel), come visibile sotto:
#
# <table>
# <tr><td>
# <img src="https://tensorflow.org/images/fashion-mnist-sprite.png"
# alt="Fashion MNIST sprite" width="600">
# </td></tr>
# <tr><td align="center">
# <b>Figura 1.</b> <a href="https://github.com/zalandoresearch/fashion-mnist">Fashion-MNIST samples</a> (by Zalando, MIT License).<br/>
# </td></tr>
# </table>
#
# Fashion MNIST va inteso come l'equivalente del classico dataset [MNIST](http://yann.lecun.com/exdb/mnist/) —spesso usato come "Hello, World" dei programmi di machine learning per la visione computerizzata. Il dataset MNIST contiene immagini di caratteri scritti a mano (0, 1, 2, ecc.) in un formato identico a quello degli articoli di abbigliamento che useremo qui.
#
# Questa guida usa Fashion MNIST per varietà, e perchè costituisce un problema leggermente più impegnativo del tipico MNIST. Entrambe i dataset sono relativamente piccoli e sono usati per verificare che un algoritmo lavori come previsto. Sono un buon punto di partenza per provare e correggere gli errori del codice.
#
# Qui, 60,000 immagini sono utilizzate per addestrare la rete e 10,000 immagini per valutare quanto accuratamente la rete ha imparato a classificare immagini. Potete accedere al Fashion MNIST direttamente da TensorFlow. Importare e caricare i dati Fashion MNIST direttamente da TensorFlow:
# + colab_type="code" id="7MqDQO0KCaWS" colab={}
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
# + [markdown] colab_type="text" id="t9FDsUlxCaWW"
# Il caricamento dei dati restituisce quattro vettori NumPy:
#
# * I vettori `train_images` e `train_labels` sono gli *insiemi di addestramento*—i dati che il modello usa per imparare.
# * Il modello è verificato con l' *insieme di verifica*, i vettori `test_images`, e `test_labels`.
#
# Le immagini sono vettori di 28x28 NumPy, in cui i valori dei pixel vanno da 0 a 255. Le *etichette* sono un vettore di interi, da 0 a 9. Esse corrispondono alle *classi* di capi di abbigliamento rappresentate dalle immagini:
#
# <table>
# <tr>
# <th>Label</th>
# <th>Class</th>
# </tr>
# <tr>
# <td>0</td>
# <td>T-shirt/top</td>
# </tr>
# <tr>
# <td>1</td>
# <td>Trouser</td>
# </tr>
# <tr>
# <td>2</td>
# <td>Pullover</td>
# </tr>
# <tr>
# <td>3</td>
# <td>Dress</td>
# </tr>
# <tr>
# <td>4</td>
# <td>Coat</td>
# </tr>
# <tr>
# <td>5</td>
# <td>Sandal</td>
# </tr>
# <tr>
# <td>6</td>
# <td>Shirt</td>
# </tr>
# <tr>
# <td>7</td>
# <td>Sneaker</td>
# </tr>
# <tr>
# <td>8</td>
# <td>Bag</td>
# </tr>
# <tr>
# <td>9</td>
# <td>Ankle boot</td>
# </tr>
# </table>
#
# Ogni immagine è mappata in una singola etichetta. Dato che i *nomi delle classi* non sono inclusi nel dataset, vengono memorizzati qui, per usarli successivamente durante la visualizzazione delle immagini:
# + colab_type="code" id="IjnLH5S2CaWx" colab={}
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
# + [markdown] colab_type="text" id="Brm0b_KACaWX"
# ## Esplorare i dati
#
# Esploriamo il formato dei dati prima di allenare il modello. Il seguito mostra che ci sono 60,000 immagini nell'insieme di addestramento, con ciascuna immagine rappresentata con 28 x 28 pixel:
# + colab_type="code" id="zW5k_xz1CaWX" colab={}
train_images.shape
# + [markdown] colab_type="text" id="cIAcvQqMCaWf"
# Analogamente, nell'insieme di addestramento, ci sono 60,000 etichette:
# + colab_type="code" id="TRFYHB2mCaWb" colab={}
len(train_labels)
# + [markdown] colab_type="text" id="YSlYxFuRCaWk"
# Ogni etichetta è un intero tra 0 e 9:
# + colab_type="code" id="XKnCTHz4CaWg" colab={}
train_labels
# + [markdown] colab_type="text" id="TMPI88iZpO2T"
# Ci sono 10,000 immagini nel insieme di verifica. Di nuovo, ogni immagine è rappresentata come 28 x 28 pixel:
# + colab_type="code" id="2KFnYlcwCaWl" colab={}
test_images.shape
# + [markdown] colab_type="text" id="rd0A0Iu0CaWq"
# Ed l'insieme di verifica contiene 10,000 etichette di immagini:
# + colab_type="code" id="iJmPr5-ACaWn" colab={}
len(test_labels)
# + [markdown] colab_type="text" id="ES6uQoLKCaWr"
# ## Pre-elaborare i dati
#
# Prima di allenare la rete, i dati devono essere pre-elaborati. Osservando la prima immagine dell'insieme di addestramento, si noterà che i valori dei pixel cadono nell'intervallo da 0 a 255:
# + colab_type="code" id="m4VEw8Ud9Quh" colab={}
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
# + [markdown] colab_type="text" id="Wz7l27Lz9S1P"
# Questi valori vanno scalati in un intervallo tra 0 e 1 prima di darli in pasto al modello della rete. Per fare ciò, si dividono i valorei per 255. E' importante che l'*insieme di addestramento* ed l'*insieme di verifica* siano pre-elaborati nello stesso modo:
# + colab_type="code" id="bW5WzIPlCaWv" colab={}
train_images = train_images / 255.0
test_images = test_images / 255.0
# + [markdown] colab_type="text" id="Ee638AlnCaWz"
# Per verificare che i dati siano nella forma corretta e che tutto sia pronto per costruire e allenare la rete, visualizziamo le prime 25 immagini del *insieme di addestramento* e visualizziamo il nome della classe sotto a ciascuna immagine.
# + colab_type="code" id="oZTImqg_CaW1" colab={}
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
# + [markdown] colab_type="text" id="59veuiEZCaW4"
# ## Costruire il modello
#
# La costruzione della rete neurale richiede di configurare i livelli del modello, quindi la compilazione del modello.
# + [markdown] colab_type="text" id="Gxg1XGm0eOBy"
# ### Inizializzare i livelli
#
# L'elemento costruttivo di base di una rete neurale è il *livello*. I livelli estraggono rappresentazioni dai dati con cui vengono alimentati. Sperabilmente, queste rappresentazioni sono significative per il problema che si sta trattando.
#
# La maggior parte del deep learning consiste nel collegare tra loro livelli semplici. La maggior parte dei livelli, come `tf.keras.layers.Dense`, hanno parametri che sono imparati durante l'allenamento.
# + colab_type="code" id="9ODch-OFCaW4" colab={}
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10, activation='softmax')
])
# + [markdown] colab_type="text" id="gut8A_7rCaW6"
# Il primo livello in questa rete, `tf.keras.layers.Flatten`, trasforma il formato delle immagini da un array bi-dimensionale (di 28 per 28 pixel) in un array uni-dimensionale (di 28 * 28 = 784 pixel). Pensate a questi livelli come righe non impilate di pixel dell'immagine. Questo livello non ha parametri da imparare; esso si limita a riformattare i dati.
#
# Dopo la normalizzazione dei pixel, la rete consiste di due livelli `tf.keras.layers.Dense`. Questi sono livelli neurali strettamente connessi, o completamente connessi. Il primo livello `Denso` ha 128 nodi (o neuroni). Il secondo (ed ultimo) livello è un livello *softmax* a 10 nodi che restituisce un vettore di 10 valori di probabilità la cui somma è 1. Ogni nodo contiene un valore che indica la probabilità che l'immagine corrente appartenga ad una delle 10 classi.
#
# ### Compilare il modello
#
# Prima che il modello sia pronto per l'apprendimento, è necessaria qualche impostazione in più. Queste sono aggiunte durante i passi di *compilazione* del modello:
#
# * *Funzione perdita* —Misura quanto è accurato il modello durante l'apprendimento. La volontà è di minimizzare questa funzione per "dirigere" il modello nella giusta direzione.
# * *Ottimizzatore* —Indica com'è aggiornato il modello sulla base dei dati che tratta e della sua funzione perdita.
# * *Metriche* —Usate per monitorare i passi di addestramento e verifica. L'esempio seguente usa come *accuratezza*, la frazione delle immagini che sono classificate correttamente.
# + colab_type="code" id="Lhan11blCaW7" colab={}
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
# + [markdown] colab_type="text" id="qKF6uW-BCaW-"
# ## Addestrare il modello
#
# L'addestramento del modello di rete neurale richiede i seguenti passi:
#
# 1. Alimentare il modello con i dati di addestramento. In questo esempio, i dati di addestramento sono nei vettori `train_images` e `train_labels`.
# 2. Il modello impara ad associare immagini ed etichette.
# 3. Chiedere al modello di fare previsioni su un insieme di prova—in questo esempio, il vettore `test_images`.
# 4. Verificare che le previsioni corrispondano alle etichette del vettore `test_labels`.
# Per iniziare l'addestramento, chiamare il metodo `model.fit`—chiamato così perchè "allena" il modello sui dati di addestramento:
# + colab_type="code" id="xvwvpA64CaW_" colab={}
model.fit(train_images, train_labels, epochs=10)
# + [markdown] colab_type="text" id="W3ZVOhugCaXA"
# Mentre il modello si allena, vengono visualizzate le metriche di perdita e accuratezza. Questo modello raggiunge un'accuratezza di circa 0.88 (o 88%) sui dati di addestramento.
# + [markdown] colab_type="text" id="oEw4bZgGCaXB"
# ## Valutare l'accuratezza
#
# Successivamente, valutare come si comporta il modello sul dataset di test:
# + colab_type="code" id="VflXLEeECaXC" colab={}
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
# + [markdown] colab_type="text" id="yWfgsmVXCaXG"
# Per scoprire che l'accuratezza sul datase di test è leggermente inferiore rispetto a quella sul dataset di addestramento. Questa differenza tra l'accuratezza in addestramento e l'accuratezza in test rappresenta l' *overfitting*. L'overfitting è quando un modello di machine learning ha prestazioni peggiori su input nuovi, mai visti prima, che sui dati di addestramento.
# + [markdown] colab_type="text" id="xsoS7CPDCaXH"
# ## Fare previsioni
#
# Una volta addestrato il modello, puoi usarlo per fare previsioni su altre immagini.
# + colab_type="code" id="Gl91RPhdCaXI" colab={}
predictions = model.predict(test_images)
# + [markdown] colab_type="text" id="x9Kk1voUCaXJ"
# Qui, il modello ha previsto l'etichetta per ciascuna immagine nell'insieme di test. Diamo un'occhiata alla prima previsione:
# + colab_type="code" id="3DmJEUinCaXK" colab={}
predictions[0]
# + [markdown] colab_type="text" id="-hw1hgeSCaXN"
# Una previsione è un vettore di 10 numeri. Essi rappresentano la "confidenza" del modello che l'immagine corrisponda a ciascuno dei 10 diversi articoli di abbigliamento. E si può vedere quale etichetta ha il valore più alto di confidenza:
# + colab_type="code" id="qsqenuPnCaXO" colab={}
np.argmax(predictions[0])
# + [markdown] colab_type="text" id="E51yS7iCCaXO"
# Così, il modello è più confidente che l'immagine sia uno stivaletto, o `class_names[9]`. L'esame dell'etichetta di test mostra che questa classificazione è corretta:
# + colab_type="code" id="Sd7Pgsu6CaXP" colab={}
test_labels[0]
# + [markdown] colab_type="text" id="ygh2yYC972ne"
# Mettendo ciò in forma grafica, si può osservare l'insieme completo delle previsioni sulle 10 classi.
# + colab_type="code" id="DvYmmrpIy6Y1" colab={}
def plot_image(i, predictions_array, true_label, img):
  """Display test image i with a caption comparing prediction and truth.

  Args:
    i: index into true_label and img. predictions_array is expected to
       already be the confidence vector for image i (callers pass
       predictions[i]).
    predictions_array: length-10 vector of class confidences for image i.
    true_label: array of true class indices for the whole dataset.
    img: array of images for the whole dataset.
  """
  # Select the i-th label/image. (The original code also re-assigned
  # predictions_array to itself, a no-op that has been removed.)
  true_label, img = true_label[i], img[i]
  plt.grid(False)
  plt.xticks([])
  plt.yticks([])
  plt.imshow(img, cmap=plt.cm.binary)
  predicted_label = np.argmax(predictions_array)
  # Blue caption when the prediction matches the truth, red otherwise.
  color = 'blue' if predicted_label == true_label else 'red'
  plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
                                100*np.max(predictions_array),
                                class_names[true_label]),
                                color=color)
def plot_value_array(i, predictions_array, true_label):
  """Plot the 10-class confidence bar chart for test image i.

  Args:
    i: index into true_label. predictions_array is expected to already be
       the confidence vector for image i (callers pass predictions[i]).
    predictions_array: length-10 vector of class confidences.
    true_label: array of true class indices for the whole dataset.
  """
  # (The original code re-assigned predictions_array to itself; removed.)
  true_label = true_label[i]
  plt.grid(False)
  plt.xticks(range(10))
  plt.yticks([])
  thisplot = plt.bar(range(10), predictions_array, color="#777777")
  plt.ylim([0, 1])
  predicted_label = np.argmax(predictions_array)
  # Color the predicted bar red, then the true bar blue; when the prediction
  # is correct the blue overwrites the red, leaving a single blue bar.
  thisplot[predicted_label].set_color('red')
  thisplot[true_label].set_color('blue')
# + [markdown] colab_type="text" id="d4Ov9OFDMmOD"
# Osserviamo la 0esima immagine, le previsioni, e il vettore delle previsioni. Le etichette previste correttamente sono in blu e le previsioni non corrette sono in rosso. Il numero dà la percentuale (su 100) dell'etichetta prevista.
# + colab_type="code" id="HV5jw-5HwSmO" colab={}
# Visualize test image 0 alongside its 10-class confidence bars.
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# + colab_type="code" id="Ko-uzOufSCSe" colab={}
# Same visualization for test image 12.
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# + [markdown] colab_type="text" id="kgdvGD52CaXR"
# Rappresentiamo diverse immagini con le rispettive previsioni. Notiamo che il modello può sbagliare anche quando è molto confidente.
# + colab_type="code" id="hQlnbqaw2Qu_" colab={}
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
# Each image occupies two subplot slots per row pair: the picture at
# position 2*i+1 and its confidence bar chart at 2*i+2.
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
  plt.subplot(num_rows, 2*num_cols, 2*i+1)
  plot_image(i, predictions[i], test_labels, test_images)
  plt.subplot(num_rows, 2*num_cols, 2*i+2)
  plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
# + [markdown] colab_type="text" id="R32zteKHCaXT"
# Infine, usiamo il modello addestrato per fare una previsione su una singola immagine.
# + colab_type="code" id="yRJ7JU7JCaXT" colab={}
# Grab an image from the test dataset.
# Note: a single image, with no batch dimension yet (added below).
img = test_images[1]
print(img.shape)
# + [markdown] colab_type="text" id="vz3bVp21CaXV"
# I modelli `tf.keras` sono ottimizzati per fare previsioni su *batch*, o collezioni, di esempi in una volta. Di conseguenza, anche se state usando una singola immagine, è necessario aggiungerla ad una lista:
# + colab_type="code" id="lDFh5yF_CaXW" colab={}
# Add the image to a batch where it's the only member.
# np.expand_dims inserts a leading batch axis, since tf.keras models
# predict on batches of examples.
img = (np.expand_dims(img,0))
print(img.shape)
# + [markdown] colab_type="text" id="EQ5wLTkcCaXY"
# Ed ora andiamo a prevedere l'etichetta corretta per questa immagine:
# + colab_type="code" id="o_rzNSdrCaXY" colab={}
# Predict on the one-image batch; the result holds one confidence vector.
predictions_single = model.predict(img)
print(predictions_single)
# + colab_type="code" id="6Ai-cpLjO-3A" colab={}
# Bar chart of the confidences, with class names as rotated x-tick labels.
plot_value_array(1, predictions_single[0], test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
# + [markdown] colab_type="text" id="cU1Y2OAMCaXb"
# `model.predict` restituisce una lista di liste—una lista per ogni immagine nel batch di dati. Prendiamo la previsione per la nostra (unica) immagine nel batch:
# + colab_type="code" id="2tRmdq_8CaXb" colab={}
# Highest-confidence class for the single image in the batch.
np.argmax(predictions_single[0])
# + [markdown] colab_type="text" id="YFc2HbEVCaXd"
# E il modello prevede l'etichetta secondo le aspettative.
| site/it/tutorials/keras/classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Chemistry Overview
#
# A wide range of tools exist that support workflows in Chemistry, from looking up the structure and properties of a wide variety of elements and compounds, to visualising their structure using interactive HTML widgets.
#
# Using automation to help us generate the the lookup of compound structures from their names allows us to create narratives where correctness is guaranteed when moving from one consideration, such as the formula for a particular compound given its common name, to the visualisation of that structure, or to a consideration of its physical, structural or chemical properties.
#
# :::{admonition} Hiding Code
# :class: tip
# The following example includes the code inline to show how the automation proceeds. In a finished work the code could be hidden but revealable, for example in collapsed code cells, or have all mention of the code removed from the final output document.
# :::
# ## Example - Describing a Compound
#
# As an example of providing a generated description of a compound simply from its name, let's consider *ethanol*. (We could just as easily have picked another compound, such as *methane* or *nitric acid*.)
#
# Let's define a reference to the compound:
#Provide the common name of a compound
# Changing this single value and re-running the notebook updates every
# derived formula, identifier and visualisation below.
compound_name = "ethanol"
# At the current time, whilst R based `bookdown` workflows *do* support inline embedding of code variables in markdown text, interactive Jupyter notebook markdown cells don't support such a feature (although there is ongoing work to provide this sort of support) other than by extension.
#
# However, it is possible to embed variables into markdown text in Jupyter Book workflows using [`jupyter-glue`](https://jupyterbook.org/content/executable/output-insert.html).
# + tags=["hide-input"]
from myst_nb import glue
# Create a reference to a value we can use in our markdown text
# (referenced later via {glue:text}`compound`).
glue("compound", compound_name, display=False)
# -
# Having declared the compound we want to investigate in code, we can refer to it directly inline in our text using a ``{glue:}`compound` `` reference: {glue:text}`compound`.
#
# We can also automatically look-up various properties associated with the compound, such as its chemical formula or a universal compound identifier.
# +
import pubchempy as pcp

# Look the compound up by name on PubChem (network call) and take the
# first match; _compound carries the formula and CID used below.
_compound = pcp.get_compounds(compound_name, 'name')[0]
# -
# The formula can be rendered in an appropriate typographical form from a LaTeX representation of the formula.
# + tags=["hide-input"]
from IPython.display import Latex

# Render the molecular formula via the mhchem \ce{...} macro. A raw string
# is used because "\c" is an invalid escape sequence in a normal string
# literal (SyntaxWarning in recent Pythons); the resulting text is unchanged.
Latex(r'$\ce{' + _compound.molecular_formula + '}$')
# -
# $$\require{mhchem}$$
#
# It is also possible to create `glue` references to things like the compound LaTeX equation.
#
# Using the `mhchem` *MathJax* package, we easily add support for inline rendering of chemical equations, just as we can rendered mathematical equations:
# + tags=["hide-input"]
# Raw string avoids the invalid "\c" escape-sequence warning; the rendered
# LaTeX value is byte-identical to the previous form.
_compound_latex = r'$\ce{' + _compound.molecular_formula + '}$'

# Save a reference to the Latex equivalent of the compound formula
glue("compoundLatex", Latex(_compound_latex), display=False)
# -
# This means that we can render the chemical equation for our chosen compound ({glue:text}`compound`) in a markdown content block:
#
# ```{glue:math} compoundLatex
# :label: eq-sym
# ```
#
# We can also render a free standing HTML+JS 3D interactive version of the molecule into the page from the previously retrieved universal compound ID:
# + tags=["hide-input"]
import py3Dmol

# Lookup a molecule using its CID (PubChem Compound Identification) code
# fetched during the pubchempy lookup above (network call).
p=py3Dmol.view(query = f'cid:{_compound.cid}')

# Set the render style
p.setStyle({'stick': {'radius': .1}, 'sphere': {'scale': 0.25}})
p.show()
# -
# If we were to change the name of the compound in the first code cell in our original source document and reflow the notebook, *everything* would be updated and there would be no mismatches between the compound name, its formula, or its visualised structure.
#
# This ability to write *general* templated content and then generate final versions of it that reference *particular* compounds allows easy updating of materials if you want to change the focus of discussion. It might be particularly appropriate when creating worksheets or reference sheets associated with a particular compound, for example. It also provides an easy way to generate templated assessment questions and marking guides that can be "refreshed" simply by updating a question to apply to a compound not previously the focus of the same assessment question.
#
# :::{admonition} Code visibility in rendered documents
# :class: tip
# Recall that there is no requirement for the original generating code to be visible, or even present, in the final rendered document.
# :::
| src/chemistry/overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Purpose of this to carry out the following:
# explore features we already have and create new ones
# once that is done, carry out an appropriate train/test/validate split - taking temporal validation into account
# pre processing (impute missingness separately for train/test/validate, normalize, etc)
# The datasets used are the datasets created by build_dataset - they're
# too big for github, so they have been uploaded to google drive. They should
# be downloaded into your local data folder to be imported in this notebook
# The end goal of this notebook is to have our final datasets ready for analysis
# this code should then be moved into our .py files so the notebook can eventually be deleted
# +
import sqlite3
from sqlite3 import Error
import pandas as pd
import config
import os.path
from os import path
#from create_db import create_connection, create_table, clean_column_names
#from populate_db import extract_data, insert_records
#import query_db as qd
import importlib
import datetime
import re
import numpy as np
# -
# Show every column/row when displaying DataFrames in the notebook.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# +
# Import datasets created in build_dataset
#dataset_main = pd.read_csv('../data/dataset_main.csv')
#dataset_active_sentences = pd.read_csv('../data/active_sentences.csv')
#dataset_main_active_sentences = pd.read_csv('../data/dataset_main_active.csv')
# Just use dataset_main_active
# -
# NOTE(review): dataset_main_active is not defined at this point -- the reads
# above are commented out and the frame is only loaded from CSV much further
# down. Confirm the intended execution order before running top-to-bottom.
dataset_main_active.head()
dataset_main_active.isnull().sum()
# +
# Decisions to be made:
# Which variables to keep?
# Which features need to be constructed from the available variables?
# How do we want to deal with missings?
# e.g. impute? choose majority? some notes here: https://towardsdatascience.com/working-with-missing-data-in-machine-learning-9c0a430df4ce
# A first pass, following this group: https://bucklerd.github.io/MUSA801_Recidivism_Markdown/#
# Race - keep
# Sex - keep
# Age at each sentence - need to calculate using birth date and effective sentence begin date - more indifferent
# Age category each sentence - keep, look at literature to decide how to categorize
# Ethnicity - keep
# Citizenship - keep, but just look at variation, don't include in model
# Age at first incarceration? Could compute as age at first sentence - keep, similar information to age category
# Most serious current offense (since there are so many categories, do we want to map on our coded 5 point scale
# to this..?)
# - different versions of this (most serious offense, and turn everything else to other), one-hot encoding
# - with 5 point scale
# Current crime violent or not violent (not sure where they are gtting this from, or if its self constructed)
# - our scale 4-5 to 1, our scale 1-3 is 0 (self-constructed)
# - feel iffy about this, so also try leaving out
# Total sentence count - can be computed - lots of bias baked in? - would be at the individual level?
# - keep, and see how it affects the model (prior history context)
# Juvenile Offense Flag - would need to construct using age at first incarceration - keep
# total count of felony and misdemeanor charges - i think these can be calculated from sentence component
# would be at the individual level not sentence level?
# - keep, for the sentence that got a recidivate flag, how many flags in either category
# custody_class_code - i think this CONTROL_STATUS
# individual level not sentence level - don't keep (probably adds more bias than value...)
# special characteristics - i didn't really know how to make sense of this, so i didn't include it for now...
# - - don't keep (probably adds more bias than value...)
# total disciplinary infractions - would be at the individual level not the sentence level
# although this comes from a file that has infraction by date so in theory
# we could calculate at the sentence level with some SQL maneouvering
# how many infractions between each start/end date of the sentence, but this would be a bit more complicated
# Type of last inmate movement - we have this, but i'm not sure how much value it adds. also its at the
# offender level, not sentence level - don't keep
# Thoughts on missingness:
# Race, Sex, Birth date - basically not missing, can drop or impute couple that are
# Ethnicity and Citizenship - Majority impute?
# Most serious current offense - already working on trying to make this less missing
# Disciplinary infractions - the way this is constructed is by merging on from a file that contains
# infractions. So i think it is safe to assume that if this variable is missing for an individual,
# they did not commit any infractions. replace with 0?
#
# -
# ### Decisions:
#
# *First model is most parsimonious
# * Bring in median household income and unemployment data so the predictor mechanism isn't just on individual (maybe NC, annual)
#
# ### Splitting the work:
# * Damini: (pulling via SQL)
# - Disciplinary infractions
# - Most serious current offense
# - Current crime violent
# - Total count of felony and misdemeanor charges
# - Total sentence count
#
# * Charmaine:
# - Median HH income
# - Unemployment
# - Age at each sentence - need to calculate using birth date and effective sentence begin date - more indifferent
# - Age category each sentence - keep, look at literature to decide how to categorize
# - Age at first incarceration? Could compute as age at first sentence - keep, similar information to age category
# - Juvenile Offense Flag
# ### Charmaine's WIP code:
# Check how many observations you have and make sure you don't drop any while creating new features
dataset_main_active.shape
# +
# Create AGE_AT_SENTENCE
dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'] = pd.to_datetime(dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'], yearfirst=True)
# '0001-01-01' is a sentinel for an unknown birth date; treat it as missing.
# np.nan replaces np.NaN, an alias that was removed in NumPy 2.0.
dataset_main_active.loc[dataset_main_active['BIRTH_DATE'] == '0001-01-01', 'BIRTH_DATE'] = np.nan
dataset_main_active['BIRTH_DATE'] = pd.to_datetime(dataset_main_active['BIRTH_DATE'], format='%Y/%m/%d')
# Age in whole years at sentence start.
# NOTE(review): casting a timedelta to '<m8[Y]' truncates to year units and is
# rejected by newer pandas/NumPy releases -- confirm the pinned versions.
dataset_main_active['age_at_sentence'] = (dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'] - dataset_main_active['BIRTH_DATE']).astype('<m8[Y]')
dataset_main_active['age_at_sentence'].describe()
# -
# Check number of missing
dataset_main_active['age_at_sentence'].isnull().sum()
# +
# Check observations where age is negative
# dataset_main_active.loc[dataset_main_active['age_at_sentence'] < 0, ['EARLIEST_SENTENCE_EFFECTIVE_DT', 'BIRTH_DATE']]
# Convert to NaN if less than 10
dataset_main_active.loc[dataset_main_active['age_at_sentence'] < 10, ['age_at_sentence']] = np.nan
# Check number of missing
dataset_main_active['age_at_sentence'].isnull().sum()
# -
# Create age categories
# Resources: https://www.ussc.gov/research/research-reports/effects-aging-recidivism-among-federal-offenders
dataset_main_active['age_cat'] = pd.cut(dataset_main_active['age_at_sentence'],
                               bins=[0,17,21,24,29,34,39,44,49,54,59,64,90],
                               labels=['Under 18', '18-21','22-24','25-29','30-34','35-39','40-44','45-49',
                                       '50-54','55-59','60-64','65 and older',])
dataset_main_active.groupby(['age_cat']).size()
dataset_main_active['age_cat'].describe()
# Compute age at first incarceration
# Earliest sentence start per person, broadcast back onto every row via merge.
first_incarceration = pd.DataFrame(dataset_main_active.groupby(['ID'])['EARLIEST_SENTENCE_EFFECTIVE_DT'].min().reset_index(name='first_incarceration_date'))
dataset_main_active = dataset_main_active.merge(first_incarceration, on='ID')
dataset_main_active[['ID','COMMITMENT_PREFIX','EARLIEST_SENTENCE_EFFECTIVE_DT','first_incarceration_date']].head()
# Flag for juvenile offense
dataset_main_active['age_first_offense'] = (dataset_main_active['first_incarceration_date'] - dataset_main_active['BIRTH_DATE']).astype('<m8[Y]')
dataset_main_active['age_first_offense'].describe()
# Check observations where age is negative
dataset_main_active.loc[dataset_main_active['age_first_offense'] < 0, ['EARLIEST_SENTENCE_EFFECTIVE_DT', 'BIRTH_DATE']]
# +
#dataset_main[dataset_main['age_first_offense'] < 10].count()
# +
# Imported without an alias; pandas .hist() only needs matplotlib installed.
import matplotlib.pyplot
dataset_main_active.hist(column=['age_first_offense'])
# -
dataset_main_active[dataset_main_active['age_first_offense'] > 0].hist(column=['age_first_offense'])
# +
# Convert to NaN if less than 10
# np.nan replaces np.NaN, an alias that was removed in NumPy 2.0.
dataset_main_active.loc[dataset_main_active['age_first_offense'] < 10, ['age_first_offense']] = np.nan
# Check number of missing
dataset_main_active['age_first_offense'].isnull().sum()
# -
# NOTE(review): a NaN age compares False against 18, so rows with missing
# age_first_offense get juv_first_offense == False rather than NaN -- confirm.
dataset_main_active['juv_first_offense'] = (dataset_main_active['age_first_offense'] < 18)
dataset_main_active.sort_values('age_first_offense')[['BIRTH_DATE','first_incarceration_date','age_first_offense', 'juv_first_offense']].head(10)
#dataset_main_active.loc[dataset_main_active['age_first_offense'] < 10, ['juv_first_offense']] = np.NaN
dataset_main_active['age_first_offense'].isnull().sum()
# ### TO DISCUSS
# A lot of these dates don't make sense. How can a toddler be sentenced?
#
# * Replace with NaN if below 10 (talked with Damini about this)
# * Maybe impute to mean/median eventually.
# * We could trim the data to start at 1976
# Spot-check the derived age/juvenile features on a random sample of rows.
dataset_main_active[['BIRTH_DATE','first_incarceration_date', 'age_at_sentence', 'age_cat','age_first_offense', 'juv_first_offense']].sample(10)
# ### Pull in unemployment data
# * Source: BLS LAUS
# * Link: https://beta.bls.gov/dataViewer/view/timeseries/LASST370000000000003
# Distribution of sentence start dates, and a line plot of sentences per year.
dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].describe()
dataset_main_active.groupby([dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year]).size().plot(kind="line")
# The earliest data BLS has only goes to 1976...
# ### TO DISCUSS
# Should we restrict our data to 1976? We would end up dropping 5% of our data.
#
# Or 1984? (see median HH income data limitations below...)
#
# ### DECIDED:
# Trim data to start in 1976
# Import downloaded CSV
unemployment = pd.read_csv('../data/unemployment_nc.csv')
unemployment.head()

# Strip the leading character of the BLS period code to get the month number
# (presumably 'M01'-style codes -- verify against the downloaded file).
unemployment['month'] = unemployment['Period'].str[1:]
unemployment['Year'] = unemployment['Year'].astype(str)
# Build a 'YYYY-MM' key used to join onto the sentence data below.
unemployment['date_to_merge'] = unemployment['Year'].str.cat(unemployment['month'], sep ="-")
unemployment['date_to_merge'].head()
# +
# Create a str column to merge on
dataset_main_active['date_to_merge'] = dataset_main_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.strftime('%Y-%m')

# Rename variables
unemployment = unemployment.rename(columns={"Value": "unemp_rate"})
unemployment_limited = unemployment[['date_to_merge','unemp_rate']]

# Merge with unemployment data
# Left join: sentences with no matching year-month (e.g. pre-1976) get NaN.
dataset_main_active = dataset_main_active.merge(unemployment_limited, on='date_to_merge', how='left')
check_cols = ['EARLIEST_SENTENCE_EFFECTIVE_DT','date_to_merge','unemp_rate']
dataset_main_active[check_cols].sample(10)
# -
# Check how many are missing
# Reported as a share of all rows, not a raw count.
dataset_main_active['unemp_rate'].isnull().sum() / dataset_main_active.shape[0]
# ### Pull in median household income
# * Source: Table H-8
# * Links:
# * https://www.census.gov/data/tables/time-series/demo/income-poverty/historical-income-households.html
# * https://fred.stlouisfed.org/series/MEHOINUSA672N
# * Note: Only has 1984-2018?
#
# ### DECIDED: DO NOT USE
# +
#hh_inc = pd.read_excel('../data/h08.xls', sheet_name='edited', usecols=['Year','Median HH Income'], nrows=38)
# +
#hh_inc
# -
# #### Source: https://www.census.gov/topics/income-poverty/income/guidance/cps-historic-footnotes.html
#
# 36. Beginning with 2009 income data, the Census Bureau expanded the upper income interval used to calculate medians and Gini indexes to \\$250,000 or more. Medians falling in the upper open-ended interval are plugged with "\\$250,000." Before 2009, the upper open-ended interval was \\$100,000 and a plug of "\\$100,000" was used.
#
# 37. Implementation of Census 2010-based population controls.
#
# 38. The 2014 CPS ASEC included redesigned questions for income and health insurance coverage. All of the approximately 98,000 addresses were eligible to receive the redesigned set of health insurance coverage questions. The redesigned income questions were implemented to a subsample of the 98,000 addresses using a probability split panel design. Approximately 68,000 addresses were eligible to receive a set of income questions similar to those used in the 2013 CPS ASEC and the remaining 30,000 addresses were eligible to receive the redesigned income questions. The source of these 2013 estimates is the portion of the CPS ASEC sample which received the income questions consistent with the 2013 CPS ASEC, approximately 68,000 addresses.
#
# 39. The source of these 2013 estimates is the portion of the CPS ASEC sample which received the redesigned income questions, approximately 30,000 addresses.
#
# 40. Implementation of an updated CPS ASEC processing system.
#
# We have duplicates of 2013 and 2017, so footnotes 38, 39, and 40 are the most relevant.
#
# #### Decision: Which duplicates do we use?
# - Should we drop the (40) version of 2017? And use the updated version?
# - Should we drop the (39) version of 2013? To be consistent with the decision above to use the updated system.
# +
# Drop duplicate rows and convert to string
#hh_inc_edit = hh_inc.copy()
#hh_inc_edit['Year'] = hh_inc_edit['Year'].astype(str)
#hh_inc_edit = hh_inc_edit.drop(axis=0, index=[2,6]) # Drop (39) and (40) versions of 2013 and 2017
# Slice string to only keep numbers
#hh_inc_edit['Year'] = hh_inc_edit['Year'].str[:4]
#hh_inc_edit.head()
# +
# Create a str column to merge on
#dataset_main['year_to_merge'] = dataset_main['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.strftime('%Y')
# Rename variable
#hh_inc_edit = hh_inc_edit.rename(columns={"Year": "year_to_merge"})
# Merge with dataset_main
#dataset_main = dataset_main.merge(hh_inc_edit, on='year_to_merge', how='left')
#check_cols = ['EARLIEST_SENTENCE_EFFECTIVE_DT','year_to_merge','Median HH Income']
#dataset_main[check_cols].sample(10)
# +
# Drop merging variables
#dataset_main.drop(axis=1, columns=['year_to_merge', 'date_to_merge'], inplace=True)
#dataset_main.head()
# +
# Keep features to merge on
#features_to_add = ['ID', 'COMMITMENT_PREFIX', 'age_at_sentence', 'age_cat', 'juv_first_offense','unemp_rate']
#age_unemp_features = dataset_main[features_to_add]
# +
#age_unemp_features.head()
#age_unemp_features.shape
# -
# ### Pipeline Steps
#
# 1. **Read Data.**
# Load the data. Your function for reading in data can be as simple as calling pd.read_csv. If this step is more complicated (e.g. in your projects), you will want to write more detailed functions.
#
# 2. **Explore Data.**
# Automate common exploratory tasks. This can include generating distributions of variables, correlations between them, identifying outliers, summarizing by groups, identifying the time range of the data, etc. Feel free to leverage your work from previous labs and Step 1 above.
#
# 3. **Create Training and Testing Sets.**
# Create training and testing splits. You should use a separate training set, validation set (to tune hyperparameters), and testing set to perform cross-validation.
#
# 4. **Pre-Process Data.**
# Automate pre-processing steps. One function should impute missing values of continuous variables using the median value and the other should normalize continuous variables.
# * No need to impute BIRTH_DATE, but we can impute AGE variables with median
# * Majority-vote for juvenile flag
# * Disciplinary infractions
# * Missing should be converted to 0
# * Most serious current offense - should not be missing
# * Limit to certain number categories (e.g., top vs other), before train/test/split becuase not imputing.
# * For version where we map on our scales, will be missing 5% of the time, imputed with most common category after train/test/split
# * Current crime violent - will be missing in places
# * Will either be missing or will be in our scale
# * Impute with most common after train/test split
# * Total count of felony and misdemeanor charges - might be missing
# * Impute with median after train/test/split
# * Total sentence count - shouldn't be missing
#
#
# 5. **Generate Features.**
# Facilitate feature generation. One function should perform one-hot encoding of categorical variables (e.g. with pd.get_dummies) and one function should discretize continuous variables (e.g. with pd.cut). Discretizing continuous variables can be useful in cases where the variable has a significant cutoff value (for example, age could be discretized to distinguish between children under 18 and adults 18 and older).
#
# 6. **Build Classifiers.**
# Apply machine learning models to a dataset. The function should also print the amount of time required to train each model.
#
# 7. **Evaluate Classifiers.**
# Calculate the accuracy of your models based on your testing set, and validate models.
# ## Damini's WIP Code
# NOTE(review): pandas is already imported near the top of this notebook;
# this re-import is harmless but redundant.
import pandas as pd

# Reload the combined dataset from disk.
# NOTE(review): this re-read discards the datetime parsing and derived
# features built earlier in the notebook -- columns come back as strings.
dataset_main_active = pd.read_csv('../data/dataset_main_active.csv')

# temporary until re run build data
# Normalize the "no next sentence" sentinel (numeric 0 or string "0") to "NONE".
dataset_main_active.loc[dataset_main_active['NextPrefix']==0,'NextPrefix'] = "NONE"
dataset_main_active.loc[dataset_main_active['NextPrefix']=="0",'NextPrefix'] = "NONE"

dataset_main_active.dtypes

# Flatten stringified tuple column names left over from an earlier aggregation.
dataset_main_active.rename(columns={"('Count', 'FELON')":'felon_count',"('Count', 'MISD.')":'misd_count'}, inplace=True)

dataset_main_active.columns

# Keep only the columns relevant to feature construction and modeling.
dataset_main_active = dataset_main_active.loc[:,['ID', 'COMMITMENT_PREFIX', 'EARLIEST_SENTENCE_EFFECTIVE_DT','MOST_SERIOUS_OFFENSE_CODE','END_DATE', 'INMATE_RECORD_STATUS_CODE','INMATE_ADMIN_STATUS_CODE', 'DATE_OF_LAST_INMATE_MOVEMENT','TYPE_OF_LAST_INMATE_MOVEMENT','CONTROL_STATUS', 'GENDER', 'RACE', 'BIRTH_DATE', 'STATE_BORN','ETHNICITY', 'CITIZENSHIP', 'PRIMARY_OFFENSE_CODE', 'NextPrefix','NextStart', 'NextOffense', 'Time_Diff', 'Recidivate','INFRACTION_PER_SENT', 'misd_count','felon_count','Recidivate_Risk_Level', 'Recidivate_Risk_Level_Lenient', 'Recidivate_Risk_Level_Harsh','Current_Offense_Risk_Level','Current_Offense_Risk_Level_Lenient','Current_Offense_Risk_Level_Harsh']]

pd.set_option('display.max_columns', None)
dataset_main_active.head()
# ## Steps to follow for features:
# - Stage 0: constructing variables, understanding weirdness and outliers
# - Stage 1: train/test/validate/active split
# - Stage 2: pre-process - including imputing messiness for vars created in Stage 0, and creating variables based on those (e.g. once Age has been imputed, construct Age_cat)
#
# ### Disciplinary infractions
# - Stage 0: constructed in build_dataset, missings replaced with 0 - no infractions assumed if ID not found in infractions dataset
# - Stage 2: Normalization of continuous var required
#
# ### Most serious current offense
# - Stage 0 (v1): Construct more generalized var which captures X% of offenses, turning all else to others
# - Stage 2 (v1): There should be no missingness (double check this) - will need one-hot encoding
#
# - Stage 0 (v2): (alt var - scale coded by us): Already merged, nothing further needed
# - Stage 2 (v2): there will be missings (since we only coded 95% of offenses) so fill NA with majority then one-hot
#
# +
# Most serious current offense v1
#most_offenses = dataset_main_active.groupby("MOST_SERIOUS_OFFENSE_CODE").size().reset_index(name="count")
# Frequency of each offense code across all sentence rows.
most_offenses = dataset_main_active.groupby("MOST_SERIOUS_OFFENSE_CODE")['ID'].size().reset_index(name="count")
most_offenses['PCT'] = most_offenses['count'] / dataset_main_active.shape[0]
most_offenses = most_offenses.sort_values(by='PCT', ascending=False)
most_offenses['CUMSUM'] = most_offenses['PCT'].cumsum()
most_offenses['OFFENSE_CLEAN'] = most_offenses['MOST_SERIOUS_OFFENSE_CODE']
# Codes beyond the top ~90% of cumulative share are collapsed to "OTHER".
most_offenses.loc[most_offenses['CUMSUM'] > 0.9,'OFFENSE_CLEAN'] = "OTHER"
most_offenses = most_offenses.loc[:,['MOST_SERIOUS_OFFENSE_CODE','OFFENSE_CLEAN']]

# Merge this back onto main dataset
# NOTE(review): groupby drops NaN offense codes, so rows with a missing code
# receive no OFFENSE_CLEAN from this left merge -- confirm that is intended.
dataset_main_active = dataset_main_active.merge(most_offenses, how="left", on="MOST_SERIOUS_OFFENSE_CODE")
# -
dataset_main_active.head()
# ### Current Crime Violent Flag
# - Stage 0 (v1): don't include variable (we dont have it)
# - Stage 2 (v1): dont include variable (we dont have it)
#
# - Stage 0 (v2 - scale coded by us): Already merged, nothing further needed
# - Stage 2: there will be missings (since we only coded 95% of offenses) so fill NA with majority then categorize as 1 for receiving score 4 or 5 and 0 otherwise
#
# ### Total count of felony and misdemeanor charges
# - Stage 0: merged and created, nothing further needed
# - Stage 2: impute NAs with median (very few missing) then normalize since continuous var
#
# ### Total sentence count
# - Stage 0: construct
# - Stage 2: shouldnt be missing (check) - then normalize since continuous var
#
# Prior-sentence count: running count of commitments per person, minus one so
# a person's first sentence gets 0.
# NOTE(review): .count() excludes NaNs, so a missing
# EARLIEST_SENTENCE_EFFECTIVE_DT would undercount, and the cumsum assumes
# COMMITMENT_PREFIX sorts chronologically within each ID -- confirm both.
count = dataset_main_active.groupby(['ID','COMMITMENT_PREFIX']).count().groupby(level=0).cumsum().reset_index()
count['sentence_count'] = count['EARLIEST_SENTENCE_EFFECTIVE_DT'] - 1
count = count.loc[:,['ID','COMMITMENT_PREFIX','sentence_count']]
dataset_main_active = dataset_main_active.merge(count, how="left", on = ['ID','COMMITMENT_PREFIX'])
dataset_main_active.head()
# +
#dataset_main_active_subset.isnull().sum()
# -
# # Next Steps:
# - Train/test/validate/active split
# - Write functions to conduct stage 2 for the above vars
# ## Stratified Sampling For Train Test Validate Split
# +
# Columns carried into the modeling dataset.
subset = ['ID','COMMITMENT_PREFIX','EARLIEST_SENTENCE_EFFECTIVE_DT','MOST_SERIOUS_OFFENSE_CODE','END_DATE','INMATE_ADMIN_STATUS_CODE','CONTROL_STATUS','GENDER','RACE','ETHNICITY','INFRACTION_PER_SENT','misd_count','felon_count','Recidivate_Risk_Level','Recidivate_Risk_Level_Lenient','Recidivate_Risk_Level_Harsh','Current_Offense_Risk_Level','Current_Offense_Risk_Level_Lenient','Current_Offense_Risk_Level_Harsh','OFFENSE_CLEAN','sentence_count','NextPrefix']
age_unemp_features = ['age_at_sentence', 'age_cat', 'juv_first_offense','unemp_rate', 'date_to_merge']
subset.extend(age_unemp_features)
#age_unemp_features = dataset_main[features_to_add]
dataset_main_active_subset = dataset_main_active.loc[:,subset]
#dataset_main_active_subset = dataset_main_active.copy()
#dataset_main_active_subset.reset_index(drop=True,inplace=True)
# -
dataset_main_active_subset.columns

# Trim data to start in 1976
# NOTE(review): .dt.year requires EARLIEST_SENTENCE_EFFECTIVE_DT to be datetime
# dtype; after the CSV re-read above it is a plain string unless re-parsed --
# confirm before running this cell.
dataset_main_active_subset = dataset_main_active_subset[dataset_main_active_subset['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year >= 1976]
dataset_main_active_subset['EARLIEST_SENTENCE_EFFECTIVE_DT'].describe()

dataset_main_active_subset.shape

# Add time fixed effects (year-month)
dataset_main_active_subset.rename(columns={'date_to_merge': 'year_month'}, inplace=True)
dataset_main_active_subset['year_month'].sample(10)
# Drop if race / sex / ethnicity is missing
# NOTE(review): this only *displays* missingness -- no rows are actually
# dropped here; the drop described in the comment is not yet implemented.
dataset_main_active_subset.isnull()

# hold out active sentences
# Still-active sentences with no subsequent commitment are set aside: their
# recidivism outcome is not yet observable, so they cannot be labeled.
active_sentences = dataset_main_active_subset[(dataset_main_active_subset['INMATE_ADMIN_STATUS_CODE']=='ACTIVE') & (dataset_main_active_subset['NextPrefix']=="NONE") ]
print("Size of active sentences dataset: ",active_sentences.shape[0])

# Drop those missing decided category
dataset_no_active = dataset_main_active_subset[(dataset_main_active_subset['Recidivate_Risk_Level'].notnull())]
print("Dataset size: " , dataset_no_active.shape[0])
dataset_no_active.head(2)

# NOTE(review): datetime was already imported near the top of this notebook.
import datetime

# Hold-out fraction and RNG seed shared by both split stages below.
holdOut = 0.2
randomState = 1234
# +
#dataset_no_active = dataset_no_active_backup
#dataset_no_active_backup = dataset_no_active
# -
# +
# Train, val, test split:
# Split at the person (ID) level so the same individual never appears in two
# splits; otherwise repeated sentences for one person would leak information.
start = datetime.datetime.now()
# get number of unique ids and the unique IDs
n_ID = len(dataset_no_active.ID.unique())
ids = pd.DataFrame(dataset_no_active.ID.unique())
# sample from IDs
train_index = ids.sample(round(n_ID*(1-holdOut)),random_state = randomState ).values.tolist()
# .values.tolist() yields one-element lists; flatten to plain IDs.
train_index = [item for sublist in train_index for item in sublist]
# train data is data from any IDs that show up in train index
train_val = dataset_no_active[dataset_no_active['ID'].isin(train_index)]
# test data is data from any IDs that don't show up in train index
test_data = dataset_no_active[~dataset_no_active['ID'].isin(train_index)]
# repeat similar process for validate data
# (n_ID, ids, and train_index are intentionally reused/overwritten below)
n_ID = len(train_val.ID.unique())
ids = pd.DataFrame(train_val.ID.unique())
# sample from IDs
train_index = ids.sample(round(n_ID*(1-holdOut)),random_state = randomState ).values.tolist()
train_index = [item for sublist in train_index for item in sublist]
# train data is data from any IDs that show up in train index
train_data = train_val[train_val['ID'].isin(train_index)]
# test data is data from any IDs that don't show up in train index
validate_data = train_val[~train_val['ID'].isin(train_index)]
stop = datetime.datetime.now()
print("Time Elapsed:", stop - start)
# -
train_data.shape
test_data.shape
validate_data.shape
# +
# Sanity check
print("Total Number of Unique IDs:" , len(dataset_no_active.ID.unique()))
print("Total Number of IDs in Test Data:" , len(test_data.ID.unique()))
print("Total Number of IDs in Train Data:" , len(train_data.ID.unique()))
print("Total Number of IDs in Validate Data:" , len(validate_data.ID.unique()))
print("Do the IDs add up?" , len(test_data.ID.unique()) + len(train_data.ID.unique()) + len(validate_data.ID.unique()) == len(dataset_no_active.ID.unique()))
# NOTE(review): the exact float equality checks below are brittle —
# round(n*0.8)/n is rarely exactly 0.8, so these booleans usually print
# False even when the split is correct; the percentage lines are the
# reliable diagnostic.
print("Does Test Represent 20% of the data?", (len(test_data.ID.unique())/len(dataset_no_active.ID.unique())) == holdOut)
print("Test Represents X% of the data:", (len(test_data.ID.unique())/len(dataset_no_active.ID.unique())))
# Bug fix: the numerator needs parentheses — the original computed
# train + (validate/total) due to operator precedence.
print("Does Train+Validate Represent 80% of the data?", (len(train_data.ID.unique())+len(validate_data.ID.unique()))/len(dataset_no_active.ID.unique()) == (1-holdOut))
print("Train+Validate Represents X% of the data:", (len(train_data.ID.unique())+len(validate_data.ID.unique()))/len(dataset_no_active.ID.unique()))
print("Does Validate Represent 20% of the Train+Validate Data?:", len(validate_data.ID.unique())/(len(train_data.ID.unique())+len(validate_data.ID.unique())))
print("Does Train Represent 80% of the Train+Validate Data?:", len(train_data.ID.unique())/(len(train_data.ID.unique())+len(validate_data.ID.unique())))
# -
# Sanity Check #2 - how representative are our datasets compared to the overall dataset
# Compare marginal distributions of each split against the full dataset.
dataset_no_active.describe()
test_data.describe()
train_data.describe()
validate_data.describe()
active_sentences.describe()
# +
# Distributions look pretty good across train,test, and validate compared to dataset minus active
# Active sentences don't look as close (most variables are fine, not Infractions though) but there's no reason
# currently incarcerated people would be a random sample of historical sentences
# Next need to check if the date distributions look good
print(dataset_no_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].min())
print(dataset_no_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].max())
print(train_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].min())
print(train_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].max())
print(validate_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].min())
print(validate_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].max())
print(test_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].min())
print(test_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].max())
# -
def fix_dates(data, date_var):
    """Coerce a date column to datetime, clamping far-future years and nulling sentinels.

    Parameters
    ----------
    data : pandas.DataFrame
        Frame holding the column; modified in place and also returned.
    date_var : str
        Name of the date column (values expected to start 'YYYY-...').

    Returns
    -------
    pandas.DataFrame
        The same frame with ``data[date_var]`` as datetime64; values that
        cannot be parsed become NaT.
    """
    # Year from the first four characters. Bug fixes vs. the original:
    # to_numeric(errors='coerce') instead of astype(int) so malformed
    # prefixes (e.g. NaN -> 'nan') don't raise, and the year is kept in a
    # local Series instead of a leftover 'new_col' column on the frame.
    years = pd.to_numeric(data[date_var].astype(str).str[0:4], errors='coerce')
    # pandas Timestamp cannot represent dates past 2262-04-11, so clamp
    # anything beyond 2261 to a safe in-range placeholder date.
    data.loc[years > 2261, date_var] = '2261-01-02'
    # 0 / "0" act as missing-value sentinels in this data.
    data[date_var] = data[date_var].replace(0, np.nan)
    data.loc[data[date_var] == "0", date_var] = None
    data[date_var] = pd.to_datetime(data[date_var], format='%Y-%m-%d', errors='coerce')
    return data
# +
# Normalise the sentence-date column on every split.
# NOTE(review): active_sentences is not passed through fix_dates here —
# the TODO list later in this notebook flags the same gap; confirm.
dataset_no_active = fix_dates(dataset_no_active,'EARLIEST_SENTENCE_EFFECTIVE_DT')
train_data = fix_dates(train_data,'EARLIEST_SENTENCE_EFFECTIVE_DT')
test_data = fix_dates(test_data,'EARLIEST_SENTENCE_EFFECTIVE_DT')
validate_data = fix_dates(validate_data,'EARLIEST_SENTENCE_EFFECTIVE_DT')
# +
#dataset_no_active.dtypes
# -
# Plot sentence counts per year to confirm the splits share the same
# temporal distribution.
dataset_no_active.groupby([dataset_no_active['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year]).size().plot(kind="line")
train_data.groupby([train_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year]).size().plot(kind="line")
test_data.groupby([test_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year]).size().plot(kind="line")
validate_data.groupby([validate_data['EARLIEST_SENTENCE_EFFECTIVE_DT'].dt.year]).size().plot(kind="line")
# +
# All the date distributions look pretty good!
# -
# ## Pre Processing
# Final list of features:
# 1. Race - impute missing with majority in each dataset, then one-hot
# 2. Sex - impute missing with majority in each dataset, then one-hot
# 3. Age at sentence - impute missing with median in each dataset. Do we want to normalize?, then create:
# 13. Age category each sentence - then one-hot?
# 14. Age at first incarceration? - normalize?
# 15. Juvenile Offense Flag - binary indicator
# 4. Ethnicity - impute missing with majority in each dataset, then one-hot
# 5. and 6. Most serious current offense
# v1: OFFENSE CLEAN (no missing) - one-hot
# v2: Current Offense Risk Level, Lenient, Harsh (has missing, majority impute) one-hot
# 7. Disciplinary infractions - normalize?
# 8. Current crime violent
# v1: dont include
# v2: Current Offense Risk Level, Lenient, Harsh (has missing, majority impute),then categorize
# as 1 for receiving score 4 or 5 and 0 otherwise - binary indicator
# 9. and 10. Total count of felony and misdemeanor charges - normalize?
# 11. Total sentence count - normalize..?
# 12. Unemployment - normalize?
# 16. Control status - impute with majority and one-hot
# Feature-engineering helpers live in the local pipeline.py; reload so
# edits to that module are picked up without restarting the kernel.
import pipeline as pl
import importlib
importlib.reload(pl)
train_data.columns
# +
#train_data.head()
# +
# Race, Sex, ethnicity, Current_Offense_Risk_Level, Current_Offense_Risk_Level_Lenient,Current_Offense_Risk_Level_Harsh
categorical_vars_to_impute = ['RACE','GENDER','ETHNICITY', 'CONTROL_STATUS','OFFENSE_CLEAN', 'Current_Offense_Risk_Level', "Current_Offense_Risk_Level_Lenient","Current_Offense_Risk_Level_Harsh"]
#continuous_vars_to_impute = ['INFRACTION_PER_SENT','misd_count','felon_count','sentence_count','age_sentence','unemployment']
continuous_vars_to_impute = ['INFRACTION_PER_SENT','misd_count','felon_count','sentence_count']
# Impute categorical vars with the modal value, separately per split so no
# information leaks across train/test/validate.
train_data = pl.impute_most_common(train_data,categorical_vars_to_impute)
test_data = pl.impute_most_common(test_data,categorical_vars_to_impute)
validate_data = pl.impute_most_common(validate_data,categorical_vars_to_impute)
# Impute continuous vars (strategy defined in pipeline.py — confirm there).
train_data = pl.impute_missing(train_data,continuous_vars_to_impute)
test_data = pl.impute_missing(test_data,continuous_vars_to_impute)
validate_data = pl.impute_missing(validate_data,continuous_vars_to_impute)
# construct vars depending on those vars
# construct age_cat, age at first_sentence, juvenile_offense flag
# NOTE(review): age_cat is created on dataset_main, not on the split frames
# (train_data/test_data/validate_data) — this looks like it operates on the
# wrong dataframe; confirm intent.
dataset_main['age_cat'] = pd.cut(dataset_main['age_at_sentence'],
                                 bins=[0,17,21,24,29,34,39,44,49,54,59,64,90],
                                 labels=['Under 18', '18-21','22-24','25-29','30-34','35-39','40-44','45-49',
                                         '50-54','55-59','60-64','65 and older',])
# construct current crime violent v2: risk score 4 or 5 counts as violent.
train_data = pl.current_crime_violent(train_data,[4,5])
test_data = pl.current_crime_violent(test_data,[4,5])
validate_data = pl.current_crime_violent(validate_data,[4,5])
# then one-hot / normalize
# categorical_vars_one_hot = categorical_vars_to_impute + ['age_cat']
categorical_vars_one_hot = categorical_vars_to_impute
#continuous_vars_normalize = continuous_vars_to_impute + ['age_first_sentence']
continuous_vars_normalize = continuous_vars_to_impute
train_data = pl.one_hot_encode(train_data,categorical_vars_one_hot)
test_data = pl.one_hot_encode(test_data,categorical_vars_one_hot)
validate_data = pl.one_hot_encode(validate_data,categorical_vars_one_hot)
# def normalize_features(to_norm, train, features):
# Bug fix: the original read `train.copy()`, but `train` is never defined
# (NameError). Snapshot train_data BEFORE it is normalized so test and
# validate are scaled with training-set statistics.
train_backup = train_data.copy()
train_data = pl.normalize_features(train_data,train_backup,continuous_vars_normalize)
test_data = pl.normalize_features(test_data,train_backup,continuous_vars_normalize)
validate_data = pl.normalize_features(validate_data,train_backup,continuous_vars_normalize)
# -
# ## To Do (in no particular order):
#
#
# - one hot encoding adjustment (i.e. drop from validation / test if they dont appear in training, add empty cols if they appear in training but not test) (Damini)
#
# - i held out the active sentences before doing the train/test split, but need to go back and make sure the relevant pre processing also happens on that dataset (Damini)
#
# - once we're reasonably happy with all this, move everything in build_dataset and features_split_preprocess into .py files that can be called on to build the dataset + features in one go, with no intermediate csvs. Then, we can use a jupyter notebook to actually run the analysis (Charmaine and Damini - mostly on Friday)
#
# - build classifiers starting with random forest, decision tree, multinomial regression (C+D on Saturday/sunday)
#
# - Idk why i'm getting all of these set copy warnings - check if everything is working ok? (Damini - if still happening post re org)
#
#
# - I feel pretty yucky about majority-imputing race / sex because it's obviously going to be mostly black men. Given that demographics aren't missing that often, thoughts on dropping observations missing demographics? (right at the onset, before train/test/validate/active split) (Charmaine)
#
#
# - bringing in age / unemployment features from first half of the notebook (Charmaine - DONE)
# - trim data at some point BEFORE imputing unemployment to be in line with the correct time frame (Charmaine - DONE)
# - add time fixed effects! month-year? year? (Charmaine - DONE)
#
# +
#train_data.shape
# +
#train_data.describe()
# +
#x = train_data['Current_Offense_Risk_Level'].isin([4,5])
# +
#x.head()
# +
#
# +
#train_data.head()
# +
#train_data['CONTROL_STATUS'].isnull().sum()
# +
#train_data.dtypes
# -
| files/_archive/features_split_preprocess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Caedus-Covid Notebook
#
# ## Model Parameters
#
# The following constants were obtained from the spreadsheet
# `MVP Physical Capacity Model`
import pandas as pd
import numpy as np
# +
## Total Hospital Beds
TOTAL_BEDS = 83371
## Total ICU Beds
TOTAL_ICU_BEDS = 10392
## Number of Med-Techs
## NOTE(review): the zero headcounts below appear to be placeholders — the
## corresponding supply/demand columns are left as None later on.
MED_TECHS = 0
## Number of Nurses
NURSES = 0
## Number of Physicians
PHYSICIANS = 12254
## Number of Respiratory Therapists
R_THERAPISTS = 0
## Parameters from spreadsheet
## _LB / _MEAN / _UB suffixes = lower-bound / mean / upper-bound scenarios.
## Bed Occupancy Rate
BED_OCC_RT_LB = 0.2
BED_OCC_RT_MEAN = 0.25
BED_OCC_RT_UB = 0.35
## ICU Bed Occupancy Rate
ICU_BED_OCC_RT_LB = 0.3
ICU_BED_OCC_RT_MEAN = 0.3
ICU_BED_OCC_RT_UB = 0.3
## Hospitalization Rate
HOSP_RT_LB = 0.21
HOSP_RT_MEAN = 0.26
HOSP_RT_UB = 0.31
## ICU admissions
ICU_ADM_LB = 0.05
ICU_ADM_MEAN = 0.085
ICU_ADM_UB = 0.12
## ICU vent rate
ICU_VENT_RT_LB = 0.16
ICU_VENT_RT_MEAN = 0.46
ICU_VENT_RT_UB = 0.7
## Infection rate of healthcare workers
## Vent occupancy rate
## Number of vents per ICU bed
NUM_VENTS_PER_ICU_BED_LB = 1.5
NUM_VENTS_PER_ICU_BED_MEAN = 1.5
NUM_VENTS_PER_ICU_BED_UB = 1.5
## Available Personnel
AVAIL_PERSONNEL_LB = 0.357
AVAIL_PERSONNEL_MEAN = 0.357
AVAIL_PERSONNEL_UB = 0.357
## Number of Patients per vent
PATIENTS_PER_VENT_LB = 1.0
PATIENTS_PER_VENT_MEAN = 1.0
PATIENTS_PER_VENT_UB = 1.0
## ICU Patients per Physician
ICU_PATIENTS_PER_PHYSICIAN = 15
## Non-ICU Patients per Physician
NON_ICU_PATIENTS_PER_PHYSICIAN = 25
## ICU Patients per Nurse
## Non-ICU Patients per Nurse
## ICU Patients per Med-Tech
## Non-ICU Patients per Med-Tech
## ICU Patients per Resp Therapist
## Non-ICU Patients per Resp Therapist
# +
# Load the SEIR model output (projected case counts over time) for Texas.
seir_output_df = pd.read_csv('tx_seir_output_Thu_Apr__2_11_05_47_2020.csv')
# +
seir_output_df['Date']=pd.to_datetime(seir_output_df['Date'])
seir_output_df.dtypes
# -
seir_output_df.head()
# Work with the first seven columns of the projections.
df = seir_output_df.iloc[:,:7]
df.head(10)
df.iloc[69]
# +
# Hospitalizations under the three scenarios = projected cases * rate.
df['hospitalized_best']=df['Cases_Mean']*HOSP_RT_LB
df['hospitalized_mean']=df['Cases_Mean']*HOSP_RT_MEAN
df['hospitalized_worst']=df['Cases_Mean']*HOSP_RT_UB
# ICU admissions under the three scenarios.
df['ICU_best']=df['Cases_Mean']*ICU_ADM_LB
df['ICU_mean']=df['Cases_Mean']*ICU_ADM_MEAN
df['ICU_worst']=df['Cases_Mean']*ICU_ADM_UB
# Beds free for COVID patients after baseline (non-COVID) occupancy.
df['total_beds']= (1.0-BED_OCC_RT_MEAN)*TOTAL_BEDS
df['regular_bed_supply_best'] = df['total_beds'] - df['hospitalized_best']
df['regular_bed_supply_mean'] = df['total_beds'] - df['hospitalized_mean']
df['regular_bed_supply_worst'] = df['total_beds'] - df['hospitalized_worst']
df['total_ICU_beds'] = (1.0-ICU_BED_OCC_RT_MEAN)*TOTAL_ICU_BEDS
df['ICU_bed_supply_best']= df['total_ICU_beds']-df['ICU_best']
df['ICU_bed_supply_mean']= df['total_ICU_beds']-df['ICU_mean']
df['ICU_bed_supply_worst']= df['total_ICU_beds']-df['ICU_worst']
# Ventilator demand: the scenarios vary the vent *rate* while holding the
# ICU census at its mean. NOTE(review): confirm this asymmetry is intended.
df['patient_vent_lb']= ICU_VENT_RT_LB*df['ICU_mean']
df['patient_vent_mean']= ICU_VENT_RT_MEAN*df['ICU_mean']
df['patient_vent_ub']= ICU_VENT_RT_UB*df['ICU_mean']
df['total_vents']=NUM_VENTS_PER_ICU_BED_MEAN * TOTAL_ICU_BEDS * PATIENTS_PER_VENT_MEAN
df['open_vents_lb'] = df['total_vents'] - df['patient_vent_lb']
df['open_vents_mean'] = df['total_vents'] - df['patient_vent_mean']
df['open_vents_ub'] = df['total_vents'] - df['patient_vent_ub']
# Personnel columns: med-tech and nurse figures are placeholders (headcounts
# above are 0), so the derived columns stay None for now.
df['med_tech'] = MED_TECHS
df['med_tech_active'] = None
df['med_tech_supply'] = None
df['med_tech_demand'] = None
df['nurse'] = NURSES
df['nurse_active'] = None
df['nurse_supply'] = None
df['nurse_demand'] = None
df['phys'] = PHYSICIANS
df['phys_active'] = df['phys'] * AVAIL_PERSONNEL_MEAN
df['phys_supply'] = None
# Physician demand, rounded up: ICU patients staffed at one physician per
# ICU_PATIENTS_PER_PHYSICIAN, non-ICU patients at one per
# NON_ICU_PATIENTS_PER_PHYSICIAN.
df['phys_demand'] = np.ceil(
    (1/ICU_PATIENTS_PER_PHYSICIAN*df['ICU_mean'])
    + (1/NON_ICU_PATIENTS_PER_PHYSICIAN * (df['hospitalized_mean']-df['ICU_mean']))
)
# Respiratory-therapist columns: headcount is recorded; the derived
# columns remain unfilled placeholders.
df['resp'] = R_THERAPISTS
for _resp_col in ('resp_active', 'resp_supply', 'resp_demand'):
    df[_resp_col] = None
# +
## Validate against Kyle's computations
# Spot-check one row of the derived frame against the spreadsheet model.
df.iloc[69]
# -
| covid-modeling-master/Physical_Capacity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/maderix/covid_analysis/blob/master/reddit_covid_positive_word_embeddings.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="nG263bVS_VSr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 817} outputId="0e1b653b-1647-421c-db6f-ba0030f2783d"
# !pip install praw
# !pip install tf-nightly
import tensorflow as tf
import tensorflow_datasets as tfds
import os
import numpy
# + id="QUstc6Py_qBX" colab_type="code" colab={}
import praw
# SECURITY NOTE(review): API credentials are hard-coded and committed to
# source control — they should be rotated and loaded from environment
# variables or a praw.ini file instead.
reddit = praw.Reddit(client_id='JlcL0XKfkM6fBw', client_secret='th1yQxpTkAftyan22fiN4sH9nuA', user_agent='covidscraper')
# + id="VqVo6TITAUfk" colab_type="code" colab={}
# Collect post bodies and scores for self-reported positive cases.
covid_desc = []
covid_scores = []
results = reddit.subreddit('COVID19positive').search('flair:"Tested Positive - Me"', limit=1000)
for submission in results:
    covid_desc.append(submission.selftext)
    covid_scores.append(submission.score)

# Scale scores by the maximum so the regression target tops out at 1.
# NOTE(review): this divides by zero if the search returns nothing or the
# top score is 0 — confirm upstream guarantees.
covid_scores = numpy.array(covid_scores)
covid_scores = covid_scores / numpy.max(covid_scores)
# + id="5v1QgRUkAaoX" colab_type="code" colab={}
import numpy
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import LSTM
from tensorflow.keras.callbacks import ModelCheckpoint
# + id="TyjOXAavAdAr" colab_type="code" colab={}
# Pair each post body with its normalized score as a tf.data pipeline.
dataset = tf.data.Dataset.from_tensor_slices((covid_desc,covid_scores))
# + id="J9BZ3Nd3Ai9R" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="5a6723d8-0343-42d6-e519-cc177dbe96ce"
# Build the token vocabulary over every description in the dataset.
tokenizer = tfds.features.text.Tokenizer()
vocabulary_set = set()
for text_tensor, _ in dataset:
    vocabulary_set |= set(tokenizer.tokenize(text_tensor.numpy()))

vocab_size = len(vocabulary_set)
vocab_size
# + id="ZruRvIopAnXY" colab_type="code" colab={}
# Integer-encode tokens against the vocabulary built above.
encoder = tfds.features.text.TokenTextEncoder(vocabulary_set)
# + id="e21R4n-bBZ0A" colab_type="code" colab={}
def encode(text_tensor, label):
    """Encode a text tensor's bytes into token ids; pass the label through."""
    return encoder.encode(text_tensor.numpy()), label
# + id="8oAq87BaBbsr" colab_type="code" colab={}
def encode_map_fn(text, label):
    """Graph-compatible wrapper around `encode` for use with Dataset.map."""
    # tf.py_function leaves output shapes unknown, so declare them below.
    tokens, score = tf.py_function(
        encode, inp=[text, label], Tout=(tf.int64, tf.float64))
    # Datasets behave best when every component has a static shape:
    tokens.set_shape([None])  # variable-length token sequence
    score.set_shape([])       # scalar label
    return tokens, score

encoded_data = dataset.map(encode_map_fn)
# + id="Z-PVE3GLCLXp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="96c7f61c-a9ef-4a32-bd4a-d567e18ed694"
# Hold out the first TAKE_SIZE examples for validation; shuffle the rest.
# NOTE(review): take/skip without a prior shuffle means the test set is the
# first 16 examples in search order — confirm that is acceptable.
TAKE_SIZE = 16
BUFFER_SIZE = 256
BATCH_SIZE = 4
train_data = encoded_data.skip(TAKE_SIZE).shuffle(BUFFER_SIZE)
# Pad token sequences to a common length per batch; labels stay scalar.
train_data = train_data.padded_batch(BATCH_SIZE, padded_shapes=([None],[]))
test_data = encoded_data.take(TAKE_SIZE)
test_data = test_data.padded_batch(BATCH_SIZE, padded_shapes=([None],[]))
len(list(test_data))
# + id="hGAva52-CyB2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 555} outputId="12d8ef25-2fe0-4cdf-bd45-aae89069a5de"
# Peek at one padded batch to confirm the pipeline shape.
sample_text, sample_labels = next(iter(test_data))
sample_text[0], sample_labels[0]
# + id="LYFCHk_TDEXU" colab_type="code" colab={}
# Reserve id 0 for the padding token introduced by padded_batch.
vocab_size += 1
# + id="e1itCNMWE2u-" colab_type="code" colab={}
# Embedding -> BiLSTM -> two ReLU layers -> regression head, trained with
# MSE against the normalized post score.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(vocab_size, 64))
model.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)))
# One or more dense layers.
# Edit the list in the `for` line to experiment with layer sizes.
for units in [64, 64]:
    model.add(tf.keras.layers.Dense(units, activation='relu'))
# Output layer: a single unit for the scalar regression target.
# Bug fix: the original used Dense(3) (copied from a 3-class classification
# tutorial); with an MSE loss on scalar scores the head must emit one value.
model.add(tf.keras.layers.Dense(1))
model.compile(optimizer='adam',
              loss='mse')
# + id="23BZ9racFBAA" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 381} outputId="6880686d-a00f-483c-95d2-93f36937c0e7"
model.fit(train_data, epochs=10, validation_data=test_data)
# + id="Q0-yId-RFGla" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="9a2e90b8-673e-4086-92f0-bed32d0f5db0"
# Grab the trained embedding matrix (rows = token ids, cols = 64 dims).
e = model.layers[0]
weights = e.get_weights()[0]
print(weights.shape)
# + id="LHWP2tPmMBff" colab_type="code" colab={}
import io

# Export learned embedding vectors (vecs.tsv) and their tokens (meta.tsv)
# in the format expected by the TensorFlow Embedding Projector. Context
# managers guarantee the files are flushed and closed.
with io.open('vecs.tsv', 'w', encoding='utf-8') as out_v, \
        io.open('meta.tsv', 'w', encoding='utf-8') as out_m:
    for num, word in enumerate(encoder.tokens):
        vec = weights[num+1]  # skip row 0: it's the padding token
        out_m.write(word + "\n")
        out_v.write('\t'.join([str(x) for x in vec]) + "\n")
# + id="G00tLJcnMFob" colab_type="code" colab={}
# Trigger browser downloads when running inside Google Colab; silently do
# nothing in other environments where google.colab is unavailable.
try:
    from google.colab import files
except ImportError:
    pass
else:
    for artifact in ('vecs.tsv', 'meta.tsv'):
        files.download(artifact)
# + id="GJHS-Zd3Mx7h" colab_type="code" colab={}
| reddit_covid_positive_word_embeddings.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.9.2 64-bit (''base'': conda)'
# name: python392jvsc74a57bd098b0a9b7b4eaaa670588a142fd0a9b87eaafe866f1db4228be72b4211d12040f
# ---
# ---
# author: <NAME> (<EMAIL>)
# ---
#
# This answer assumes you have imported SymPy as follows.
from sympy import * # load all math functions
init_printing( use_latex='mathjax' ) # use pretty math output
# Let's consider the example of the unit circle, $x^2+y^2=1$.
#
# To plot it, SymPy first expects us to move everything to the left-hand side
# of the equation, so in this case, we would have $x^2+y^2-1=0$.
#
# We then use that left hand side to represent the equation as a single formula,
# and we can plot it with SymPy's `plot_implicit` function.
var( 'x y' )
# Left-hand side of x^2 + y^2 - 1 = 0; plot_implicit graphs its zero set.
formula = x**2 + y**2 - 1 # to represent x^2+y^2=1
plot_implicit( formula )
| database/tasks/How to graph curves that are not functions/Python, using SymPy.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["context"] deletable=false editable=false dc={"key": "4"} run_control={"frozen": true}
# ## 1. Loading the NIPS papers
# <p>The NIPS conference (Neural Information Processing Systems) is one of the most prestigious yearly events in the machine learning community. At each NIPS conference, a large number of research papers are published. Over 50,000 PDF files were automatically downloaded and processed to obtain a dataset on various machine learning techniques. These NIPS papers are stored in <code>datasets/papers.csv</code>. The CSV file contains information on the different NIPS papers that were published from 1987 until 2017 (30 years!). These papers discuss a wide variety of topics in machine learning, from neural networks to optimization methods and many more.
# <img src="https://s3.amazonaws.com/assets.datacamp.com/production/project_158/img/nips_logo.png" alt="The logo of NIPS (Neural Information Processing Systems)"></p>
# <p>First, we will explore the CSV file to determine what type of data we can use for the analysis and how it is structured. A research paper typically consists of a title, an abstract and the main text. Other data such as figures and tables were not extracted from the PDF files. Each paper discusses a novel technique or improvement. In this analysis, we will focus on analyzing these papers with natural language processing methods.</p>
# + tags=["sample_code"] dc={"key": "4"}
# Importing modules
# -- YOUR CODE HERE --
import pandas as pd
# Read datasets/papers.csv into papers
# (one row per NIPS paper, 1987-2017).
papers = pd.read_csv('datasets/papers.csv')
# Print out the first rows of papers
# -- YOUR CODE HERE --
print(papers.head())
# + tags=["context"] deletable=false editable=false dc={"key": "11"} run_control={"frozen": true}
# ## 2. Preparing the data for analysis
# <p>For the analysis of the papers, we are only interested in the text data associated with the paper as well as the year the paper was published in.</p>
# <p>We will analyze this text data using natural language processing. Since the file contains some metadata such as id's and filenames, it is necessary to remove all the columns that do not contain useful text information.</p>
# + tags=["sample_code"] dc={"key": "11"}
# Remove the columns
# -- YOUR CODE HERE --
# Drop metadata columns not useful for the text analysis.
papers.drop(['id','event_type','pdf_name'],axis=1,inplace=True)
# Print out the first rows of papers
# -- YOUR CODE HERE --
print(papers.head())
# + tags=["context"] deletable=false editable=false dc={"key": "18"} run_control={"frozen": true}
# ## 3. Plotting how machine learning has evolved over time
# <p>In order to understand how the machine learning field has recently exploded in popularity, we will begin by visualizing the number of publications per year. </p>
# <p>By looking at the number of published papers per year, we can understand the extent of the machine learning 'revolution'! Typically, this significant increase in popularity is attributed to the large amounts of compute power, data and improvements in algorithms.</p>
# + tags=["sample_code"] dc={"key": "18"}
# Group the papers by year
groups = papers.groupby('year')
# Determine the size of each group
counts = groups.size()
# Visualise the counts as a bar plot
import matplotlib.pyplot
# %matplotlib inline
# -- YOUR CODE HERE --
# Bug fix: .plot() defaults to a line chart; the task text (and the comment
# above) asks for a bar plot of publications per year.
counts.plot(kind='bar')
# + tags=["context"] deletable=false editable=false dc={"key": "25"} run_control={"frozen": true}
# ## 4. Preprocessing the text data
# <p>Let's now analyze the titles of the different papers to identify machine learning trends. First, we will perform some simple preprocessing on the titles in order to make them more amenable for analysis. We will use a regular expression to remove any punctuation in the title. Then we will perform lowercasing. We'll then print the titles of the first rows before and after applying the modification.</p>
# + tags=["sample_code"] dc={"key": "25"}
# Load the regular expression library
# -- YOUR CODE HERE --
import re
# Print the titles of the first rows
print(papers['title'].head())
# Remove punctuation (commas, periods, exclamation and question marks only)
papers['title_processed'] = papers['title'].map(lambda x: re.sub('[,\.!?]', '', x))
# Convert the titles to lowercase
papers['title_processed'] = papers['title_processed'].str.lower()
# Print the processed titles of the first rows
# -- YOUR CODE HERE --
print(papers.head())
# + tags=["context"] deletable=false editable=false dc={"key": "32"} run_control={"frozen": true}
# ## 5. A word cloud to visualize the preprocessed text data
# <p>In order to verify whether the preprocessing happened correctly, we can make a word cloud of the titles of the research papers. This will give us a visual representation of the most common words. Visualisation is key to understanding whether we are still on the right track! In addition, it allows us to verify whether we need additional preprocessing before further analyzing the text data.</p>
# <p>Python has a massive number of open libraries! Instead of trying to develop a method to create word clouds ourselves, we'll use Andreas Mueller's <a href="http://amueller.github.io/word_cloud/">wordcloud library</a>.</p>
# + tags=["sample_code"] dc={"key": "32"}
# Import the wordcloud library
# -- YOUR CODE HERE --
import wordcloud
# Join the different processed titles together.
s = " "
long_string = s.join(papers['title_processed'])
# Create a WordCloud object.
# Bug fix: the original rebound the name `wordcloud` to the instance,
# shadowing the module and making it unreachable for any later cell.
cloud = wordcloud.WordCloud()
# Generate a word cloud
# -- YOUR CODE HERE --
cloud.generate(long_string)
# Visualize the word cloud
cloud.to_image()
# + tags=["context"] deletable=false editable=false dc={"key": "39"} run_control={"frozen": true}
# ## 6. Prepare the text for LDA analysis
# <p>The main text analysis method that we will use is latent Dirichlet allocation (LDA). LDA is able to perform topic detection on large document sets, determining what the main 'topics' are in a large unlabeled set of texts. A 'topic' is a collection of words that tend to co-occur often. The hypothesis is that LDA might be able to clarify what the different topics in the research titles are. These topics can then be used as a starting point for further analysis.</p>
# <p>LDA does not work directly on text data. First, it is necessary to convert the documents into a simple vector representation. This representation will then be used by LDA to determine the topics. Each entry of a 'document vector' will correspond with the number of times a word occurred in the document. In conclusion, we will convert a list of titles into a list of vectors, all with length equal to the vocabulary. For example, <em>'Analyzing machine learning trends with neural networks.'</em> would be transformed into <code>[1, 0, 1, ..., 1, 0]</code>.</p>
# <p>We'll then plot the 10 most common words based on the outcome of this operation (the list of document vectors). As a check, these words should also occur in the word cloud.</p>
# + tags=["sample_code"] dc={"key": "39"}
# Load the library with the CountVectorizer method
from sklearn.feature_extraction.text import CountVectorizer
import numpy as np
# Helper function
def plot_10_most_common_words(count_data, count_vectorizer):
    """Bar-plot the ten highest-frequency vocabulary terms in count_data."""
    import matplotlib.pyplot as plt
    vocab = count_vectorizer.get_feature_names()
    # Accumulate per-term totals across all document vectors.
    totals = np.zeros(len(vocab))
    for row in count_data:
        totals += row.toarray()[0]
    # Keep the ten largest (term, total) pairs, highest first.
    top_ten = sorted(zip(vocab, totals), key=lambda pair: pair[1], reverse=True)[0:10]
    labels = [term for term, _ in top_ten]
    heights = [total for _, total in top_ten]
    positions = np.arange(len(labels))
    plt.bar(positions, heights, align='center')
    plt.xticks(positions, labels, rotation=90)
    plt.xlabel('words')
    plt.ylabel('counts')
    plt.title('10 most common words')
    plt.show()
# Initialise the count vectorizer with the English stop words
count_vectorizer = CountVectorizer(stop_words='english')
# Fit and transform the processed titles.
# Bug fix: the original fitted on the raw 'title' column, which defeats the
# punctuation-stripping/lowercasing step performed above.
count_data = count_vectorizer.fit_transform(papers['title_processed'])
# Visualise the 10 most common words
plot_10_most_common_words(count_data, count_vectorizer)
# + tags=["context"] deletable=false editable=false dc={"key": "46"} run_control={"frozen": true}
# ## 7. Analysing trends with LDA
# <p>Finally, the research titles will be analyzed using LDA. Note that in order to process a new set of documents (e.g. news articles), a similar set of steps will be required to preprocess the data. The flow that was constructed here can thus easily be exported for a new text dataset.</p>
# <p>The only parameter we will tweak is the number of topics in the LDA algorithm. Typically, one would calculate the 'perplexity' metric to determine which number of topics is best and iterate over different amounts of topics until the lowest 'perplexity' is found. For now, let's play around with a different number of topics. From there, we can distinguish what each topic is about ('neural networks', 'reinforcement learning', 'kernel methods', 'gaussian processes', etc.).</p>
# + tags=["sample_code"] dc={"key": "46"}
import warnings
# Silence sklearn deprecation chatter emitted while fitting LDA.
warnings.simplefilter("ignore", DeprecationWarning)
# Load the LDA model from sk-learn
from sklearn.decomposition import LatentDirichletAllocation as LDA
# Helper function
def print_topics(model, count_vectorizer, n_top_words):
    """Print the n_top_words highest-weighted vocabulary terms per LDA topic."""
    vocab = count_vectorizer.get_feature_names()
    for idx, weights in enumerate(model.components_):
        # argsort is ascending, so walk it backwards for the top terms.
        top = weights.argsort()[:-n_top_words - 1:-1]
        print("\nTopic #%d:" % idx)
        print(" ".join(vocab[i] for i in top))
# Tweak the two parameters below (use int values below 15)
# NOTE(review): 20 exceeds the "below 15" guidance in the line above —
# confirm this was intentional experimentation.
number_topics = 20
number_words = 20
# Create and fit the LDA model
lda = LDA(n_components=number_topics)
lda.fit(count_data)
# Print the topics found by the LDA model
print("Topics found via LDA:")
print_topics(lda, count_vectorizer, number_words)
# + tags=["context"] deletable=false editable=false dc={"key": "53"} run_control={"frozen": true}
# ## 8. The future of machine learning
# <p>Machine learning has become increasingly popular over the past years. The number of NIPS conference papers has risen exponentially, and people are continuously looking for ways on how they can incorporate machine learning into their products and services.</p>
# <p>Although this analysis focused on analyzing machine learning trends in research, a lot of these techniques are rapidly being adopted in industry. Following the latest machine learning trends is a critical skill for a data scientist, and it is recommended to continuously keep learning by going through blogs, tutorials, and courses.</p>
# + tags=["sample_code"] dc={"key": "53"}
# The historical data indicates that:
# Final answer flag for the exercise: publication counts keep rising.
more_papers_published_in_2018 = True
| The GitHub History of the Scala Language/notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 02 scikit-learn 中的 kNN
import numpy as np
import matplotlib.pyplot as plt
# +
# Toy 2-D training set: the first five points belong to class 0 and the
# last five to class 1.
raw_data_X = [[3.393533211, 2.331273381],
              [3.110073483, 1.781539638],
              [1.343808831, 3.368360954],
              [3.582294042, 4.679179110],
              [2.280362439, 2.866990263],
              [7.423436942, 4.696522875],
              [5.745051997, 3.533989803],
              [9.172168622, 2.511101045],
              [7.792783481, 3.424088941],
              [7.939820817, 0.791637231]
              ]
raw_data_y = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
X_train = np.array(raw_data_X)
y_train = np.array(raw_data_y)
# Single query point to classify.
x = np.array([8.093607318, 3.365731514])
# -
# Run the hand-written implementation (defines kNN_classify).
# %run kNN_function/kNN.py
predict_y = kNN_classify(6, X_train, y_train, x)
predict_y
# ### Using kNN from scikit-learn
from sklearn.neighbors import KNeighborsClassifier
kNN_classifier = KNeighborsClassifier(n_neighbors=6)
kNN_classifier.fit(X_train, y_train)
# NOTE(review): modern scikit-learn raises ValueError for 1-D input here;
# the reshape below is the fix the notebook goes on to demonstrate.
kNN_classifier.predict(x)
# scikit-learn expects a 2-D (n_samples, n_features) array.
X_predict = x.reshape(1, -1)
X_predict
kNN_classifier.predict(X_predict)
y_predict = kNN_classifier.predict(X_predict)
y_predict[0]
# ### Reorganizing our kNN code
# See the code [here](kNN/KNN.py)
# %run kNN/kNN.py
knn_clf = KNNClassifier(3)
knn_clf.fit(X_train, y_train)
y_predict = knn_clf.predict(X_predict)
y_predict
y_predict[0]
| 04-kNN/02-kNN-in-scikit-learn/02-kNN-in-scikit-learn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="z85-XBJe8fwX"
import numpy as np
import networkx as nx
import matplotlib.pyplot as plt
from IPython.display import Image
# + id="iRNLuKNm8fwh" colab={"base_uri": "https://localhost:8080/", "height": 551} outputId="8a6d4cdf-2026-4cdc-f92f-6eb3892112e4"
Image('graph.png')
# + [markdown] id="XfIn3hQlbsX-"
# We are going to use networkx package to construct the graph and find the shortest paths. Refer to the [NetworkX documentation](https://networkx.github.io/documentation/stable/).
# + colab={"base_uri": "https://localhost:8080/"} id="Zf70Jff6bWYI" outputId="8eef3513-4665-444e-e08a-15534eac1168"
#type in the edges and edgecost as a list of 3-tuples
edges = [(0,1,2),(0,2, 1.5),(0,3, 2.5),(1,4, 1.5),(2,5, 0.5),(4,8, 1),
(2,6, 2.5),(3,7, 2),(7,9, 1.25),(5,10, 2.75),(6,10, 3.25),
(9,10, 1.5),(8,10, 3.5)]
#Define an empty graph
G =nx.Graph()
#populate the edges and the cost in graph G
G.add_weighted_edges_from(edges, weight='cost')
#Find the shortest path from Node 0 to Node 10
print(nx.shortest_path(G, 0, 10, 'cost'))
#Find the cost of the shortest path from Node 0 to Node 10
print(nx.shortest_path_length(G, 0, 10, 'cost'))
# + [markdown] id="S0ceeptfi9Zs"
# Let us now move onto a grid which represents the robot's operating environment. First convert the grid to a graph. Then we will use Astar from networkX to find the shortest path
# + id="JP5UumQad3EG"
# write the Euclidean function that takes in the
# node x, y and compute the distance
def euclidean(node1, node2):
    """Straight-line (L2) distance between two (x, y) grid nodes."""
    dx = node1[0] - node2[0]
    dy = node1[1] - node2[1]
    return np.sqrt(dx * dx + dy * dy)
# + colab={"base_uri": "https://localhost:8080/"} id="DYzDj1yr2a6C" outputId="825d6f28-0d4d-4f2d-fe69-c37f1f09d00f"
grid = np.load("astar_grid.npy")
print(grid.shape)
# + colab={"base_uri": "https://localhost:8080/", "height": 700} id="N-uQvXAOtauO" outputId="313a5475-7f8f-4a61-ef3c-25f3995cbf29"
# use np.load to load a grid of 1s and 0s
# 1 - occupied 0- free
grid = np.load("astar_grid.npy")
# you can define your own start/ end; tuples are (row, col)
start = (0, 0)
goal = (0, 19)
# visualize the start/ end and the robot's environment
fig, ax = plt.subplots(figsize=(12,12))
ax.imshow(grid, cmap=plt.cm.Dark2)
# imshow puts rows on the y-axis, hence the (col, row) argument order below.
ax.scatter(start[1],start[0], marker = "+", color = "yellow", s = 200)
ax.scatter(goal[1],goal[0], marker = "+", color = "red", s = 200)
plt.show()
# + [markdown] id="h5ISh41o086P"
# Convert this grid array into a graph. You have to follow these steps
# 1. Find the dimensions of grid. Use grid_2d_graph() to initialize a grid graph of corresponding dimensions
# 2. Use remove_node() to remove nodes and edges of all cells that are occupied
# + colab={"base_uri": "https://localhost:8080/"} id="AuKatVcuchIa" outputId="3c9d5737-7af9-4683-df10-c91aaed10423"
#initialize a lattice graph with one node per grid cell
grid_size = grid.shape
G = nx.grid_2d_graph(*grid_size)
# free cell => grid[i, j] == 0 (kept); occupied cell => grid[i, j] == 1 (removed)
deleted_nodes = 0  # counter to keep track of deleted nodes
# (fix: the original declared deleted_nodes but counted in a second,
# misnamed variable num_nodes — one counter is enough)
for i in range(grid_size[0]):
    for j in range(grid_size[1]):
        if grid[i, j] == 1:
            # removing a node also removes its incident edges
            G.remove_node((i, j))
            deleted_nodes += 1
# sanity check: removed-node count should equal the occupied-cell count
print(f"removed {deleted_nodes} nodes")
print(f"number of occupied cells in grid {np.sum(grid)}")
# + [markdown] id="lvnzFEsm15_G"
# Visualize the resulting graph using nx.draw(). Note that pos argument for nx.draw() has been given below. The graph is too dense. Try changing the node_size and node_color. You can correlate this graph with the grid's occupied cells
# + colab={"base_uri": "https://localhost:8080/", "height": 319} id="vv5TZNPRzzzO" outputId="e981fe9c-7269-4f79-918c-496646bbda1e"
pos = {(x,y):(y,-x) for x,y in G.nodes()}
nx.draw(G, pos=pos, node_color='red', node_size=10)
# + [markdown] id="OUgJK3f22c3A"
# We are 2 more steps away from finding the path!
# 1. Set edge attribute. Use set_edge_attributes(). Remember we have to provide a dictionary input: Edge is the key and cost is the value. We can set every move to a neighbor to have unit cost.
# 2. Use astar_path() to find the path. Set heuristic to be euclidean distance. weight to be the attribute you assigned in step 1
# + id="K2cZ_VcL2aJr"
# Every move to a 4-connected neighbour costs 1.
nx.set_edge_attributes(G, {e: 1 for e in G.edges()}, "cost")
# A* with the Euclidean heuristic defined above.
astar_path = nx.astar_path(G, start, goal, heuristic=euclidean, weight="cost")
# + colab={"base_uri": "https://localhost:8080/"} id="h21DiKFO-8gu" outputId="28ac5439-5590-4435-eb16-07f25c2eeeb3"
astar_path
# + [markdown] id="K-243tW63Q8z"
# Visualize the path you have computed!
# + colab={"base_uri": "https://localhost:8080/", "height": 700} id="3fEEGhpOgwq9" outputId="55368507-aa3e-4e22-f485-cd02b7e6ef21"
fig, ax = plt.subplots(figsize=(12,12))
ax.imshow(grid, cmap=plt.cm.Dark2)
ax.scatter(start[1],start[0], marker = "+", color = "yellow", s = 200)
ax.scatter(goal[1],goal[0], marker = "+", color = "red", s = 200)
# Overlay each path cell (skipping the start node itself).
for s in astar_path[1:]:
    ax.plot(s[1], s[0],'r+')
# + [markdown] id="fLaAnQtG3ZVu"
# Cool! Now you can read arbitrary environments and find the shortest path between two robot positions. Pick a game environment from here and repeat: https://www.movingai.com/benchmarks/dao/index.html
# + id="wFtbNy1z39CH"
| week2/keivalyapandya2001/Q3 - Q/Attempt1_filesubmission_search_based_planning_package.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.8.5 64-bit (''Andy'': virtualenv)'
# metadata:
# interpreter:
# hash: ab2c6effcff14de217321fbbb614bff3794ce5785c4964fdaa7bfe00a73c95ca
# name: python3
# ---
# +
import pandas as pd
import numpy as np
# Simulate semiannual (June/December) returns for 2006-2016: December gets a
# +0.2 drift, June a -0.1 drift, plus uniform noise in [-0.05, 0.05).
year_list = []
month_list = []
rtn_list = []
for year in range(2006, 2017):
    for month in [6, 12]:
        year_list.append(year)
        month_list.append(month)
        rtn = round((-1)**(month/6)*(month/6/10), 3) +(np.random.random()-0.5)*0.1
        rtn_list.append(rtn)
df = pd.DataFrame()
df['year'] = year_list
df['month'] = month_list
df['rtn'] = rtn_list
df
# -
# Sharpe ratio from semiannual returns, annualized with sqrt(2)
# (two periods per year).
sr1 = round(df['rtn'].mean()/df['rtn'].std()*np.sqrt(2), 3)
sr1
# Aggregate to yearly returns and recompute without annualization.
df_year= df.groupby(['year']).sum()
del df_year['month']
df_year
sr2 = round(df_year['rtn'].mean()/df_year['rtn'].std(), 3)
sr2
| chapter8/section8.5_Sharpe_ratio_demo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
## Decision Tree
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
# List the files in the (machine-specific) working folder.
for dirname, _, filenames in os.walk('/Users/mohammadreza/Desktop/python'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
data = pd.read_csv('dataset.csv', encoding ='latin1')
data.info()
# Drop the identifier column; it carries no predictive signal.
data = data.drop(['ID'],axis=1)
data.head(10000)
y = data['Y'].values
y = y.reshape(-1,1)
x_data = data.drop(['Y'],axis = 1)
print(x_data)
# Min-max normalization to [0, 1].
# NOTE(review): computed on the full dataset before the split, which leaks
# test-set statistics into training — confirm whether that is acceptable here.
x = (x_data - np.min(x_data)) / (np.max(x_data) - np.min(x_data)).values
x.head(20000)
from sklearn.model_selection import train_test_split
# Unusually large 50% test split; seeded for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(x,y,test_size = 0.5,random_state=100)
y_train = y_train.reshape(-1,1)
y_test = y_test.reshape(-1,1)
print("x_train: ",x_train.shape)
print("x_test: ",x_test.shape)
print("y_train: ",y_train.shape)
print("y_test: ",y_test.shape)
from sklearn.tree import DecisionTreeClassifier
dtree = DecisionTreeClassifier()
# ravel() flattens y back to 1-D, as fit() expects.
dtree.fit(x_train, y_train.ravel())
# score() returns mean accuracy on the held-out half.
print("accuracy: ", dtree.score(x_test, y_test))
# -
| Decision-Tree.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:miniconda3-research]
# language: python
# name: python3
# ---
# +
import numpy as np
import torch
from datasets import load_dataset
from datasets.arrow_dataset import concatenate_datasets
# NLU Evaluation corpus: each example carries a text, a fine-grained label,
# and a coarse scenario (the label prefix before the first underscore).
data_set = load_dataset("nlu_evaluation_data",split='train')
labels = data_set.features["label"].names
# NOTE: set() makes the scenario ordering non-deterministic across runs.
scenarios = list(set(map(lambda x: x.split("_")[0], labels)))
# +
from transformers import AutoTokenizer, AutoModel
from transformers import DataCollatorWithPadding
tokenizer = AutoTokenizer.from_pretrained("roberta-large")
data_collator = DataCollatorWithPadding(tokenizer=tokenizer)
def add_mask(example):
    # NOTE(review): RoBERTa's mask token is "<mask>"; the literal "[MASK]"
    # appended here is tokenized as ordinary text — confirm this is intended.
    example['text'] = example["text"] + "[MASK]"
    return example
def preprocess_function(examples):
    return tokenizer(examples["text"], add_special_tokens=True, truncation=True)
data_set = data_set.map(add_mask)
tokenized_data_set = data_set.map(preprocess_function, batched=True)
model = AutoModel.from_pretrained("roberta-large")
device = torch.device("cuda")
model.to(device)
# Model created
# +
# Per-label / per-scenario reference embeddings under three pooling
# strategies: first token ('cls'), second-to-last token ('mask'), and the
# mean of the interior tokens ('avg').
label_embeddings = { lab: { } for lab in labels}
scen_embeddings = { scen: { } for scen in scenarios}
for label in labels:
    tokenized_label = tokenizer(label + '[MASK]', add_special_tokens=True)
    inp_tensor = torch.LongTensor(tokenized_label['input_ids']).unsqueeze(0).to(device)
    out = model(inp_tensor)[0].squeeze(0).cpu().detach().numpy()
    label_embeddings[label]['cls'] = out[0]
    # NOTE(review): out[-2] assumes the appended "[MASK]" text sits just
    # before a single trailing special token — confirm the tokenization.
    label_embeddings[label]['mask'] = out[-2]
    label_embeddings[label]['avg'] = np.mean(out[1:-2], 0)
for scen in scenarios:
    tokenized_label = tokenizer(scen + '[MASK]', add_special_tokens=True)
    inp_tensor = torch.LongTensor(tokenized_label['input_ids']).unsqueeze(0).to(device)
    out = model(inp_tensor)[0].squeeze(0).cpu().detach().numpy()
    scen_embeddings[scen]['cls'] = out[0]
    scen_embeddings[scen]['mask'] = out[-2]
    scen_embeddings[scen]['avg'] = np.mean(out[1:-2], 0)
# +
from tqdm import tqdm
# For every example, accumulate the true label's / scenario's inner product
# normalized by the sum over all candidates, per pooling strategy; averaged
# over the corpus at the end.
label_inner_prods = {'cls': 0.0, 'mask': 0.0, 'avg': 0.0}
scen_inner_prods = {'cls': 0.0, 'mask': 0.0, 'avg': 0.0}
for ind in tqdm(range(len(tokenized_data_set))):
    inp = tokenized_data_set[ind]
    act_label = labels[inp['label']]
    act_scen = inp['scenario']
    inp_tensor = torch.LongTensor(inp['input_ids']).unsqueeze(0).to(device)
    out = model(inp_tensor)[0].squeeze(0).cpu().detach().numpy()
    cls_emb = out[0]
    mask_emb = out[-2]
    avg_emb = np.mean(out[1:-2], 0)
    label_inner_prods['cls'] += cls_emb @ label_embeddings[act_label]['cls'] / sum([cls_emb @ label_embeddings[lab]['cls'] for lab in labels])
    label_inner_prods['mask'] += mask_emb @ label_embeddings[act_label]['mask'] / sum([mask_emb @ label_embeddings[lab]['mask'] for lab in labels])
    label_inner_prods['avg'] += avg_emb @ label_embeddings[act_label]['avg'] / sum([avg_emb @ label_embeddings[lab]['avg'] for lab in labels])
    scen_inner_prods['cls'] += cls_emb @ scen_embeddings[act_scen]['cls'] / sum([cls_emb @ scen_embeddings[scen]['cls'] for scen in scenarios])
    scen_inner_prods['mask'] += mask_emb @ scen_embeddings[act_scen]['mask'] / sum([mask_emb @ scen_embeddings[scen]['mask'] for scen in scenarios])
    scen_inner_prods['avg'] += avg_emb @ scen_embeddings[act_scen]['avg'] / sum([avg_emb @ scen_embeddings[scen]['avg'] for scen in scenarios])
# Normalize the accumulators into corpus-level means.
for tok in label_inner_prods:
    label_inner_prods[tok] /= len(tokenized_data_set)
for tok in scen_inner_prods:
    scen_inner_prods[tok] /= len(tokenized_data_set)
# -
# Inner product analysis
print(label_inner_prods)
print(scen_inner_prods)
| notebooks/embedding_inner_prod.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# In a process like the SGCN, we have some specific business rules we are following. We are trying to nail down species for a "National List" based on consultation with taxonomic authorities, putting every name on the national list where we find a valid record in ITIS or WoRMS to follow. Since we are essentially trusting ITIS first and only using the information from WoRMS as an additional possible source for taxonomic alignment, we can shortcut the process of consulting with WoRMS and only search for the ITIS leftovers. If we wanted to use the information in WoRMS for more than that, we might run all of the species names through this process.
# +
import pandas as pd
import bispy
import json
from joblib import Parallel, delayed
worms = bispy.worms.Worms()
# -
# Load the cached ITIS lookup results; the with-block closes the file,
# so the explicit close() the original carried was redundant.
with open('itis.json', 'r') as f:
    itis_data = json.load(f)
# Names ITIS could not resolve get a second chance against WoRMS.
itis_leftovers = [i["parameters"]["Scientific Name"] for i in itis_data if i["processing_metadata"]["status"] != "success"]
len(itis_leftovers)
# %%time
# Use joblib to run multiple requests for ITIS documents in parallel via known ITIS TSNs
worms_cache = Parallel(n_jobs=8)(delayed(worms.search)(name) for name in itis_leftovers)
# Persist the WoRMS responses next to the ITIS cache; the context manager
# handles closing, so no explicit close() is needed.
with open('worms.json', 'w') as f:
    json.dump(worms_cache, f)
| Build WoRMS Info.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training Tests for Part Of Speech tagging
#
# This notebook is dedicated to start working with the PoS dataset already pre-processed and the column networks that I'm creating.
#
# The network will be constructed from small parts, each will be trained on top of the previous one, adding a new column and decoder.
#
# +
from datetime import datetime
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.tensorboard import SummaryWriter
from langmodels.models import *
import langmodels.utf8codec as utf8codec
from langmodels.utils.tools import *
from langmodels.utils.preprocess_conllu import *
# -
# Load the embeddings first
# load the codebook and all the dictionaries mapping the data
# utf8codes, txt2code, code2txt, txt2num, num2txt = utf8codec._load_codebook()
utf8codes = np.load("./utf8-codes/utf8_codebook_overfit_matrix_2seg_dim64.npy")
utf8codes = utf8codes.reshape(1987,64)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
net = Conv1DPoS(utf8codes)
net = net.to(device)
count_parameters(net)
# for the original network 11266152 parameters, I have cut the number of features and dimensions to make it smaller
#
# for nlayers = 5 of dim 5 is 6912424 and 6846888 trainable
#
# for the following Conv1DPartOfSpeech the number of parameters is: 2161960 where 2096424 are trainable
#
# nchannels_in=[64, 128, 256, 512, 256],
# nchannels_out=[128, 256, 512, 256, 96],
# kernels=[3, 3, 3, 3, 3],
# nlayers=[6, 6, 4, 4, 3],
# groups=[1, 4, 8, 4, 1],
#
# And LinearUposDeprelDecoder params are:
#
# lin_in_dim=96,
# lin_hidd_dim=768,
# upos_dim=18,
# deprel_dim=278,
count_trainable_parameters(net)
# Datasets are the one that are heavy, so I'll just load them and check what happens
dataset_train = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.4/traindev_np_batches_779000x3x1024_uint16.npy"
data_train = np.load(dataset_train)
data_train.shape
len(data_train)
data_train.dtype
dta_train_txt = data_train[:,0,:]
dta_train_upos = data_train[:,1,:]
dta_train_deprel = data_train[:,2,:]
# +
# x = torch.from_numpy(dta_train_txt[:50].astype("int64")).to(device)
# +
# txtcode, positions, latent, dec = net(x)
# last_latent = latent[-1]
# upos, deprel = dec
# +
# txtcode.shape, positions.shape, last_latent.shape, upos.shape, # deprel.shape
# +
# out = torch.cat([upos,deprel], dim=2)
# +
# out.shape
# -
# upos and deprel data are given by indices, this keeps memory as low as possible, but they need to be encoded
upos_eye = torch.eye(len(UPOS))
deprel_eye = torch.eye(len(DEPREL))
with torch.no_grad():
upos_emb = nn.Embedding(*upos_eye.shape)
upos_emb.weight.data.copy_(upos_eye)
upos_emb = upos_emb.to(device)
deprel_emb = nn.Embedding(*deprel_eye.shape)
deprel_emb.weight.data.copy_(deprel_eye)
deprel_emb.to(device)
# +
# from https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def chunks(data, n, dim=0):
    """Yield successive n-sized chunks from *data* along axis *dim*.

    Fixes the original, which ignored *dim* and hard-coded 3-D slicing
    (data[i:i+n, :, :]); behaviour for the existing dim=0 call sites is
    unchanged, but arrays of any rank and any axis now work.
    """
    for start in range(0, data.shape[dim], n):
        index = [slice(None)] * data.ndim
        index[dim] = slice(start, start + n)
        yield data[tuple(index)]
# -
def loss_function(upos, deprel, target_upos, target_deprel):
    """Joint NLL loss for the UPOS and DEPREL prediction heads.

    Sums the negative log-likelihood of both heads.  Note the two terms are
    not balanced even though the DEPREL task (278 classes) is much harder
    than UPOS (18 classes).
    """
    upos_term = F.nll_loss(upos, target_upos)
    deprel_term = F.nll_loss(deprel, target_deprel)
    return upos_term + deprel_term
writer = SummaryWriter()
# +
# indata = torch.from_numpy(data_train[-2:,0,:].astype("int64")).to(device)
# +
# indata.shape
# +
# # %%time
# testing tensorboard add_graph to see if the network graph is drawn correctly ;)
# indata = torch.from_numpy(data_train[-2:,0,:].astype("int64")).to(device)
# writer.add_graph(net, indata)
# Kernel dies when I do this ... so ... :O
# -
def train(model, optimizer, loss_function, batches, epoch, ndatapoints, device, log_interval=100):
    """Run one training epoch over `batches` (UPOS head only).

    Each batch is an integer-coded array of shape (B, 3, seq): channel 0 is
    the text, channel 1 the UPOS tags, channel 2 the DEPREL tags (currently
    unused).  Per-batch losses go to the module-level TensorBoard `writer`.
    Returns the summed epoch loss.
    """
    model.train()
    train_loss = 0
    # batch_loss = []
    batch_idx = 1
    for b_data in batches:
        torch.cuda.empty_cache() # make sure the cache is emptied to begin the next batch
        b_train = torch.from_numpy(b_data[:,0,:].astype("int64")).squeeze().to(device).long()
        b_upos = torch.from_numpy(b_data[:,1,:].astype("int64")).squeeze().to(device).long()
        # b_deprel = torch.from_numpy(b_data[:,2,:].astype("int64")).squeeze().to(device).long()
        # tensor_data = torch.from_numpy(bdata).to(device).long() #.double() #.float()
        optimizer.zero_grad()
        txtcode, positions, latent, dec = model(b_train)
        last_latent = latent[-1]
        upos, deprel = dec
        # print(emb.shape,emb.dtype, res.shape, res.dtype)
        # print(upos.shape, b_upos.shape)
        # loss = loss_function(upos, deprel, upos_emb(b_upos), deprel_emb(b_deprel))
        # loss = loss_function(upos, deprel, b_upos, b_deprel)
        # Until I make it work, work only with the UPOS PoS as it will be faster MUCH faster
        # loss = F.kl_div(upos, upos_emb(b_upos), reduction="batchmean")
        # Flatten to (B*seq, 18) vs (B*seq,) for token-level NLL; 18 = len(UPOS).
        loss = F.nll_loss(upos.view([-1,18]),b_upos.view([-1]))
        # loss = F.cross_entropy(upos.view([-1,18]),b_upos.view([-1]))
        # loss = F.cross_entropy(upos,b_upos)
        # loss = F.mse_loss(upos, upos_emb(b_upos))
        loss.backward()
        train_loss += loss.data.item() # [0]
        writer.add_scalar("Loss/train", loss.data.item(), global_step=epoch*batch_idx)
        optimizer.step()
        if batch_idx % log_interval == 0:
            print('Timestamp {} Train Epoch: {} [{}/{} ]\tLoss: {:.6f}'.format(
                datetime.now(),
                epoch, batch_idx , (ndatapoints//len(b_data)),
                loss.data.item() / b_data.shape[0]))
        # batch_loss.append(loss)
        batch_idx += 1
        del(b_train)
        del(b_upos)
        # del(b_deprel)
        torch.cuda.empty_cache()
    # NOTE(review): batch_idx ends one past the number of batches, so this
    # average slightly underestimates the per-batch loss — confirm intent.
    writer.add_scalar("EpochLoss/train", train_loss / batch_idx, epoch)
    print('====> Timestamp {} Epoch: {} Average loss: {:.8f}'.format(datetime.now(), epoch, train_loss / ndatapoints))
    return train_loss
# load testing data ALL the training data
base_dir = "/home/leo/projects/Datasets/text/UniversalDependencies/ud-treebanks-v2.5"
# get all file paths for testing
all_fnames = get_all_files_recurse(base_dir)
fnames = [f for f in all_fnames if "test-charse" in f and f.endswith(".npy")]
len(fnames)
# load all test files
test_data = []
for f in fnames:
data = np.load(f)
lang_name = path_leaf(f).split("-ud")[0]
test_data.append((lang_name, data))
def test(model, loss_function, test_data, epoch, device, max_data=100):
    """Evaluate the UPOS head on up to `max_data` sentences per language.

    `test_data` is a list of (language_name, array) pairs; per-language and
    averaged losses are logged to the module-level TensorBoard `writer`.
    NOTE(review): gradients are not disabled here — wrapping the forward
    passes in torch.no_grad() would save memory; confirm.
    """
    model.eval()
    test_loss = 0
    for lang, d in test_data:
        torch.cuda.empty_cache() # make sure the cache is emptied to begin the next batch
        b_test = torch.from_numpy(d[:max_data,0,:].astype("int64")).squeeze().to(device).long()
        b_upos = torch.from_numpy(d[:max_data,1,:].astype("int64")).squeeze().to(device).long()
        # b_deprel = torch.from_numpy(d[:,2,:].astype("int64")).squeeze().to(device).long()
        _, _, _, dec = model(b_test)
        # last_latent = latent[-1]
        upos, _ = dec
        # Token-level NLL over the 18 UPOS classes.
        loss = loss_function(upos.view([-1,18]),b_upos.view([-1]))
        # loss = loss_function(res, tensor_data).data.item() # [0]
        test_loss += loss.data.item()
        writer.add_scalar("LangLoss/test/"+lang, loss.data.item(), global_step=epoch)
        del(b_test)
        del(b_upos)
        torch.cuda.empty_cache()
    test_loss /= len(test_data) # although this is not fair as different languages give different results
    writer.add_scalar("EpochLangLoss/test/", test_loss, global_step=epoch)
    print('epoch: {}====> Test set loss: {:.8f}'.format(epoch, test_loss))
# +
# reload model from saved state:
# net.network.load_model("./trained_models/conv1dcol", "conv1dcol_kl-div+1000batches-mse-loss_epoch-3_001")
# -
model = net.to(device)
# optimizer = torch.optim.Adam(model.parameters(), lr=1e-6, weight_decay=0, amsgrad=False )
# optimizer = torch.optim.AdamW(model.parameters())
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)
loss_function = F.nll_loss
data_train.shape, data_train.shape[0]//50
epoch_size = 10000
batch_size = 50
# data = data_train[-1000*batch_size:,:,:] # just for the trials, use the last 1000 batches only
data = data_train
print(data.shape)
epochs = chunks(data, epoch_size, dim=0)
# batches = chunks(data, batch_size, dim=0)
# +
# # %%time
# epoch_count = 0
# test(model, loss_function, test_data, epoch_count, device, max_data=50)
# -
# %%time
epoch_count = 1
for e in epochs:
batches = chunks(e, batch_size, dim=0)
eloss = train(model, optimizer, loss_function, batches, epoch_count, epoch_size, device, log_interval=10)
test(model, loss_function, test_data, epoch_count, device, max_data=50)
epoch_count+=1
# # %%time
# epoch_count = 2
# eloss = train(model, optimizer, loss_function, batches, epoch_count, len(data), device, log_interval=20)
model.network.save_model("./trained_models/conv1dcol", "conv1dcol_nll-loss_pretrained_epoch-{}".format(epoch_count))
epoch_count
# Tried different Nx50 sizes for batches but the only one that works is 50, it seems will be the maximum number of samples in each batch for the training in my GPU
# The issue is that training does not seem to work correctly.
#
# All training losses (kl_div, mse_loss) seem to learn well only for the first 100 batches and then stall — the loss just oscillates. After several different initializations with kl_div it worked better (the first loss was initialized to about -1 ...), so initialization seems to play an important role here.
#
# I need to write a test function now to be able to measure with the test datasets and see the real accuracy
#
#
torch.cuda.memory_allocated()
torch.cuda.memory_cached()
torch.cuda.empty_cache()
torch.cuda.memory_allocated()
torch.cuda.memory_cached()
| predictors/sequence/text/Conv1DColumnTrain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="qiskit-heading.gif" width="500 px" align="center">
# ## _*Qiskit Aqua: Using Grover Search for 3SAT problems*_
#
# This notebook is based on an official notebook by Qiskit team, available at https://github.com/qiskit/qiskit-tutorial under the [Apache License 2.0](https://github.com/Qiskit/qiskit-tutorial/blob/master/LICENSE) license.
#
# This notebook demonstrates how to use the `Qiskit Aqua` library Grover algorithm and process the result.
#
# Your **TASK** is to execute every step of this notebook while learning to use qiskit-aqua and also strengthening your knowledge of the Grover search algorithm.
import pylab
from qiskit_aqua import run_algorithm
from qiskit_aqua.input import get_input_instance
from qiskit.tools.visualization import matplotlib_circuit_drawer as draw
from qiskit.tools.visualization import plot_histogram
# We have a SAT problem to which we want to find solutions using Grover and SAT oracle combination. The SAT problem is specified in the [DIMACS CNF format](https://www.satcompetition.org/2009/format-benchmarks2009.html). We read one of
# the sample cnf files to load the problem.
#
# Try the code below for both sample files `3sat3-5.cnf` and `3sat2-3.cnf`.
with open('3sat3-5.cnf', 'r') as f:
sat_cnf = f.read()
print(sat_cnf)
# In order to run an algorithm we need to create a configuration dictionary with the parameters for the algorithm and any other dependent objects it requires. So we first define a dictionary for the algorithm. We name GROVER as the algorithm and as it has no further parameters we are done. GROVER needs an oracle so we configure one. Here we use the SAT oracle which will allow us to solve an optimization SAT problem by searching solution space. We configure the oracle with the problem we loaded above. We then combine the dictionaries into the final single params dictionary that is passed to run_algorithm.
# +
# Algorithm section: Grover's search needs no extra parameters itself.
algorithm_cfg = {
    'name': 'Grover'
}
# Oracle section: a SAT oracle built from the DIMACS CNF read above.
oracle_cfg = {
    'name': 'SAT',
    'cnf': sat_cnf
}
# Full declarative job description passed to Aqua's run_algorithm.
params = {
    'problem': {'name': 'search', 'random_seed': 50},
    'algorithm': algorithm_cfg,
    'oracle': oracle_cfg,
    'backend': {'name': 'qasm_simulator'}
}
result = run_algorithm(params)
# A satisfying assignment in DIMACS-style signed-variable form.
print(result['result'])
# -
# As seen above, a satisfying solution to the specified sample SAT problem is obtained, with the absolute values indicating the variable indices, and the signs the `True/False` assignments, similar to the DIMACS format.
#
# A measurements result is also returned where it can be seen, below in the plot, that result\['result'\] was the highest probability. But the other solutions were very close in probability too.
pylab.rcParams['figure.figsize'] = (8, 4)
plot_histogram(result['measurements'])
# Try the following command `draw(result['circuit'])`
#
# Which should produce a figure like:
#
# 
#
# If it does not and an error is shown instead, it is due to a bug in the drawing mechanism: the `cnx` gates may not be visible yet. This issue will surely be fixed in later versions.
# +
#draw(result['circuit'])
# -
# The above figure shows the circuit that was run for Grover. This circuit was returned from the algorithm for the above visualization which was generated using qiskit.tools.visualization functionality.
| community/awards/teach_me_quantum_2018/TeachMeQ/Week_8-High_Level_Quantum_Programming/exercises/w8_01.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Seaborn examples, taken from https://jakevdp.github.io/PythonDataScienceHandbook/04.14-visualization-with-seaborn.html
import numpy as np
import pandas as pd
import seaborn as sns
# Turning on notebook plots -- just for use in jupyter notebooks.
import matplotlib
matplotlib.use('nbagg')
import numpy as np
import matplotlib.pyplot as plt
sns.set()
# +
# Draw 2000 correlated Gaussian samples and histogram each column.
data = np.random.multivariate_normal([0, 0], [[5, 2], [2, 2]], size=2000)
data = pd.DataFrame(data, columns=['x', 'y'])
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
for col in 'xy':
    # `normed` was removed in matplotlib 3.1; `density` is the replacement.
    plt.hist(data[col], density=True, alpha=0.5)
# -
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
for col in 'xy':
    # NOTE(review): `shade=` is deprecated in modern seaborn (use `fill=`) — confirm version.
    sns.kdeplot(data[col], shade=True)
#fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(9, 4))
with sns.axes_style('white'):
    # NOTE(review): positional x/y args were removed in seaborn>=0.12;
    # keyword form is sns.jointplot(data=data, x="x", y="y", kind='hex').
    sns.jointplot("x", "y", data, kind='hex')
iris = sns.load_dataset("iris")
iris.head()
# NOTE(review): pairplot's `size=` was renamed `height=` in seaborn 0.9.
sns.pairplot(iris, hue='species', size=2.5);
tips = sns.load_dataset('tips')
tips.head()
# +
# Tip percentage, faceted by sex and meal time.
tips['tip_pct'] = 100 * tips['tip'] / tips['total_bill']
grid = sns.FacetGrid(tips, row="sex", col="time", margin_titles=True)
grid.map(plt.hist, "tip_pct", bins=np.linspace(0, 40, 15));
# -
with sns.axes_style(style='ticks'):
    # NOTE(review): factorplot was renamed catplot in seaborn 0.9.
    g = sns.factorplot("day", "total_bill", "sex", data=tips, kind="box")
    g.set_axis_labels("Day", "Total Bill");
sns.jointplot("total_bill", "tip", data=tips, kind='reg');
# Larger real dataset for further exploration.
url = 'https://raw.githubusercontent.com/jakevdp/marathon-data/master/marathon-data.csv'
data = pd.read_csv(url)
data.head()
| Visualization with Seaborn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/kumgame/colab1/blob/master/Torrent_To_Google_Drive_Downloader.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="aQuWDmfm9YOi" colab_type="text"
# # Torrent To Google Drive Downloader
# + [markdown] id="qYk44mBwJf6E" colab_type="text"
# **Important Note:** To get more disk space:
# > Go to Runtime -> Change Runtime and give GPU as the Hardware Accelerator. You will get around 384GB to download any torrent you want.
# + [markdown] id="NaFa7M-e9YOr" colab_type="text"
# ### Install libtorrent and Initialize Session
# + colab_type="code" id="m6hF0emftx4h" colab={}
# !apt install python3-libtorrent
import libtorrent as lt
ses = lt.session()
ses.listen_on(6881, 6891)
downloads = []
# + [markdown] id="Uf90U73y9YOj" colab_type="text"
# ### Mount Google Drive
# To stream files we need to mount Google Drive.
# + colab_type="code" id="oWM9l2fvtuvO" colab={}
from google.colab import drive
drive.mount("/content/drive")
# + [markdown] id="R_1XuuIf9YOn" colab_type="text"
# ### Add From Torrent File
# You can run this cell to add more files as many times as you want
# + colab_type="code" id="0et2A6N3udA0" colab={}
from google.colab import files
source = files.upload()
params = {
"save_path": "/content/drive/My Drive/Torrent",
"ti": lt.torrent_info(list(source.keys())[0]),
}
downloads.append(ses.add_torrent(params))
# + [markdown] id="WD-6M6eZyzmj" colab_type="text"
# ### Add From Magnet Link
# You can run this cell to add more files as many times as you want
# + id="Cwi1GMlxy3te" colab_type="code" colab={}
# Queue torrents from magnet links until the user types "exit".
params = {"save_path": "/content/drive/My Drive/Torrent"}
while True:
    magnet_link = input("Enter Magnet Link Or Type Exit: ")
    if magnet_link.lower() == "exit":
        break
    # NOTE(review): lt.add_magnet_uri is deprecated in newer libtorrent
    # releases — confirm against the installed version.
    downloads.append(
        lt.add_magnet_uri(ses, magnet_link, params)
    )
# + [markdown] id="m-a1CUP39YOu" colab_type="text"
# ### Start Download
# Source: https://stackoverflow.com/a/5494823/7957705 and [#3 issue](https://github.com/FKLC/Torrent-To-Google-Drive-Downloader/issues/3) which refers to this [stackoverflow question](https://stackoverflow.com/a/6053350/7957705)
# + colab_type="code" id="DBNoYYoSuDBT" colab={}
import time
from IPython.display import display
import ipywidgets as widgets
# Human-readable labels indexed by libtorrent's torrent_status.state value.
state_str = [
    "queued",
    "checking",
    "downloading metadata",
    "downloading",
    "finished",
    "seeding",
    "allocating",
    "checking fastresume",
]
layout = widgets.Layout(width="auto")
style = {"description_width": "initial"}
# One disabled slider per queued torrent serves as a 0-100% progress bar.
download_bars = [
    widgets.FloatSlider(
        step=0.01, disabled=True, layout=layout, style=style
    )
    for _ in downloads
]
display(*download_bars)
# Poll once per second until every torrent has completed and been removed.
while downloads:
    # We enumerate a *copy* of `downloads` while removing finished entries
    # from both `downloads` and `download_bars`; `next_shift` compensates for
    # the removals so `index + next_shift` still addresses the right bar.
    next_shift = 0
    for index, download in enumerate(downloads[:]):
        bar = download_bars[index + next_shift]
        if not download.is_seed():
            # Still downloading: refresh the bar's label and percentage.
            s = download.status()
            bar.description = " ".join(
                [
                    download.name(),
                    str(s.download_rate / 1000),
                    "kB/s",
                    state_str[s.state],
                ]
            )
            bar.value = s.progress * 100
        else:
            # Finished (seeding): drop it from the session and the UI.
            next_shift -= 1
            ses.remove_torrent(download)
            downloads.remove(download)
            bar.close()  # Seems to be not working in Colab (see https://github.com/googlecolab/colabtools/issues/726#issue-486731758)
            download_bars.remove(bar)
            print(download.name(), "complete")
    time.sleep(1)
# ---- source notebook: Torrent_To_Google_Drive_Downloader.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to this tutorial
#
# This tutorial is an introduction to how to load data into Spark. For this tutorial we are going to be using the following Data Set:
# __ratings.csv__: _100,000 ratings and 3,600 tag applications applied to 9,000 movies by 600 users._
#
# This Data Set is in CSV format.
# ## SparkSession and Settings
# Before we continue, set up a SparkSession.
# +
from pyspark.sql import SparkSession
# Create (or reuse) the application's SparkSession; all reads go through it.
spark = SparkSession.builder.appName("31LoadingDataFromCSV").getOrCreate()
# -
# Additionally, we have to define some settings to ensure proper operations.
# - `RATINGS_CSV_LOCATION` is used to tell our Spark Application where to find the ratings.csv file
# Location of the ratings.csv file
RATINGS_CSV_LOCATION = "/home/jovyan/data-sets/ml-latest-small/ratings.csv"
# # Part 1: Loading Data from a CSV file
# Take a moment to study the readme file that belongs to the MovieLens-latest-small dataset.
#
# [`pyspark.sql.DataFrameReader.read.csv`](https://spark.apache.org/docs/2.4.3/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader) is used for reading csv files.
#
# We can access this, simply, by referencing our `SparkSession`, which we initated as an object we named `spark` in the previous cell. Hence, [`spark.read.csv()`](https://spark.apache.org/docs/2.4.3/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader) is used to tell Spark to read from a csv file from a given location.
#
# So let's try this.
# Naive load: no options given, so (as the output below shows) every column
# arrives as StringType and the header row is read as data.
df = spark.read.csv(RATINGS_CSV_LOCATION)
# You've now told Spark to load the data from the given CSV file.
# Because Spark is lazy, we have to explicitly tell it to show us something.
#
# Let's see the content by running `.show()` on our new DataFrame.
# Let's also check the schema of what we loaded, by using `.printSchema()`.
df.show()
df.printSchema()
# What you can see, is that the data is being loaded, but it does not quite appear to be right. Additionally, all the columns appear to be cast as a StringType - which is not ideal.
# ### Parsing the CSV file correctly and DataTypes()
#
#
# We can fix the aformentioned issues by giving the `read.csv()` method the correct settings.
#
# To quote the `README.txt` that belongs to the MovieLens data:
# > The dataset files are written as [__comma__-separated values](http://en.wikipedia.org/wiki/Comma-separated_values) files with a __single header row__. Columns that contain commas (`,`) are __escaped using double-quotes__ (`"`). These files are __encoded as UTF-8__.
#
# *__NOTE__: It is a good idea to read the full `README.txt`, since it explains in detail how the data should be interpreted.*
# *__NOTE__: Take a moment to study the [documentation for `read.csv()`](https://spark.apache.org/docs/2.4.3/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader) to learn about which possible things we can set.*
#
# To parse the CSV correctly, we are going to need to set the following on our `read.csv()` method:
#
# 1. We leave the same `path` as before, referring to `RATINGS_CSV_LOCATION` that we set previously.
# 2. Since we have __comma-seperated-values__, we need to set `sep` to `','`.
# 3. Since we have a __single header row__, we need to set `header` to `True`.
# 4. Since columns that contain commas (`,`) are __escaped using double-quotes__ (`"`), we set `quote` to `'"'`.
# 5. Since the files are __encoded as UTF-8__, we set `encoding` to `UTF-8`.
# 6. Additionally, since we observed that all values are cast to `StringType` by default, we set `inferSchema` to `True`.
# +
# Loading CSV file with proper parsing and inferSchema
# Options mirror the README: comma-separated, single header row,
# double-quote escaping, UTF-8 encoding; inferSchema lets Spark guess types.
df = spark.read.csv(
    path=RATINGS_CSV_LOCATION,
    sep=",",
    header=True,
    quote='"',
    encoding="UTF-8",
    inferSchema=True,
)
# Displaying results of the load
df.show()
df.printSchema()
# -
# -
# Looking at the output we can notice a few things:
#
# - The header now appears properly parsed, no more `_c0`, `_c1`, etc.
# - The numeric value columns are cast to `IntegerType` and `DoubleType` thanks to `inferSchema`
#
# Using `inferSchema`, Spark casted the following types to our schema:
#
# > `|-- userId: integer (nullable = true)`
# > `|-- movieId: integer (nullable = true)`
# > `|-- rating: double (nullable = true)`
# > `|-- timestamp: integer (nullable = true)`
#
# In short, our data now appears to have a correct parsed schema with DataTypes that appear to match the current data.
# ### Type Safety
#
# InferSchema is a great way to (quickly) set the schema for the data we are using. It is however good practice to be as explicit as possible when it comes to DataTypes and Schema - we call this [Type Safety](https://en.wikipedia.org/wiki/Type_safety).
# Applying proper schema and ensuring Type Safety, is extra important once we start using more than one Data Source. For example, when trying to join two datasets, the join will not work as expected if the DataTypes of the join columns are not set correctly.
#
# *__NOTE__: Take a moment to read about [Spark DataTypes](https://spark.apache.org/docs/latest/sql-reference.html#data-types).*
#
# Let's now set out schema to an explicit value. We will do this by using the `schema` option belonging to [`read.csv()`](https://spark.apache.org/docs/2.4.3/api/python/pyspark.sql.html#pyspark.sql.DataFrameReader).
#
# `schema`'s description reads:
#
# > *an optional `pyspark.sql.types.StructType` for the input schema or a DDL-formatted string (For example `col0 INT, col1 DOUBLE`).*
#
# I won't cover `StructType` at this point in time, but we will be using a `DDL-formatted string`. Spark uses Apache Hive's DDL language.
#
# *__NOTE__: Take a moment to read about [Compatibility with Apache Hive](https://spark.apache.org/docs/2.4.3/sql-migration-guide-hive-compatibility.html#supported-hive-features) if you want to learn more about the Apache Hive DDL syntax that Spark uses.*
#
# In this case we will define our `DDL-formatted string` as:
# `'userId INT, movieId INT, rating DOUBLE, timestamp INT'`
# +
# Type safe loading of ratings.csv file
# The explicit Hive-DDL schema string replaces inferSchema, pinning each
# column's type up front.
df = spark.read.csv(
    path=RATINGS_CSV_LOCATION,
    sep=",",
    header=True,
    quote='"',
    encoding="UTF-8",
    schema="userId INT, movieId INT, rating DOUBLE, timestamp INT",
)
# Displaying results of the load
df.show()
df.printSchema()
df.describe().show()  # per-column summary statistics
df.explain()  # physical plan of the (lazy) load
# -
# -
# We now have the same output as before, but since we have an explicit schema we can ensure Type Safety
#
# ## What we've learned so far:
#
# - How to use `read.csv()` to load CSV files, and how to control the settings of this method
# - By default, CSVs are parsed with all columns being cast to `StringType`
# - `inferSchema` allows Spark to guess what schema should be used
# - To ensure proper Type Safety, we can use Hive Schema DDL to set an explicit schema
#
# ---
spark.stop()  # release the SparkSession's driver/executor resources
# End of part 1
# ---- source notebook: Section 3 - Preparing Data using SparkSQL/3.1/loading_data_from_a_csv_file.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Load the California housing regression dataset: feature matrix in X,
# median-house-value target in y (see raw.DESCR printed below).
from sklearn.datasets import fetch_california_housing
raw = fetch_california_housing()
X = raw.data
y = raw.target
# Show feature names
raw.feature_names
# -
# -
# Show dataset description
print(raw.DESCR)
# Show dimension of X
X.shape
# Show dimension of y
y.shape
# Split X, y into X_train, X_test, y_train, y_test with 7:3 ratio
# NOTE(review): no random_state is given, so the split (and every downstream
# number) changes on each run -- fix a seed if reproducibility matters.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.7)
# Build a linear regression model with X_train, y_train
from sklearn.linear_model import LinearRegression
reg_model = LinearRegression().fit(X_train, y_train)
# y_pred from X_test
y_pred = reg_model.predict(X_test)
# find the argmax of coefficients
# (the index is only displayed here, not stored in a variable)
import numpy as np
np.argmax(reg_model.coef_)
# Draw scatter plots of the feature with the largest regression coefficient:
# that column of X_train vs y_train as 'x' marker
# that column of X_test vs y_test as 'o' marker
import matplotlib.pyplot as plt
# FIX(review): the original hard-coded column 3 even though the intent (per
# the comment and the np.argmax cell above) was the argmax coefficient; use
# the value computed from the fitted model so the plot stays correct for any
# train/test split.
best_feature = np.argmax(reg_model.coef_)
plt.scatter(X_train[:, best_feature], y_train, marker='x')
plt.scatter(X_test[:, best_feature], y_test, marker='o')
plt.show()
# ---- source notebook: Assignment 2/Assignment_2_linreg.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# FIX(review): this notebook used pd/np without importing them (it relied on
# leftover kernel state); make the imports explicit so the cells can run.
import numpy as np
import pandas as pd

# Load the raw weather observations and keep only station 1.
weather = pd.read_csv("../weather3_180703.csv")
weather['date'] = pd.to_datetime(weather["date"])
station = weather[weather['station_nbr'] == 1]
station.tail()
len(station)
# Per-column missing-value count and percentage for the station-1 slice.
for i in station.columns:
    count_null = station[i].isna().sum()
    print(i, ":", count_null, "(",round((count_null / len(station) * 100),2),"%",")")
# Join unit sales (per item/date) onto the weather rows via the store key.
train = pd.read_csv("../data/train.csv")
train.date = pd.to_datetime(train.date)
train.tail()
key = pd.read_csv("../data/key.csv")
station = station.merge(key)
station = station.merge(train)
station.tail()
# log1p stabilises the heavy right tail of unit sales.
station['log1p_units'] = np.log1p(station.units)
# +
# # units가 다 0인 item_number 골라내기
# stn_real = pd.DataFrame(columns = station.columns)
# station["item_nbr"] = station["item_nbr"].astype("str")
# item_number = station.groupby("item_nbr")["units"].agg(sum)
# stn_item = item_number[item_number != 0].index
# stn_real = pd.DataFrame(columns = station.columns)
# for i in range(len(stn_item)):
# stn_real = pd.concat([stn_real, station[station["item_nbr"] == stn_item[i]]], ignore_index=True)
# -
# ## OLS again on the full data
# +
# Separate the two candidate targets before rebuilding the modelling frames.
target1 = station['units']
target2 = station['log1p_units']
station.drop(columns=['units','log1p_units'],inplace=True)
station.tail()
# -
# df1 models raw units; df2 models log1p(units); both share the same features.
df1 = pd.concat([station,target1], axis=1)
df2 = pd.concat([station,target2], axis=1)
# Units per item split by the weekend flag; rows that are all-zero are dropped.
df1_piv = df1.pivot_table(values = 'units', index = ['item_nbr'], columns=['weekend']).astype(int)
df1_piv = df1_piv.replace(0,np.nan).dropna(how='all')
df1_piv
# NOTE(review): plt/sns are used below but never imported in this notebook --
# they rely on kernel state from a previous session; confirm imports exist.
fig = plt.figure(figsize=(25, 10))
sns.set(font_scale=2.0)
sns.heatmap(df1_piv, cmap="YlGnBu", annot = True, fmt = '.0f')
# Stray inspection cell: displays the rainY column only.
df1['rainY']
# ##### binomial test : weekend
# Same pivot, split by the rain indicator instead of the weekend flag.
df1_piv = df1.pivot_table(values = 'units', index = ['item_nbr'], columns=['rainY']).astype(int)
df1_piv = df1_piv.replace(0,np.nan).dropna(how='all')
df1_piv
fig = plt.figure(figsize=(25, 10))
sns.set(font_scale=1.6)
sns.heatmap(df1_piv, cmap="YlGnBu", annot = True, fmt = '.0f')
# ### 1. OLS : df1 (units)
model1 = sm.OLS.from_formula('units ~ tmax + tmin + tavg + dewpoint + wetbulb + heat + cool + preciptotal + stnpressure + sealevel \
+ resultspeed + C(resultdir) + avgspeed + C(year) + C(month) + relative_humility + windchill + weekend \
+ C(rainY) + C(item_nbr)+ 0', data = df1)
result1 = model1.fit()
print(result1.summary())
# ### 2. OLS : df1 (units) - 스케일링
# - conditional number가 너무 높음.
model1_1 = sm.OLS.from_formula('units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df1)
result1_1 = model1_1.fit()
print(result1_1.summary())
# 스케일링을 했으나 conditional number가 크게 떨어지진 않았다.
# ### 3. OLS : df1 (units) - 아웃라이어 제거
# 아웃라이어 제거
# Cook's distance > 2 인 값 제거
influence = result1.get_influence()
cooks_d2, pvals = influence.cooks_distance
fox_cr = 4 / (len(df1) - 2)
idx_outlier = np.where(cooks_d2 > fox_cr)[0]
len(idx_outlier)
idx = list(set(range(len(df1))).difference(idx_outlier))
df1_1 = df1.iloc[idx, :].reset_index(drop=True)
df1_1
# OLS - df1_1
model1_1_1 = sm.OLS.from_formula('units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df1_1)
result1_1_1 = model1_1_1.fit()
print(result1_1_1.summary())
# R square 약간 상승, conditional number에 전혀 변화가 없다...
# ### 3-1. OLS : df1 (units) - 아웃라이어 제거 + tmax/tmin/tavg 제거 + dewpoint/wetbulb제거 + stnpressure/sealevel제거 + resultdir제거
# OLS - df1_1
model1_1_1 = sm.OLS.from_formula('units ~ scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df1_1)
result1_1_1 = model1_1_1.fit()
print(result1_1_1.summary())
# ### 4. 변수변환 : df2 (log1p_units)
model2 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2)
result2 = model2.fit()
print(result2.summary())
# units에 log를 취하여 R square값은 올랐지만, 여전히 conditional number는 그대로. 상관관계가 높은 변수 제거해야할 거 같다.
# ### 5. 변수변환 : df2 (log1p_units) + 아웃라이어 제거
# 아웃라이어 제거
# Cook's distance > 2 인 값 제거
influence = result2.get_influence()
cooks_d2, pvals = influence.cooks_distance
fox_cr = 4 / (len(df2) - 2)
idx_outlier = np.where(cooks_d2 > fox_cr)[0]
len(idx_outlier)
idx = list(set(range(len(df2))).difference(idx_outlier))
df2_1 = df2.iloc[idx, :].reset_index(drop=True)
df2_1
# OLS - df2_1
model2_1 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result2_1 = model2_1.fit()
print(result2_1.summary())
# 설명력이 더 올라갔다.(0.870), conditional number는 그대로
# ### 6. 변수변환 : df2 (log1p_units) + 아웃라이어 제거 + preciptotal 변수변환
# +
# OLS - df2_1_1
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(tmax) + scale(tmin) + scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(np.log1p(preciptotal)) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# -
# 변화없음.(5번 결과와 동일)
# ### 6 - 1. 변수변환 : df2 (log1p_units) + 아웃라이어 제거 + preciptotal 변수변환 + tmax/tmin/tavg제거(VIF에 근거)
# +
# OLS - df2_1_1
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool)\
+ scale(np.log1p(preciptotal)) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# -
# 그래도 conditional number가 243....
# ### 6 - 2. 변수변환 : df2 (log1p_units) + 아웃라이어 제거 + preciptotal 변수변환 + tmax/tmin/tavg제거 + wetbulb/dewpoint(VIF에 근거) --> 아래 VIF부분으로 갈 것.
# +
# OLS - df2_1_1
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool)\
+ scale(np.log1p(preciptotal)) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ C(resultdir) + scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# -
# wetbulb, dewpoint추가로 지웠더니 conditional number 20감소...(?!)
# ### 7. Normality test of the residuals : not normal.
# NOTE(review): the heading refers to result2's residuals but the plot uses
# result1_1_1 (the raw-units model) -- confirm which fit was intended.
sp.stats.probplot(result1_1_1.resid, plot=plt)
plt.show()
# ### 8. Reducing multicollinearity : VIF
# ##### 1) inspect the VIF of every continuous feature tmax..windchill.
df2_1.columns
# +
# sampleX = df2_1.loc[:, cols]
# sampley = df2_1.loc[:,"log1p_units"]
# sns.pairplot(sampleX)
# plt.show()
# +
from statsmodels.stats.outliers_influence import variance_inflation_factor
cols = ['tmax','tmin','tavg','dewpoint','wetbulb','heat','cool','preciptotal','stnpressure','sealevel','resultspeed','avgspeed','relative_humility','windchill']
# NOTE(review): the VIF is computed on df2 (pre-outlier-removal), and `y`
# here holds a feature matrix, not a target -- confirm both are intentional.
y = df2.loc[:,cols]
vif = pd.DataFrame()
vif["VIF Factor"] = [variance_inflation_factor(y.values, i) for i in range(y.shape[1])]
vif["features"] = y.columns
vif
# -
# Drop tmax, tmin, tavg and re-run the OLS on df2_1 (back up above).
# ##### VIF : 2) iteratively recompute the VIF, dropping the worst feature
# each round (stnpressure -> wetbulb -> windchill -> dewpoint -> avgspeed ->
# sealevel).
# REFACTOR(review): the original repeated one identical cell seven times,
# editing the column list by hand each round; the shared logic is factored
# into a helper so each round is a single call. (The per-round
# sampleX/sampley intermediates were write-only and are not kept.)
# +
from statsmodels.stats.outliers_influence import variance_inflation_factor


def vif_table(frame, cols):
    """Return a DataFrame with the variance inflation factor of each column in `cols`."""
    sample = frame.loc[:, cols]
    table = pd.DataFrame()
    table["VIF Factor"] = [
        variance_inflation_factor(sample.values, i) for i in range(sample.shape[1])
    ]
    table["features"] = sample.columns
    return table
# -
# Round 1: all candidates after dropping tmax/tmin/tavg.
vif = vif_table(df2, ['dewpoint','wetbulb','heat','cool','preciptotal','stnpressure','sealevel','resultspeed','avgspeed','relative_humility','windchill'])
vif
# ##### VIF : re-run after dropping stnpressure.
vif = vif_table(df2, ['dewpoint','wetbulb','heat','cool','preciptotal','sealevel','resultspeed','avgspeed','relative_humility','windchill'])
vif
# ### VIF : re-run after dropping wetbulb
vif = vif_table(df2, ['dewpoint','heat','cool','preciptotal','sealevel','resultspeed','avgspeed','relative_humility','windchill'])
vif
# ### VIF : re-run after dropping windchill
vif = vif_table(df2, ['dewpoint','heat','cool','preciptotal','sealevel','resultspeed','avgspeed','relative_humility'])
vif
# ### VIF : re-run after dropping dewpoint
vif = vif_table(df2, ['heat','cool','preciptotal','sealevel','resultspeed','avgspeed','relative_humility'])
vif
# ### VIF : re-run after dropping avgspeed
vif = vif_table(df2, ['heat','cool','preciptotal','sealevel','resultspeed','relative_humility'])
vif
# ### VIF : re-run after dropping sealevel
cols = ['heat','cool','preciptotal','resultspeed','relative_humility']
vif = vif_table(df2, cols)
vif
# -
# ### VIF 결과 : OLS 성능
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ C(year) + C(month) + scale(relative_humility) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# 아웃라이어 제거
# Cook's distance > 2 인 값 제거
influence = result2.get_influence()
cooks_d2, pvals = influence.cooks_distance
fox_cr = 4 / (len(df2) - 2)
idx_outlier = np.where(cooks_d2 > fox_cr)[0]
len(idx_outlier)
idx = list(set(range(len(df2))).difference(idx_outlier))
df2_1 = df2.iloc[idx, :].reset_index(drop=True)
df2_1
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ C(year) + C(month) + scale(relative_humility) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
sp.stats.probplot(result2_1_1.resid, plot=plt)
plt.show()
# ### case:
# - 변수변환 : df2 (log1p_units) + 아웃라이어 제거 + tmax/tmin/tavg제거 + wetbulb/dewpoint(VIF에 근거) + sealevel/stnpressure제거 + resultdir제거(전부 p-value가 유의하지 않음) + item_nbr제거(하나빼고 다 유의하지 않음)
# +
# OLS - df2_1_1
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ scale(avgspeed) + C(year) + C(month) + scale(relative_humility) + scale(windchill) + C(weekend) \
+ C(rainY) + C(store_nbr) + 0', data = df2_1)
result = model2_1_1.fit()
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# -
# - R square 0.000, conditional number 30.6.
# - conditional을 확 줄이는 대신 R square도 확 증발시킴.
# ##### 9. 다중공선성 감소시키기 : PCA
from patsy import dmatrix
formula = "scale(tavg) + scale(dewpoint) + scale(wetbulb) + scale(heat) + scale(cool) \
+ scale(preciptotal) + scale(stnpressure) + scale(sealevel) + scale(resultspeed) \
+ scale(avgspeed) + scale(relative_humility) + scale(windchill) + C(item_nbr) \
+ C(store_nbr) + C(weekend) + C(rainY) + C(otherY) + C(nothing) + 0"
dfX = dmatrix(formula, station, return_type="dataframe")
dfy = pd.DataFrame(target1, columns=["units"])
idx = list(set(range(len(df2))).difference(idx_outlier))
dfX = dfX.iloc[idx, :].reset_index(drop=True)
dfy = dfy.iloc[idx, :].reset_index(drop=True)
# ##### ***tmax, tmin빠져서 conditional number 감소
model_units = sm.OLS(np.log1p(dfy), dfX)
result_units = model_units.fit()
print(result_units.summary())
from sklearn.decomposition import PCA
dfX2 = sm.add_constant(pd.DataFrame(PCA(n_components=25).fit_transform(dfX)))
model_units2 = sm.OLS(np.log1p(dfy), dfX2)
result_units2 = model_units2.fit()
print(result_units2.summary())
# R square가 0.00....
# ##### 9. Reducing multicollinearity : regularization
# Uses the section-6 model.
# Pure Ridge model (L1_wt=0); a pure Lasso model would be L1_wt=1.
# NOTE(review): this rebinds `result2`, overwriting the section-4 OLS fit.
result2 = model2_1_1.fit_regularized(alpha=0.001, L1_wt=0)
# Show only the coefficients the penalty kept strictly positive.
result2.params[result2.params>0]
# ### 10. Cross validation(교차검증)
from patsy import dmatrix
# 독립변수와 종속변수로 나누기
df2_1_target = df2_1['log1p_units']
df2_1_X = df2_1.drop(columns=['log1p_units'])
len(df2_1_X), len(df2_1_target)
# ##### scikit learn에서 적용할 때 사용하는 코드 :df2_1(log1p_units) 대상
formula = 'scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ C(year) + C(month) + scale(relative_humility) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0'
dfX = dmatrix(formula, df2_1_X, return_type='dataframe')
dfy = pd.DataFrame(df2_1_target, columns=["log1p_units"])
# +
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import KFold
from sklearn.metrics import r2_score

# 10-fold cross-validation of the OLS model on the patsy design matrix.
# FIX(review): the original refit sm.OLS on the FULL dfX/dfy inside every
# fold, so the held-out rows leaked into training and X_train/y_train were
# never used. Fit on the training split only and score on the held-out split.
cv = KFold(10, shuffle=True)
scores = np.zeros(10)
for i, (train_index, test_index) in enumerate(cv.split(dfX)):
    # KFold yields positional indices -> use iloc throughout (the original's
    # .loc only worked because the index was a RangeIndex after reset_index).
    X_train = dfX.iloc[train_index]
    y_train = dfy.iloc[train_index]
    X_test = dfX.iloc[test_index]
    y_test = dfy.iloc[test_index]
    model = sm.OLS(y_train, X_train).fit()
    y_pred = model.predict(X_test)
    scores[i] = r2_score(y_test, y_pred)
scores
# -
# ### station 평균성능
scores.mean()
# 현재의 모델을 사용해도 될 것으로 판단.
# +
model2_1_1 = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool)\
+ scale(preciptotal) + scale(resultspeed) \
+ C(year) + C(month) + scale(relative_humility) + C(weekend) \
+ C(rainY) + C(store_nbr) + C(item_nbr) + 0', data = df2_1)
result2_1_1 = model2_1_1.fit()
print(result2_1_1.summary())
# +
# Regularized fits of the final model at alpha=0.1.
# regularized result1 = Ridge model (overwrites the section-1 OLS fit)
result1 = model2_1_1.fit_regularized(alpha=0.1, L1_wt=0)
# regularized result2 = Lasso model
result2 = model2_1_1.fit_regularized(alpha=0.1, L1_wt=1)
# regularized result3 = Elastic Net model
result3 = model2_1_1.fit_regularized(alpha=0.1, L1_wt=0.5)
# -
# Up to here.
import pickle
# NOTE(review): this pickles the *model specification* (not a fitted result)
# and reloads it into a variable named `result`, overwriting the earlier
# fitted result -- confirm that is intended.
with open("./station1.p","wb") as f:
    pickle.dump(model2_1_1, f)
with open("station1.p", "rb") as f:
    result = pickle.load(f)
# +
# from sklearn.linear_model import LinearRegression
# from sklearn.metrics import r2_score
# from sklearn.model_selection import KFold
# model = LinearRegression()
# cv = KFold(5, random_state=0)
# for train_index, test_index in cv.split(df2_1_X):
# print("test index :", test_index, len(test_index))
# print("." * 80)
# print("train index:", train_index, len(train_index))
# print("=" * 80)
# X_train = df2_1_X.loc[train_index, :]
# X_test = df2_1_X.loc[test_index, :]
# y_train = df2_1_target.loc[train_index, :]
# y_test = df2_1_target.loc[test_index, :]
# tmp_data = pd.concat([X_train, y_train], axis = 1)
# model = sm.OLS.from_formula('log1p_units ~ scale(heat) + scale(cool) + scale(preciptotal) + scale(resultspeed) + scale(avgspeed) \
# + C(year) + C(month) + scale(relative_humility) + scale(windchill) \
# + C(weekend) + C(rainY) + C(store_nbr) + C(item_nbr) + 0', data=tmp_data)
# result = model.fit()
# y_hat = result.predict(X_test)
# print(((y_hat - y_hat.mean())**2).sum() / ((y_test-y_test.mean())**2).sum() )
# -
from sklearn.model_selection import cross_val_score

# 5-fold CV of sklearn's LinearRegression on the same design matrix.
# FIX(review): the original discarded the array returned by cross_val_score
# and then averaged a hard-coded, stale copy of the numbers; keep the scores
# and average those instead.
model = LinearRegression()
cv = KFold(5)
cv_scores = cross_val_score(model, dfX, dfy, scoring="r2", cv=cv)
cv_scores
cv_scores.mean()
# +
# station = station[station["units"] > 0].reset_index(drop=True)
# +
# set(range(len(station)))
# len(stn_real)
# Leverage (hat-matrix diagonal) of each observation under the final fit.
# FIX(review): at this point `result` held the *unfitted* model object loaded
# back from pickle, which has no get_influence(); use the fitted result.
influence = result2_1_1.get_influence()
hat = influence.hat_matrix_diag
plt.figure(figsize=(10, 2))
plt.stem(hat)
plt.show()
# -
# Check for heteroscedasticity: residuals against the heat feature.
# FIX(review): `df` was never defined in this notebook; df2_1 is the frame
# the final model was fitted on -- confirm.
plt.scatter(df2_1['heat'], result2_1_1.resid)
plt.show()
sns.distplot((np.log(np.sqrt(station['tavg']))))
plt.show()
# FIX(review): `a, (b, c) = enumerate(...)` raises ValueError (ten folds
# cannot unpack into two names); take the first fold explicitly instead.
a, (b, c) = next(enumerate(cv.split(dfX)))
# ---- source notebook: weather2_code/station1.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# +
def plotCartesian(name, method, points, file):
    """Scatter-plot galaxy points in Cartesian coordinates and save to *file*.

    name   -- dataset title, e.g. "Spiral galaxy t=1000"
    method -- integration-method label appended to the title
    points -- object exposing .x and .y sequences (e.g. a pandas DataFrame)
    file   -- output path for the saved figure
    """
    fig = plt.figure(figsize=(5, 5))
    ax1 = fig.add_subplot(111)
    ax1.set_title(name + "\nCartesian coordinates, %s" % method)
    ax1.set_xlabel(r'$x$')
    # FIX(review): use the Axes API consistently instead of mixing in the
    # stateful pyplot call plt.ylabel (same result here, clearer intent).
    ax1.set_ylabel(r'$y$')
    ax1.set_xlim(-750, 750)
    ax1.set_ylim(-750, 750)
    ax1.scatter(points.x, points.y, 2, 'r', 'o')
    fig.savefig(file)
    plt.show()


def plotPolar(name, method, points, file):
    """Scatter-plot galaxy points in polar coordinates (theta vs r) and save to *file*.

    Same parameters as plotCartesian, but reads .theta and .r from *points*.
    """
    fig = plt.figure(figsize=(10, 5))
    ax1 = fig.add_subplot(111)
    ax1.set_title(name + "\nPolar coordinates, %s" % method)
    ax1.set_xlabel(r'$\theta$')
    ax1.set_ylabel(r'$r$')
    ax1.set_ylim(0, 750)
    ax1.scatter(points.theta, points.r, 2, 'b', 'o')
    fig.savefig(file)
    plt.show()
# -
# Render Cartesian and polar views for selected Euler-method snapshots.
for t in [1,1000,2000,5000]:
    file = "spirals/euler/csv_state%04d.csv" % t
    spiral = pd.read_csv(file, sep=',')
    plotCartesian("Spiral galaxy t=%d" % t, r"Euler method", spiral, "spirals/euler/spiral%04d_cartesian.pdf" % t)
    plotPolar("Spiral galaxy t=%d" % t, r"Euler method", spiral, "spirals/euler/spiral%04d_polar.pdf" % t)
# ---- source notebook: analysis/spiralAnalysis_results.ipynb ----
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.10 64-bit
# language: python
# name: python3
# ---
# + [markdown] id="lx2nbe5zZ_-N"
# # AskReddit Troll Question Detection Challenge
# + [markdown] id="KXFHvvFiZ_-U"
# ## Imports
# + id="fhlIiuWBZ_-U"
import numpy as np
import pandas as pd
import sklearn
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
import re
# + colab={"base_uri": "https://localhost:8080/"} id="KDrBqoiNZ_-X" outputId="c40b4b25-b8cb-49ba-dcd4-cc6a92ab4ced"
import nltk # for tokenizing the paragraphs in sentences and sentences in words
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
# + colab={"base_uri": "https://localhost:8080/"} id="9ocHtsZhZ_-X" outputId="3f4da35a-6e3c-474d-fdc1-e8dc94e362c3"
# One-time downloads of the NLTK resources used below:
# punkt -> word_tokenize, stopwords -> stop-word removal, wordnet -> lemmatizer.
nltk.download('punkt')
nltk.download('stopwords')
nltk.download('wordnet')
# + id="rBUkOv3-Z_-Z"
# Training data: one row per question (columns include qid, question_text,
# target; presumably target == 1 marks a troll question -- per the plots below).
train_df = pd.read_csv("train.csv")
# train_df.head()
# df = train_df[(train_df == 1).any(axis=1)]
# print(df['question_text'].tolist())
# + [markdown] id="_J94ZCTaZ_-Z"
# ## Preprocessing
# + [markdown] id="OLiW_3JiZ_-a"
# ### Dropping the qid
# + id="cx1MhOldZ_-b"
# qid is an opaque row identifier carrying no signal; drop it in place.
train_df.drop(columns=["qid"],inplace=True)
# train_df.head()
# -
# ### Data Balance Check
# +
import matplotlib.pyplot as plt
# Plotting the distribution for dataset.
# Bar chart of row counts per target value -- visual class-balance check.
ax = train_df.groupby('target').count().plot(kind='bar', title='Distribution of data',legend=False)
ax.set_xticklabels(['0','1'], rotation=0)
# -
# Hence, we need to balance the data some how.
#
# - As the data is in string so we cannot do balancing of data right now.
# - We cannot duplicate the data here as in that case we will affect the vectorisation of the base data (We tried but that didn't work well).
# - Now we will first vectorize the data and then use balancing data techniques.
# +
# from imblearn.over_sampling import SMOTE
# sm = SMOTE(random_state=23, sampling_strategy=1.0)
# X_train_sm, y_train_sm = sm.fit_resample(train_df['question_text'], train_df['target'])
# print(len(X_train_sm), len(y_train_sm))
# Above cannot be used here as they are in string format
# -----------------------------------------------------------------------------------------------------------
# minority_class = train_df[train_df['target']==1]
# majority_class = train_df[train_df['target']==0]
# for i in range(14):
# train_df = train_df.append(minority_class, ignore_index=True)
# print(train_df.shape)
# train_df=train_df.sample(frac=1).reset_index(drop=True)
# print(train_df.shape)
# print(train_df.shape)
# print(minority_class.shape)
# print(majority_class.shape)
# print(minority_class[0:100])
# +
# ax = train_df.groupby('target').count().plot(kind='bar', title='Distribution of data',legend=False)
# ax.set_xticklabels(['0','1'], rotation=0)
# + [markdown] id="x-VUUbh5Z_-c"
# ### Cleaning the data
#
# - Like removing !?., etc.
# - converting sentences to lower case
# -
sentences = train_df['question_text'].tolist()
N = len(sentences)
# NOTE(review): slicing to [0:N] with N == len(sentences) is a no-op copy --
# presumably left over from experiments on a smaller subset.
sentences = sentences[0:N]
# + id="bDlxrDDYZ_-d"
# Normalise each question: strip every non-letter, lower-case, and collapse
# runs of whitespace to single spaces.
# (Replaces the manual `i += 1` index counter and in-place mutation while
# iterating with a single comprehension -- same resulting list.)
sentences = [
    ' '.join(re.sub('[^a-zA-Z]', ' ', sentence).lower().split())
    for sentence in sentences
]
# + [markdown] id="_dz4BTlbZ_-d"
# ### Lemmatization
# - We need to perform Stemming and Lemmatization on the sentences. Lemmatization is prefered as of now (Converting to meaningful words).
#
# It was obvious, lemmatization is not working for our data. It is affecting True Positives. So we will just remove stop words for now.
# + id="EXYPQfkHZ_-d"
# Tokenise each sentence and drop English stop words.
# PERF FIX(review): the original evaluated set(stopwords.words('english'))
# inside the comprehension condition, rebuilding the stop-word set once per
# token of every sentence; build it a single time up front.
stop_words = set(stopwords.words('english'))
tokenized_sentences = []
for sentence in sentences:
    words = nltk.word_tokenize(sentence)
    # removing stop words and using list comprehension
    words = [word for word in words if word not in stop_words]
    # joining words using spaces
    tokenized_sentences.append(' '.join(words))
sentences = tokenized_sentences
# print(sentences)
# -
# ## Saving The PreProcessed Data
# +
# Persist the cleaned questions plus their labels as processed_train_data.csv.
Y1 = train_df['target'].to_numpy().astype(np.float64)
Y1 = Y1[:N]
rows = [["question_text", "target"]]
rows.extend([text, label] for text, label in zip(sentences[:N], Y1))
import csv
with open('processed_train_data.csv', 'w', newline='') as fp:
    writer = csv.writer(fp, delimiter=',')
    writer.writerows(rows)
| Complete Code/attempt4/attempt4_1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# https://es.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points
from IPython.display import HTML,display
# <iframe src="https://es.wikipedia.org/wiki/Ley_del_enfriamiento_de_Newton?useformat=mobile" width=700 height=350></iframe>
#
# <html>
# <head>
# <title>Page Title</title>
# </head>
# <body>
#
# <h1>This is a Heading</h1>
# <p>This is a paragraph.</p>
# <iframe src="https://es.wikipedia.org/wiki/Ley_del_enfriamiento_de_Newton?useformat=mobile" width=700 height=350></iframe>
# </body>
# </html>
#
#
# <html>
#
# <head>
# <title>HTML Iframes</title>
# </head>
#
# <body>
# <p>Document content goes here...</p>
#
# <iframe src="https://es.wikipedia.org/wiki/Ley_del_enfriamiento_de_Newton?useformat=mobile" width = "555" height = "200">
# Sorry your browser does not support inline frames.
# </iframe>
#
# <p>Document content also go here...</p>
# </body>
#
# </html>
# +
#fuente = 'https://es.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points?useformat=mobile'
# Inline HTML snippets ("fuente" = Spanish for "source").
# NOTE(review): the iframe src attributes are unquoted HTML -- browsers
# tolerate this, but quoting the attribute values would be safer.
fuente='<iframe src=https://es.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points width=700 height=350></iframe>'
Ley_Newton_enfriamiento='<iframe src=https://es.wikipedia.org/wiki/Ley_del_enfriamiento_de_Newton?useformat=mobile width=700 height=350></iframe>'
# Render the Wikipedia article on Newton's law of cooling inside the notebook.
HTML(Ley_Newton_enfriamiento)
# +
#'<iframe src=https://es.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points?useformat=mobile width=700 height=350></iframe>'
# Same idea for the Khan Academy page on maxima / minima / saddle points.
HTML('<iframe src=https://es.khanacademy.org/math/multivariable-calculus/applications-of-multivariable-derivatives/optimizing-multivariable-functions/a/maximums-minimums-and-saddle-points?useformat=mobile width=700 height=350></iframe>')
# -
# IFrame is the cleaner API: it builds the <iframe> tag for you.
from IPython.display import IFrame
# In a notebook only the last expression of a cell is displayed, so of the
# two frames below only google.org would actually render here.
IFrame('http://stackoverflow.org', width=700, height=350)
IFrame('https://google.org', width=700, height=350)
| MarkowitzPortafolioOptimization/Max_Min_PuntoSilla.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pickle
# %matplotlib inline
# -
# Hides the pink warnings
import warnings
warnings.filterwarnings('ignore')
def data_formatter(path_pkl, column, aug_type):
    """Load a pickled accuracy table and reshape it to long format.

    Parameters
    ----------
    path_pkl : str
        Path to a pickle holding a mapping of run id ('1'..'5') to 22
        accuracy values (one per shift amount 1..22).
    column : str
        Id column to attach: 'shifts', 'pixels_lost' or 'percentage_lost'.
    aug_type : str
        Augmentation label: 'Linear', 'Diagonal' or 'Combined'.

    Returns
    -------
    pandas.DataFrame with columns [column, 'accuracy', 'aug_type'].
    """
    # FIX: use a context manager so the file handle is closed even if
    # pickle.load raises (the original opened/closed it manually).
    with open(path_pkl, "rb") as pickle_file:
        df = pd.DataFrame(pickle.load(pickle_file))

    shifts = pd.Series(range(1, 23))
    # Linear shifts lose one 28-pixel row/column per shift step; diagonal
    # and combined shifts lose two (assumes 28x28 MNIST-style images).
    pixels_per_shift = 28 if aug_type == 'Linear' else 28 * 2

    if column == 'shifts':
        df['shifts'] = list(shifts)
    elif column == 'pixels_lost':
        df['pixels_lost'] = shifts * pixels_per_shift
    elif column == 'percentage_lost':
        # NOTE: this is a fraction (0..1) of the 784 image pixels, not a
        # percentage, despite the column name.
        df['percentage_lost'] = round((shifts * pixels_per_shift) / 784, 3)

    # Wide -> long: one row per (shift, run) pair.
    df = pd.melt(df, id_vars=[column],
                 value_vars=list('12345'),
                 value_name='accuracy')
    df.drop('variable', axis=1, inplace=True)
    df['aug_type'] = [aug_type] * len(df)
    return df
def shifts_visualizer_function(linear_file, diagonal_file, combined_file, column):
    """Plot per-shift mean and variance of accuracy for all three augmentations.

    Parameters
    ----------
    linear_file, diagonal_file, combined_file : str
        Pickle paths for the Linear / Diagonal / Combined result tables.
    column : str
        Id column passed through to data_formatter (e.g. 'shifts').

    Side effects: writes results_single/single_visualization.png and shows
    the figure.  NOTE(review): the save path is hard-coded, so calling this
    for the SVM results overwrites the Le-Net figure -- confirm intended.
    """
    fig = plt.figure(figsize=(20, 10))
    mean_ax = fig.add_subplot(211)
    var_ax = fig.add_subplot(212)

    # One pass per augmentation type instead of three copy-pasted stanzas.
    for path, aug_type in ((linear_file, 'Linear'),
                           (diagonal_file, 'Diagonal'),
                           (combined_file, 'Combined')):
        data = data_formatter(path, column, aug_type)
        grouped = data.groupby(['shifts'])
        mean_df = grouped.mean().reset_index()
        var_df = grouped.var().reset_index()
        label = '{} Augmentation'.format(aug_type)
        mean_ax.plot(mean_df['shifts'], mean_df['accuracy'], label=label)
        var_ax.plot(var_df['shifts'], var_df['accuracy'], label=label)

    # Title/legend each axes explicitly (the original used plt.title on
    # whichever axes happened to be current).
    mean_ax.set_title("Mean Accuracy per Shift")
    mean_ax.legend()
    var_ax.set_title("Mean Variance Accuracy per Shift")
    var_ax.legend()
    plt.savefig('results_single/single_visualization.png')
    plt.show()
# ## Le-Net 5 Visualizations
# Pickled per-run accuracy tables for the Le-Net 5 experiments.
linear_pkl = 'results_single/linear_non_augmented_test/performance_linear_non_augmented_test.pkl'
diagonal_pkl = 'results_single/diagonal_non_augmented_test/performance_diagonal_non_augmented_test.pkl'
combined_pkl = 'results_single/combined_non_augmented_test/performance_combined_non_augmented_test.pkl'
# ### Shifts
shifts_visualizer_function(linear_pkl, diagonal_pkl, combined_pkl, 'shifts')
# ### Maximum image pixel loss
# ### Maximum % of pixels lost
# ## Support Vector Machine
# Same visualisation for the SVM baseline results.
svm_linear_pkl = 'results/svm_linear_non_augmented_test/svm_performance_linear_non_augmented_test.pkl'
svm_diagonal_pkl = 'results/svm_diagonal_non_augmented_test/svm_performance_diagonal_non_augmented_test.pkl'
svm_combined_pkl = 'results/svm_combined_non_augmented_test/svm_performance_combined_non_augmented_test.pkl'
# ### Shifts
shifts_visualizer_function(svm_linear_pkl, svm_diagonal_pkl, svm_combined_pkl, 'shifts')
# ### Maximum image pixel loss
# ### Maximum % of pixels lost
| Jupyter Notebooks/Visuals-Copy1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 线性回归的概念
# ### 1、线性回归的定义
#
#
#
# 进入一家房产网,可以看到房价、面积、厅室呈现以下数据:
#
# <table>
# <tr>
# <th>面积($x_1$)</th>
# <th>厅室数量($x_2)$</th>
# <th>价格(万元)(y)</th>
# </tr>
# <tr>
# <th>64</th>
# <th>3</th>
# <th>225</th>
# </tr>
# <tr>
# <th>59</th>
# <th>3</th>
# <th>185</th>
# </tr>
# <tr>
# <th>65</th>
# <th>3</th>
# <th>208</th>
# </tr>
# <tr>
# <th>116</th>
# <th>4</th>
# <th>508</th>
# </tr>
# <tr>
# <th>……</th>
# <th>……</th>
# <th>……</th>
# </tr>
# 我们可以将价格和面积、厅室数量的关系习得为$f(x)=\theta_0+\theta_1x_1+\theta_2x_2$,使得$f(x)\approx y$,这就是一个直观的线性回归的样式。
# <td bgcolor=#87CEEB>
# <font size =2>
# 小练习:这是国内一个房产网站上任意搜的数据,有兴趣可以找个网站观察一下,还可以获得哪些可能影响到房价的因素?可能会如何影响到实际房价呢?</font>
# </td>
# ### 2、线性回归的一般形式:
# 有数据集$\{(x_1,y_1),(x_2,y_2),...,(x_m,y_m)\}$,其中,$x_i = (x_{i1};x_{i2};x_{i3};...;x_{id}),y_i\in R$<br>
# 可以用以下函数来描述y和x之间的关系:
# \begin{align*}
# f(x)
# &= \theta_0 + \theta_1x_1 + \theta_2x_2 + ... + \theta_dx_d \\
# &= \sum_{i=0}^{d}\theta_ix_i \\
# \end{align*}
# 如何来确定$\theta$的值,使得$f(x)$尽可能接近y的值呢?均方误差是回归中常用的性能度量,即:
# $$J(\theta)=\frac{1}{2}\sum_{j=1}^{m}(h_{\theta}(x^{(i)})-y^{(i)})^2$$<br>
# 我们可以选择$\theta$,试图让均方误差最小化:
# ### 3、有用的概念
# * 损失函数(Loss Function):度量单样本预测的错误程度,损失函数值越小,模型就越好。
# * 代价函数(Cost Function):度量全部样本集的平均误差。
# * 目标函数(Object Function):代价函数和正则化函数,最终要优化的函数。
# 常用的损失函数包括:0-1损失函数、平方损失函数、绝对损失函数、对数损失函数等;常用的代价函数包括均方误差、均方根误差、平均绝对误差等。
# <td bgcolor=#87CEEB>思考题:既然代价函数已经可以度量样本集的平均误差,为什么还要设定目标函数?</td>
# #### 回答:
# 当模型复杂度增加时,有可能对训练集可以模拟的很好,但是预测测试集的效果不好,出现过拟合现象,这就出现了所谓的“结构化风险”。结构风险最小化即为了防止过拟合而提出来的策略,定义模型复杂度为$J(F)$,目标函数可表示为:
# $$\underset{f\in F}{min}\, \frac{1}{N}\sum^{N}_{i=1}L(y_i,f(x_i))+\lambda J(F)$$
# 
# 例如有以上6个房价和面积关系的数据点,可以看到,当设定$f(x)=\sum_{j=0}^{5}\theta_jx_j$时,可以完美拟合训练集数据,但是,真实情况下房价和面积不可能是这样的关系,出现了过拟合现象。当训练集本身存在噪声时,拟合曲线对未知影响因素的拟合往往不是最好的。
# 通常,随着模型复杂度的增加,训练误差会减少;但测试误差会先增加后减小。我们的最终目的时试测试误差达到最小,这就是我们为什么需要选取适合的目标函数的原因。
# ## 线性回归的优化方法
# ### 1、梯度下降法
# 设定初始参数$\theta$,不断迭代,使得$j(\theta)$最小化:
# $$\theta_j:=\theta_j-\alpha\frac{\partial{J(\theta)}}{\partial\theta}$$
# \begin{align*}
# \frac{\partial{J(\theta)}}{\partial\theta}
# &= \frac{\partial}{\partial\theta_j}\frac{1}{2}\sum_{i=1}^{n}(f_\theta(x)^{(i)}-y^{(i)})^2 \\
# &= 2*\frac{1}{2}\sum_{i=1}^{n}(f_\theta(x)^{(i)}-y^{(i)})*\frac{\partial}{\partial\theta_j}(f_\theta(x)^{(i)}-y^{(i)}) \\
# &= \sum_{i=1}^{n}(f_\theta(x)^{(i)}-y^{(i)})*\frac{\partial}{\partial\theta_j}(\sum_{j=0}^{d}\theta_jx_j^{(i)}-y^{(i)}))\\
# &= \sum_{i=1}^{n}(f_\theta(x)^{(i)}-y^{(i)})x_j^{(i)} \\
# \end{align*}
# 即:
# $$
# \theta_j = \theta_j + \alpha\sum_{i=1}^{n}(y^{(i)}-f_\theta(x)^{(i)})x_j^{(i)}
# $$
# 注:下标j表示第j个参数,上标i表示第i个数据点。
# 将所有的参数以向量形式表示,可得:
# $$
# \theta = \theta + \alpha\sum_{i=1}^{n}(y^{(i)}-f_\theta(x)^{(i)})x^{(i)}
# $$
# 由于这个方法中,参数在每一个数据点上同时进行了移动,因此称为批梯度下降法,对应的,我们可以每一次让参数只针对一个数据点进行移动,即:
# $$
# \theta = \theta + \alpha(y^{(i)}-f_\theta(x)^{(i)})x^{(i)}
# $$
# 这个算法成为随机梯度下降法,随机梯度下降法的好处是,当数据点很多时,运行效率更高;缺点是,因为每次只针对一个样本更新参数,未必找到最快路径达到最优值,甚至有时候会出现参数在最小值附近徘徊而不是立即收敛。但当数据量很大的时候,随机梯度下降法经常优于批梯度下降法。
# 
# 当J为凸函数时,梯度下降法相当于让参数$\theta$不断向J的最小值位置移动
# 梯度下降法的缺陷:如果函数为非凸函数,有可能找到的并非全局组最优值,而是局部最优值。
#
# ### 2、最小二乘法的矩阵求解
# 令<br>
# $$ X = \left[ \begin{array} {cccc}
# (x^{(1)})^T\\
# (x^{(2)})^T\\
# \ldots \\
# (x^{(n)})^T
# \end{array} \right] $$
# 其中,
# $$x^{(i)} = \left[ \begin{array} {cccc}
# x_1^{(i)}\\
# x_2^{(i)}\\
# \ldots \\
# x_d^{(i)}
# \end{array} \right]$$
# 由于
# $$Y = \left[ \begin{array} {cccc}
# y^{(1)}\\
# y^{(2)}\\
# \ldots \\
# y^{(n)}
# \end{array} \right]$$
# $h_\theta(x)$可以写作
# $$h_\theta(x)=X\theta$$
# 对于向量来说,有
# $$z^Tz = \sum_i z_i^2$$
# 因此可以把损失函数写作
# $$J(\theta)=\frac{1}{2}(X\theta-Y)^T(X\theta-Y)$$
# 为最小化$J(\theta)$,对$\theta$求导可得:
# \begin{align*}
# \frac{\partial{J(\theta)}}{\partial\theta}
# &= \frac{\partial}{\partial\theta} \frac{1}{2}(X\theta-Y)^T(X\theta-Y) \\
# &= \frac{1}{2}\frac{\partial}{\partial\theta} (\theta^TX^TX\theta - Y^TX\theta-\theta^T X^TY - Y^TY) \\
# \end{align*}
# 中间两项互为转置,由于求得的值是个标量,矩阵与转置相同,因此可以写成
# \begin{align*}
# \frac{\partial{J(\theta)}}{\partial\theta}
# &= \frac{1}{2}\frac{\partial}{\partial\theta} (\theta^TX^TX\theta - 2\theta^T X^TY - Y^TY) \\
# \end{align*}
# 令偏导数等于零,由于最后一项和$\theta$无关,偏导数为0。
# 因此,
# $$\frac{\partial{J(\theta)}}{\partial\theta} = \frac{1}{2}\frac{\partial}{\partial\theta} \theta^TX^TX\theta - \frac{\partial}{\partial\theta} \theta^T X^TY
# $$
# 利用矩阵求导性质,<br>
#
#
#
# $$
# \frac{\partial \vec x^T\alpha}{\partial \vec x} =\alpha
# $$
# 和
# $$\frac{\partial A^TB}{\partial \vec x} = \frac{\partial A^T}{\partial \vec x}B + \frac{\partial B^T}{\partial \vec x}A$$
#
#
# \begin{align*}
# \frac{\partial}{\partial\theta} \theta^TX^TX\theta
# &= \frac{\partial}{\partial\theta}{(X\theta)^TX\theta}\\
# &= \frac{\partial (X\theta)^T}{\partial\theta}X\theta + \frac{\partial (X\theta)^T}{\partial\theta}X\theta \\
# &= 2X^TX\theta
# \end{align*}
# $$\frac{\partial{J(\theta)}}{\partial\theta} = X^TX\theta - X^TY
# $$
# 令导数等于零,
# $$X^TX\theta = X^TY$$
# $$\theta = (X^TX)^{(-1)}X^TY
# $$
# 注:CS229视频中吴恩达的推导利用了矩阵迹的性质,可自行参考学习。
# ### 3、牛顿法
# 
# 通过图例可知(参考吴恩达CS229),
# $$f'(\theta_0) = \frac{f(\theta_0)}{\Delta},\qquad \Delta = \theta_0 - \theta_1$$
# $$可求得,\theta_1 = \theta_0 - \frac {f(\theta_0)}{f(\theta_0)'}$$
# 重复迭代,可以让逼近取到$f(\theta)$的最小值
# 当我们对损失函数$l(\theta)$进行优化的时候,实际上是想要取到$l'(\theta)$的最小值,因此迭代公式为:
# $$
# \theta :=\theta-\frac{l'(\theta)}{l''(\theta)}
# $$
# $$
# 当\theta是向量值的时候,\theta :=\theta - H^{-1}\Delta_{\theta}l(\theta)
# $$
# 其中,$\Delta_{\theta}l(\theta)$是$l(\theta)$对$\theta_i$的偏导数,$H$是$J(\theta)$的海森矩阵,<br>
# $$H_{ij} = \frac{\partial ^2l(\theta)}{\partial\theta_i\partial\theta_j}$$
# 问题:请用泰勒展开法推导牛顿法公式。
# Answer:将$f(x)$用泰勒公式展开到第二阶,
# $f(x) = f(x_0) + f'(x_0)(x - x_0)+\frac{1}{2}f''(x_0)(x - x_0)^2$
# 对上式求导,并令导数等于0,求得x值
# $$f'(x) = f'(x_0) + f''(x_0)x -f''(x_0)x_0 = 0$$
# 可以求得,
# $x = x_0 - \frac{f'(x_0)}{f''(x_0)}$
# ### 4、拟牛顿法
# 拟牛顿法的思路是用一个矩阵替代计算复杂的海森矩阵H,因此要找到符合H性质的矩阵。
# 要求得海森矩阵符合的条件,同样对泰勒公式求导$f'(x) = f'(x_0) + f''(x_0)x -f''(x_0)x_0$
# 令$x = x_1$,即迭代后的值,代入可得:
# $$f'(x_1) = f'(x_0) + f''(x_0)x_1 - f''(x_0)x_0$$
# 更一般的,
# $$f'(x_{k+1}) = f'(x_k) + f''(x_k)x_{k+1} - f''(x_k)x_k$$
# $$f'(x_{k+1}) - f'(x_k) = f''(x_k)(x_{k+1}- x_k)= H(x_{k+1}- x_k)$$
# $x_k$为第k个迭代值
# 即找到矩阵G,使得它符合上式。
# 常用的拟牛顿法的算法包括DFP,BFGS等,作为选学内容,有兴趣者可自行查询材料学习。
# ## 线性回归的评价指标
# 均方误差(MSE):$\frac{1}{m}\sum^{m}_{i=1}(y^{(i)} - \hat y^{(i)})^2$
# 均方根误差(RMSE):$\sqrt{MSE} = \sqrt{\frac{1}{m}\sum^{m}_{i=1}(y^{(i)} - \hat y^{(i)})^2}$
# 平均绝对误差(MAE):$\frac{1}{m}\sum^{m}_{i=1} | y^{(i)} - \hat y^{(i)} | $
# 但以上评价指标都无法消除量纲不一致而导致的误差值差别大的问题,最常用的指标是$R^2$,可以避免量纲不一致问题
# $$R^2: = 1-\frac{\sum^{m}_{i=1}(y^{(i)} - \hat y^{(i)})^2}{\sum^{m}_{i=1}(\bar y - \hat y^{(i)})^2} =1-\frac{\frac{1}{m}\sum^{m}_{i=1}(y^{(i)} - \hat y^{(i)})^2}{\frac{1}{m}\sum^{m}_{i=1}(\bar y - \hat y^{(i)})^2} = 1-\frac{MSE}{VAR}$$
# 我们可以把$R^2$理解为,回归模型可以成功解释的数据方差部分在数据固有方差中所占的比例,$R^2$越接近1,表示可解释力度越大,模型拟合的效果越好。
# ## sklearn.linear_model参数详解:
# fit_intercept : 默认为True,是否计算该模型的截距。如果使用中心化的数据,可以考虑设置为False,不考虑截距。注意这里是考虑,一般还是要考虑截距
#
# normalize: 默认为false. 当fit_intercept设置为false的时候,这个参数会被自动忽略。如果为True,回归器会标准化输入参数:减去平均值,并且除以相应的二范数。当然啦,在这里还是建议将标准化的工作放在训练模型之前。通过设置sklearn.preprocessing.StandardScaler来实现,而在此处设置为false
#
# copy_X : 默认为True, 否则X会被改写
#
# n_jobs: int 默认为1. 当-1时默认使用全部CPUs ??(这个参数有待尝试)
#
# 可用属性:
#
# coef_:训练后的输入端模型系数,如果label有两个,即y值有两列。那么是一个2D的array
#
# intercept_: 截距
#
# 可用的methods:
#
# fit(X,y,sample_weight=None):
# X: array, 稀疏矩阵 [n_samples,n_features]
# y: array [n_samples, n_targets]
# sample_weight: 权重 array [n_samples]
# 在版本0.17后添加了sample_weight
#
# get_params(deep=True): 返回对regressor 的设置值
#
# predict(X): 预测 基于 R^2值
#
# score: 评估
#
# 参考https://blog.csdn.net/weixin_39175124/article/details/79465558
# <table align ="left";background-color="#87CEEB">
# <tr>
# <td bgcolor="#87CEEB"><font size=2>练习题:请用以下数据(可自行生成尝试,或用其他已有数据集)</font></td>
# </tr>
# <tr>
# <td bgcolor="#87CEEB"><font size=2>1、首先尝试调用sklearn的线性回归函数进行训练;</font></td>
# </tr>
# <tr>
# <td bgcolor="#87CEEB"><font size=2>2、用最小二乘法的矩阵求解法训练数据;</font></td>
# </tr>
# <tr>
# <td bgcolor="#87CEEB"><font size=2>3、用梯度下降法训练数据;</font></td>
# </tr>
# <tr>
# <td bgcolor="#87CEEB"><font size=2>4、比较各方法得出的结果是否一致。</font></td>
# </tr>
# </table>
# 生成数据
# Generate a synthetic regression dataset.
import numpy as np
# Fix the RNG so every run produces exactly the same data.
np.random.seed(1234)
# 500 samples with 3 features, each uniform on [0, 1).
x = np.random.rand(500, 3)
# Ground-truth mapping (no intercept term): y = 4.2*x1 + 5.7*x2 + 10.8*x3.
true_coefs = np.array([4.2, 5.7, 10.8])
y = x.dot(true_coefs)
# 1、先尝试调用sklearn的线性回归模型训练数据
# +
import numpy as np
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# %matplotlib inline
# Instantiate the model (fit_intercept=True even though the generating
# process has no intercept -- the fitted intercept should come out ~0).
lr = LinearRegression(fit_intercept=True)
# Fit on the synthetic x, y from the previous cell.
lr.fit(x,y)
# R^2 on the training data; ~1.0 because the data are noise-free.
print(lr.score(x,y))
# Predict one new point (note: far outside the [0,1) training range).
x_test = np.array([2,4,5]).reshape(1,-1)
y_hat = lr.predict(x_test)
print(y_hat)
# Coefficients -- should recover ~[4.2, 5.7, 10.8].
print(lr.coef_)
# Plotting left disabled: x is 3-dimensional, so a 2-D scatter is ill-defined.
#plt.scatter(x,y)
#plt.plot(x, y_hat)
# -
# 2、最小二乘法的矩阵求解
# +
class LR_ls():
    """Linear regression fitted by the closed-form least-squares solution."""

    def __init__(self):
        # Learned weight vector, shape (d,); set by fit().
        self.w = None

    def fit(self, X, y):
        """Solve the normal equations X^T X w = X^T y for w.

        Uses np.linalg.solve instead of explicitly forming the inverse of
        X^T X, which is both cheaper and numerically more stable.
        """
        self.w = np.linalg.solve(X.T.dot(X), X.T.dot(y))

    def predict(self, X):
        """Return predictions X w for new samples X."""
        y_pred = X.dot(self.w)
        return y_pred
# Demo: fit the closed-form model on the synthetic data from the cells above.
# (In a notebook __name__ is always "__main__", so the guard is decorative.)
if __name__ == "__main__":
    lr_ls = LR_ls()
    lr_ls.fit(x,y)
    # Learned weights -- should recover ~[4.2, 5.7, 10.8] (no intercept term).
    print(lr_ls.w)
    x_test = np.array([2,4,5]).reshape(1,-1)
    print(lr_ls.predict(x_test))
# -
# 3、梯度下降法
# +
class LR_GD():
    """Linear regression fitted by full-batch gradient descent."""

    def __init__(self):
        # Learned weight vector, shape (d,); set by fit().
        self.w = None

    def fit(self, X, y, alpha=0.002, loss=1e-10):
        """Fit weights by batch gradient descent.

        Parameters
        ----------
        X : ndarray, shape (m, d)
        y : ndarray, shape (m,) or (m, 1)
        alpha : float
            Step size.
        loss : float
            Convergence tolerance on the L1 change of the weights.

        NOTE: there is no iteration cap, so a step size large enough to
        diverge will loop forever.
        """
        y = y.reshape(-1, 1)  # column vector so (y - h_f) broadcasts over X
        m, d = np.shape(X)
        self.w = np.zeros((d))  # start from the zero vector
        tol = 1e5  # anything > loss, so the loop runs at least once
        while tol > loss:
            h_f = X.dot(self.w).reshape(-1, 1)  # current predictions
            # Batch update: w <- w + alpha * X^T (y - X w)
            theta = self.w + alpha * np.sum(X * (y - h_f), axis=0)
            tol = np.sum(np.abs(theta - self.w))
            # FIX: the original assigned `self.w = theta` twice in a row;
            # the duplicate dead line has been removed.
            self.w = theta

    def predict(self, X):
        """Return predictions X w for new samples X."""
        y_pred = X.dot(self.w)
        return y_pred
# Demo: fit the gradient-descent model on the same synthetic data; the
# weights should match the closed-form solution above to high precision.
if __name__ == "__main__":
    lr_gd = LR_GD()
    lr_gd.fit(x,y)
    print(lr_gd.w)
    x_test = np.array([2,4,5]).reshape(1,-1)
    print(lr_gd.predict(x_test))
| origin_data/Task1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from pyspark.sql import SparkSession
from pyspark.ml.classification import GBTClassifier
from pyspark.ml.linalg import Vectors
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.stat import Correlation
from pyspark.ml import Pipeline
# +
# Start (or reuse) a Spark session and load the training CSV.
spark = SparkSession.builder.appName("SParkCrossVal").getOrCreate()
# header=True: first row holds column names; inferSchema=True: detect dtypes.
df = spark.read.format("csv")\
    .load("../Data/train.csv", header=True, inferSchema=True)
# +
#Features Vector generated
# columns[2:] skips the first two columns (presumably id and target --
# verify against the CSV schema) and packs the rest into one vector column.
assembler = VectorAssembler(inputCols=df.columns[2:], outputCol="features")
output = assembler.transform(df)
# -
# Rename target -> label (Spark ML convention) and keep only what we train on.
trainDF = output.selectExpr('target as label', 'features')
trainDF.show(5)
# +
#from pyspark.ml.classification import DecisionTreeClassifier
#decisionTree = DecisionTreeClassifier(labelCol = "label")
# Train a GBT model (gradient-boosted trees, 10 boosting iterations).
gbt = GBTClassifier(labelCol="label", featuresCol="features", maxIter=10)
# Single-stage pipeline so CrossValidator can manage it uniformly.
pipeline = Pipeline(stages = [gbt])
# -
from pyspark.ml.tuning import CrossValidator, ParamGridBuilder
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
# Hyper-parameter grid: search over the GBT tree depth.
# FIX: the original referenced `decisionTree`, which is only defined in a
# commented-out line above, so this cell raised NameError at runtime.
# The estimator inside the pipeline is `gbt`, so tune gbt.maxDepth.
paramGrid = ParamGridBuilder()\
    .addGrid(gbt.maxDepth, [1,2,4,5,6,7,8])\
    .build()
# +
# Accuracy evaluator used to score each fold.
evaluator = MulticlassClassificationEvaluator(labelCol = "label", predictionCol = "prediction",
                                              metricName = "accuracy")
# 10-fold cross-validation over the pipeline and grid defined above.
crossVal = CrossValidator(estimator = pipeline,
                          estimatorParamMaps = paramGrid,
                          evaluator = evaluator,
                          numFolds = 10)
# -
# Fit; trains numFolds * |grid| models, so this is the slow step.
cvModel = crossVal.fit(trainDF)
# Mean metric per grid point (the notebook displays the list).
cvModel.avgMetrics
print (cvModel.bestModel.stages[0])
print(gbt)
| develop/2018-03-15-MN-SparkGBTCrossVal.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Example B.3 Parent Breadth-first Search
#
# Examples come from http://people.eecs.berkeley.edu/~aydin/GraphBLAS_API_C_v13.pdf
import numpy as np
import pandas as pd
import networkx as nx
import matplotlib.pyplot as plt
import grblas as gb
from grblas import lib, ffi, Matrix, Vector, Scalar
from grblas.base import NULL
from grblas import dtypes
from grblas import descriptor
from grblas import unary, binary, monoid, semiring
from grblas import io as gio
# Create initial data objects
# Edge list as two parallel index arrays: edge k goes edges[0][k] -> edges[1][k].
edges = [
    [3, 0, 3, 5, 6, 0, 6, 1, 6, 2, 4, 1],
    [0, 1, 2, 2, 2, 3, 3, 4, 4, 5, 5, 6],
]
# Boolean adjacency matrix of the 7-node example graph.
A = Matrix.from_values(edges[0], edges[1], [True for _ in edges[0]])
# BFS source vertex.
s = 1
gio.draw(A)
# ### parent breadth-first search (BFS) in GraphBLAS
# ```
# 1 #include <stdlib.h>
# 2 #include <stdio.h>
# 3 #include <stdint.h>
# 4 #include <stdbool.h>
# 5 #include ”GraphBLAS.h”
# 6
# 7 /∗
# 8 ∗ Given a binary n x n adjacency matrix A and a source vertex s, performs a BFS
# 9 ∗ traversal of the graph and sets parents[i] to the index i's parent.
# 10 ∗ The parent of the root vertex, s, will be set to itself (parents[s] == s). If
# 11 ∗ vertex i is not reachable from s, parents[i] will not contain a stored value.
# 12 ∗/
# 13 GrB_Info BFS(GrB_Vector *v, const GrB_Matrix A, GrB_Index s)
# 14 {
# 15 GrB_Index N;
# 16 GrB_Matrix nrows(&N, A); // N = # of rows of A
# 17
# 18 // create index ramp for index_of() functionality
# 19 GrB_Index *idx = (GrB_Index *) malloc (N*sizeof(GrB_Index));
# 20 for (GrB_Index i = 0; i < N; ++i) idx[i] = i;
# 21 GrB_Vector index_ramp;
# 22 GrB_Vector_new(&index_ramp, GrB_UINT64, N);
# 23 GrB_Vector_build_UINT64(index_ramp, idx, idx, N, GrB_PLUS_INT64);
# 24 free(idx);
# 25
# 26 GrB_Vector_new(parents, GrB_UINT64, N);
# 27 GrB_Vector_setElement(*parents, s, s); // parents[s] = s
# 28
# 29 GrB_Vector wavefront;
# 30 GrB_Vector_new(&wavefront, GrB_UINT64, N);
# 31 GrB_Vector_setElement(wavefront, 1UL, s); // wavefront[s] = 1
# 18 GrB_Vector_new(v, GrB_INT32, N) ; // Vector<int32_t> v(n) = 0
# 19
# 33 /∗
# 34 ∗ BFS traversal and label the vertices.
# 35 ∗/
# 36 GrB_Index nvals;
# 37 GrB_Vector_nvals(&nvals, wavefront);
# 38
# 39 while (nvals > 0)
# 40 {
# 41 // convert all stored values in wavefront to their 0−based index
# 42 GrB_eWiseMult(wavefront, GrB_NULL, GrB_NULL, GrB_FIRST_UINT64,
# 43 index_ramp, wavefront, GrB_NULL);
# 44
# 45 // ”FIRST” because left−multiplying wavefront rows. Masking out the parent
# 46 // list ensures wavefront values do not overwrite parents already stored.
# 47 GrB_vxm(wavefront, * parents, GrB_NULL, GrB_MIN_FIRST_SEMIRING_UINT64,
# 48 wavefront, A, GrB_DESC_RSC);
# 49
# 50 // Don’t need to mask here since we did it in mxm. Merges new parents in
# 51 // current wave front with existing parents : parents += wavefront
# 52 GrB_apply(* parents, GrB_NULL, GrB_PLUS_UINT64,
# 53 GrB_IDENTITY_UINT64, wavefront, GrB_NULL);
# 54
# 55 GrB_Vector_nvals(&nvals, wavefront);
# 56 }
# 57
# 58 GrB_free(&wavefront);
# 59 GrB_free(&index_ramp);
# 60
# 61 return GrB_SUCCESS;
# 62 }
# ```
# ## Python implementation
# Parent BFS from source s: on exit parents[i] holds i's BFS parent
# (parents[s] == s; vertices unreachable from s keep no stored value).
N = A.nrows
# index_ramp[i] = i -- used to replace stored values with their own indices.
index_ramp = Vector.new(dtypes.UINT64, N)
index_ramp.build(range(N), range(N))
parents = Vector.new(dtypes.UINT64, N)
parents[s] << s
# wavefront is the current frontier; its values become parent ids pushed forward.
wavefront = Vector.new(dtypes.UINT64, N)
wavefront[s] << 1
while wavefront.nvals > 0:
    # convert all stored values in wavefront to their 0-based index
    wavefront << index_ramp.ewise_mult(wavefront, binary.first)
    # "FIRST" because left-multiplying wavefront rows. Masking out the parent
    # list ensures wavefront values do not overwrite parents already stored.
    wavefront(~parents.S, replace=True) << wavefront.vxm(A, semiring.min_first)
    # Don't need to mask here since we did it in mxm. Merges new parents in
    # current wave front with existing parents : parents += wavefront
    parents(binary.plus) << wavefront
parents
# Let's Step thru each loop to watch the action unfold
# Only run this cell once -- it initializes things
# Step-through version of the loop above: run this reset once, then re-run
# the cells below repeatedly to watch one BFS level advance per pass.
parents.clear()
parents[s] << s
wavefront.clear()
wavefront[s] << 1
print("Proceed" if wavefront.nvals > 0 else "Done")
# convert all stored values in wavefront to their 0-based index
wavefront << index_ramp.ewise_mult(wavefront, binary.first)
wavefront
# "FIRST" because left-multiplying wavefront rows. Masking out the parent
# list ensures wavefront values do not overwrite parents already stored.
wavefront(~parents.S, replace=True) << wavefront.vxm(A, semiring.min_first)
wavefront
# Don't need to mask here since we did it in mxm. Merges new parents in
# current wave front with existing parents : parents += wavefront
parents(binary.plus) << wavefront
parents
#
| notebooks/Example B.3 -- Parent BFS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="bOChJSNXtC9g"
# # Welcome to IPython
#
# <img src="figures/jupyter.png" width=500>
#
#
# # Notebook Basics
# + [markdown] colab_type="text" id="rSXwaU-ptNG6"
# Welcome to the very first lesson. In this lesson we will learn how to work with the notebook and saving it. If you already know how to use notebooks, feel free to skip this lesson.
# + [markdown] colab_type="text" id="cOEaLCZAu4JQ"
# # Types of cells
# + [markdown] colab_type="text" id="WcOgqq5xvtMn"
# Notebooks are a great visual way of programming. We will use these notebooks to code in Python and learn the basics of machine learning. First, you need to know that notebooks are made up of cells. Each cell can either be a **code cell** or a **text cell**.
#
# * **text cells**: used for headers and paragraph text.
# * **code cells**: used for holding code.
#
#
#
# + [markdown] colab_type="text" id="tBVFofpLutnn"
# # Creating cells
#
# First, let's create a text cell. To create a cell at a particular location, just click on the spot and create a text cell by clicking on the **➕TEXT** below the *View* button up top. Once you made the cell, click on it and type the following inside it:
#
#
# ```
# ### This is a header
# Hello world!
# ```
# + [markdown] colab_type="text" id="iXYgZpgpYS3N"
# # Running cells
# Once you type inside the cell, press the **SHIFT** and **ENTER** together to run the cell.
# + [markdown] colab_type="text" id="WKTbiBuvYexD"
# # Editing cells
# To edit a cell, double click it and you should be able to replace what you've typed in there.
# + [markdown] colab_type="text" id="Jv0ZSuhNYVIU"
# # Moving cells
# Once you create the cell, you can move it with the ⬆️**CELL** and ⬇️**CELL** buttons above.
# + [markdown] colab_type="text" id="B_VGiYf8YXiU"
# # Deleting cells
# You can delete the cell by clicking on the cell and pressing the button with three vertical dots on the top right corner of the cell. Click **Delete cell**.
# + [markdown] colab_type="text" id="hxl7Fk8LVQmR"
# # Creating a code cell
# Now let's take the same steps as above to create, edit and delete a code cell. You can create a code cell by clicking on the ➕CODE below the *File* menu at the top. Once you have created the cell, click on it and type the following inside it:
#
# ```
# print ("hello world!")
# ```
#
# ⏰ - It may take a few seconds when you run your first code cell.
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DfGf9KmQ3DJM" outputId="dd9665df-ac81-4c0d-ef72-5ca2099e53f7"
print ("hello Dr. Pike!")
# + [markdown] colab_type="text" id="GURvB6XzWN12"
# **Note:** These Google colab notebooks timeout if you are idle for more than ~30 minutes which means you'll need to run all your code cells again.
# + [markdown] colab_type="text" id="VoMq0eFRvugb"
# # Saving the notebook
# + [markdown] colab_type="text" id="nPWxXt5Hv7Ga"
# Go to *File* menu and then click on **Save a copy in Drive**. Now you will have your own copy of each notebook in your own Google Drive. If you have a [Github](https://github.com/), you can explore saving it there or even downloading it as a .ipynb or .py file.
| notebooks/Python-in-2-days/D1_L2_IPython/Homework 1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "d8cf8eda-8bbf-44e4-bbdd-702efd62aea7"}
# 
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "4ae73e24-0be3-46bc-9ed3-d8f35308952d"}
# # Training Entity Coding Models (SNOMED example)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3cb7a973-e5c3-4e65-9771-7685520b18e9"}
import os
import json
import string
import numpy as np
import pandas as pd
import sparknlp
import sparknlp_jsl
from sparknlp.base import *
from sparknlp.util import *
from sparknlp.annotator import *
from sparknlp_jsl.annotator import *
from sparknlp.pretrained import ResourceDownloader
from pyspark.sql import functions as F
from pyspark.ml import Pipeline, PipelineModel
from pyspark.sql.types import StructType, StructField, StringType
# Pandas display settings for wide clinical dataframes.
# Use fully-qualified option names: bare names like 'max_colwidth' rely on
# pandas' regex-based partial matching, which raises OptionError as soon as
# a newly added option makes the pattern ambiguous.
pd.set_option('display.max_colwidth', 100)
pd.set_option('display.max_columns', 100)
pd.set_option('display.expand_frame_repr', False)
# Report the licensed library version; the bare `spark` expression on the
# last line makes the notebook display the active session.
print('sparknlp_jsl.version : ',sparknlp_jsl.version())
spark
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "c7a5533a-d389-4d5a-b7ef-ba0870ab5a4f"}
# ## Load datasets
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "113ec8cc-2625-472b-95e4-cf97d3c94ce4"}
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/AskAPatient.fold-0.test.txt
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/AskAPatient.fold-0.train.txt
# !wget -q https://raw.githubusercontent.com/JohnSnowLabs/spark-nlp-workshop/master/tutorials/Certification_Trainings/Healthcare/data/AskAPatient.fold-0.validation.txt
dbutils.fs.cp("file:/databricks/driver/AskAPatient.fold-0.test.txt", "dbfs:/")
dbutils.fs.cp("file:/databricks/driver/AskAPatient.fold-0.train.txt", "dbfs:/")
dbutils.fs.cp("file:/databricks/driver/AskAPatient.fold-0.validation.txt", "dbfs:/")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "863800db-ef00-4cf3-98cb-6bbb49abb6ba"}
import pandas as pd
# Columns: SNOMED concept id, normalized term, raw (patient-authored) term.
cols = ["conceptId","_term","term"]
# All three splits come from the same export, so read them with the same
# Latin-1 encoding. BUGFIX: the original applied encoding="ISO-8859-1" to the
# train split only, so the test/validation reads could fail (or mis-decode)
# on non-ASCII characters in the terms.
aap_tr = pd.read_csv("file:/databricks/driver/AskAPatient.fold-0.train.txt",sep="\t",encoding="ISO-8859-1",header=None)
aap_tr.columns = cols
aap_tr["conceptId"] = aap_tr.conceptId.apply(str)
aap_ts = pd.read_csv("file:/databricks/driver/AskAPatient.fold-0.test.txt",sep="\t",encoding="ISO-8859-1",header=None)
aap_ts.columns = cols
aap_ts["conceptId"] = aap_ts.conceptId.apply(str)
aap_vl = pd.read_csv("file:/databricks/driver/AskAPatient.fold-0.validation.txt",sep="\t",encoding="ISO-8859-1",header=None)
aap_vl.columns = cols
aap_vl["conceptId"] = aap_vl.conceptId.apply(str)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "52f066d0-164f-449c-a4b4-0774746c8b6d"}
aap_vl.head()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "14d0ed31-d2f8-4097-bc2d-7a2b702fcdee"}
# Lift the pandas splits into Spark DataFrames for the pipelines below.
aap_train_sdf = spark.createDataFrame(aap_tr)
aap_test_sdf = spark.createDataFrame(aap_ts)
aap_val_sdf = spark.createDataFrame(aap_vl)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "aca80eb6-e618-4fdd-8672-2ed888a1f3e1"}
# # Chunk Entity Resolver (Glove Embeddings)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "eb7c1a23-2076-47dd-894a-67e61a3e3709"}
# ## Create Training Pipeline
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9dff9b41-0bc5-4837-bcb0-29195c688edf"}
# Training feature pipeline: document -> chunk (whole document as one chunk)
# -> tokens -> word embeddings -> chunk embeddings, which is the input
# expected by ChunkEntityResolverApproach.
document = DocumentAssembler()\
    .setInputCol("term")\
    .setOutputCol("document")
chunk = Doc2Chunk()\
    .setInputCols("document")\
    .setOutputCol("chunk")
# BUGFIX: the statement above originally ended with a stray line-continuation
# backslash, which spliced the next assignment onto it and made the cell a
# SyntaxError.
token = Tokenizer()\
    .setInputCols(['document'])\
    .setOutputCol('token')
embeddings = WordEmbeddingsModel.pretrained("embeddings_healthcare_100d", "en", "clinical/models")\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")
chunk_emb = ChunkEmbeddings()\
    .setInputCols("chunk", "embeddings")\
    .setOutputCol("chunk_embeddings")
snomed_training_pipeline = Pipeline(
    stages = [
    document,
    chunk,
    token,
    embeddings,
    chunk_emb])
snomed_training_model = snomed_training_pipeline.fit(aap_train_sdf)
snomed_data = snomed_training_model.transform(aap_train_sdf).cache()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5c9b2a50-c7af-4869-8bd8-0f3047286fc7"}
# Configure the chunk-level resolver: for each chunk embedding it retrieves
# up to 1000 neighbours, keeps 25 alternatives, and re-ranks them with a
# weighted mix of string-distance metrics
# (WMD, TF-IDF, Jaccard, Sorensen-Dice, Jaro-Winkler, Levenshtein).
snomed_extractor = ChunkEntityResolverApproach() \
    .setInputCols("token", "chunk_embeddings") \
    .setOutputCol("recognized") \
    .setNeighbours(1000) \
    .setAlternatives(25) \
    .setNormalizedCol("_term") \
    .setLabelCol("conceptId") \
    .setEnableWmd(True).setEnableTfidf(True).setEnableJaccard(True)\
    .setEnableSorensenDice(True).setEnableJaroWinkler(True).setEnableLevenshtein(True)\
    .setDistanceWeights([1, 2, 2, 1, 1, 1]) \
    .setAllDistancesMetadata(True)\
    .setPoolingStrategy("MAX") \
    .setThreshold(1e32)  # effectively disables the distance cutoff
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "92e6526c-227b-47ce-ac8c-373dc9681cf5"}
model = snomed_extractor.fit(snomed_data)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "90690f32-eec2-42d1-85cc-f27ce9a7d798"}
# ## Prediction Pipeline
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f4e22d66-1bae-48b0-8b2d-3c23977a97a3"}
# Bundle the feature pipeline and the trained resolver into one PipelineModel.
prediction_Model = PipelineModel(stages=[snomed_training_model, model])
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "bfb92804-bea4-4adc-b1ae-76ca0aed7b39"}
aap_train_pred= prediction_Model.transform(aap_train_sdf).cache()
aap_test_pred= prediction_Model.transform(aap_test_sdf).cache()
aap_val_pred= prediction_Model.transform(aap_val_sdf).cache()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "6dc66b12-a0ef-4b10-b5c8-e32ae4605462"}
aap_test_pred.selectExpr("conceptId","term","_term","recognized[0].result","recognized[0].metadata.resolved_text","recognized[0].metadata.all_k_resolutions").show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "20332bf5-8a99-44d9-9579-bdc3b6c573cd"}
# BUGFIX: this cell originally repeated the aap_test_pred .show() verbatim;
# showing the validation predictions here matches the later BioBert section.
aap_val_pred.selectExpr("conceptId","term","_term","recognized[0].result","recognized[0].metadata.resolved_text","recognized[0].metadata.all_k_resolutions").show(truncate=50)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "07039a54-0414-41d6-b3c1-9be4cde963cb"}
# ## Train Using the entire dataset
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "46fc07d7-790f-41f0-8762-27c9a9f1306f"}
# Retrain on train+test+validation combined to produce the final model.
all_data = aap_train_sdf.union(aap_test_sdf).union(aap_val_sdf)
snomed_training_model = snomed_training_pipeline.fit(all_data)
snomed_data = snomed_training_model.transform(all_data).cache()
# %time model = snomed_extractor.fit(snomed_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "35e1f6fa-b867-4d9d-b8bb-1efeac0b28ae"}
# ! cd /databricks/driver/ && mkdir models
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "73788687-8b51-421b-a781-5a6689a07036"}
# Persist the trained resolver for reuse in the inference pipeline below.
model.write().overwrite().save("dbfs:/databricks/driver/models/chunkresolve_snomed_askapatient_hc_100d")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5e8165e3-9ab7-482e-984f-f6b9be9a96be"}
# %sh cd /databricks/driver/models && ls -la
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "8000cb1a-d96f-4c57-bad5-03bba4f8fb3d"}
# ## Prediction on random texts
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a42cc326-950b-4cbf-b7a7-f6da7f7031e8"}
# Free-text inference pipeline: sentence-split, tokenize, drop stopwords,
# embed, run clinical NER, convert PROBLEM/TEST entities to chunks, embed the
# chunks and resolve them with the saved SNOMED resolver.
documentAssembler = DocumentAssembler()\
    .setInputCol("term")\
    .setOutputCol("document")
# Sentence Detector annotator, processes various sentences per line
sentenceDetector = SentenceDetector()\
    .setInputCols(["document"])\
    .setOutputCol("sentence")\
    .setCustomBounds([","])
# Tokenizer splits words in a relevant format for NLP
tokenizer = Tokenizer()\
    .setInputCols(["sentence"])\
    .setOutputCol("raw_token")
# BUGFIX: the statement above originally ended with a stray line-continuation
# backslash that spliced the next assignment into it (SyntaxError).
stopwords = StopWordsCleaner()\
    .setInputCols(["raw_token"])\
    .setOutputCol("token")
word_embeddings = WordEmbeddingsModel.pretrained("embeddings_healthcare_100d", "en", "clinical/models")\
    .setInputCols(["document", "token"])\
    .setOutputCol("embeddings")
clinical_ner = MedicalNerModel.pretrained("ner_healthcare", "en", "clinical/models") \
    .setInputCols(["sentence", "token", "embeddings"]) \
    .setOutputCol("ner")
snomed_ner_converter = NerConverter() \
    .setInputCols(["sentence", "token", "ner"]) \
    .setOutputCol("greedy_chunk")\
    .setWhiteList(['PROBLEM','TEST'])
chunk_embeddings = ChunkEmbeddings()\
    .setInputCols('greedy_chunk', 'embeddings')\
    .setOutputCol('chunk_embeddings')
snomed_resolver = ChunkEntityResolverModel.load("dbfs:/databricks/driver/models/chunkresolve_snomed_askapatient_hc_100d")\
    .setInputCols("token","chunk_embeddings")\
    .setOutputCol("snomed_resolution")
pipeline_snomed = Pipeline(
    stages = [
    documentAssembler,
    sentenceDetector,
    tokenizer,
    stopwords,
    word_embeddings,
    clinical_ner,
    snomed_ner_converter,
    chunk_embeddings,
    snomed_resolver
      ])
# Fit on an empty frame: all stages are pretrained/loaded, so this only wires
# the pipeline together.
empty_data = spark.createDataFrame([['']]).toDF("term")
model_snomed = pipeline_snomed.fit(empty_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "f0d59fe6-f2c2-45c4-8d39-d1c8ba8f56bc"}
# LightPipeline runs the fitted pipeline on plain strings without Spark jobs.
model_snomed_lp = LightPipeline(model_snomed)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "7c70bb85-c8a2-42af-bcdb-57e7314645e3"}
result = model_snomed_lp.annotate('I have a biceps muscle pain and extreme muscle pain in shoulders')
list(zip(result['greedy_chunk'],result['snomed_resolution']))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "12cb0e51-f677-4d82-9c56-34bbf3242a74"}
result = model_snomed_lp.annotate('I have a flu and a headache')
list(zip(result['greedy_chunk'],result['snomed_resolution']))
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "3b902141-81bc-457b-b98a-f56c58ae516d"}
from pyspark.sql import functions as F
# Full Spark path: zip the chunk/resolution arrays element-wise and show
# chunk text, NER entity, candidate resolutions, chosen code and confidence.
snomed_output = model_snomed.transform(spark.createDataFrame([['I have a biceps muscle pain and extreme muscle pain in shoulders']]).toDF("term"))
snomed_output.select(F.explode(F.arrays_zip("greedy_chunk.result","greedy_chunk.metadata","snomed_resolution.result","snomed_resolution.metadata")).alias("snomed_result")) \
    .select(F.expr("snomed_result['0']").alias("chunk"),
            F.expr("snomed_result['1'].entity").alias("entity"),
            F.expr("snomed_result['3'].all_k_resolutions").alias("target_text"),
            F.expr("snomed_result['2']").alias("code"),
            F.expr("snomed_result['3'].confidence").alias("confidence")).show(truncate = 100)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "20666e78-b6b6-435e-be02-27b5a5842352"}
# # Sentence Entity Resolver (BioBert sentence embeddings) (after v2.7)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "9fd8ca6a-6386-4ae4-8050-46e8b4975ac9"}
aap_train_sdf.show()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "24a6dd2e-c457-4beb-bd51-9647a53c531b"}
aap_train_sdf.printSchema()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "880d9ef5-8e19-432f-a5fe-b17e01d3c4b4"}
# Sentence-level pipeline: embed the normalized term with BioBert sentence
# embeddings; SentenceEntityResolver needs no tokenization or chunking.
documentAssembler = DocumentAssembler()\
    .setInputCol("_term")\
    .setOutputCol("sentence")
bert_embeddings = BertSentenceEmbeddings.pretrained("sent_biobert_pubmed_base_cased")\
    .setInputCols(["sentence"])\
    .setOutputCol("bert_embeddings")
snomed_training_pipeline = Pipeline(
    stages = [
    documentAssembler,
    bert_embeddings])
snomed_training_model = snomed_training_pipeline.fit(aap_train_sdf)
snomed_data = snomed_training_model.transform(aap_train_sdf)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "2499cf31-e566-488f-a3b2-40cbbe3c74ef"}
# Sentence-level resolver trained on BioBert sentence embeddings.
bertExtractor = SentenceEntityResolverApproach()\
    .setNeighbours(25)\
    .setThreshold(1000)\
    .setInputCols("bert_embeddings")\
    .setNormalizedCol("_term")\
    .setLabelCol("conceptId")\
    .setOutputCol('snomed_code')\
    .setDistanceFunction("EUCLIDIAN")\
    .setCaseSensitive(False)
# %time snomed_model = bertExtractor.fit(snomed_data)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "a85ba7dd-c23b-4ac9-a315-42419894705d"}
# save if you will need that later
# BUGFIX: save the sentence resolver trained just above (snomed_model). The
# original saved `model` — the earlier chunk resolver — under this name, so
# the BioBert model was never persisted.
snomed_model.write().overwrite().save("dbfs:/databricks/driver/models/biobertresolve_snomed_askapatient")
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "1a280e7b-8006-4104-a849-01ad70d32d83"}
# Combine the embedding pipeline and the resolver, then score all splits.
prediction_Model = PipelineModel(stages=[snomed_training_model, snomed_model])
aap_train_pred= prediction_Model.transform(aap_train_sdf).cache()
aap_test_pred= prediction_Model.transform(aap_test_sdf).cache()
aap_val_pred= prediction_Model.transform(aap_val_sdf).cache()
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "ade7eecd-a78b-441e-969d-bd54f89bd34f"}
aap_test_pred.selectExpr("conceptId","term","_term","snomed_code[0].result","snomed_code[0].metadata.resolved_text","snomed_code[0].metadata.all_k_resolutions").show(truncate=50)
# + application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "676f6d2d-ca2f-479d-a5ab-5a99e31fd011"}
aap_val_pred.selectExpr("conceptId","term","_term","snomed_code[0].result","snomed_code[0].metadata.resolved_text","snomed_code[0].metadata.all_k_resolutions").show(truncate=50)
# + [markdown] application/vnd.databricks.v1+cell={"title": "", "showTitle": false, "inputWidgets": {}, "nuid": "5cb4a608-29da-4c43-956d-ec0f6a761e64"}
# End of Notebook #
| tutorials/Certification_Trainings/Healthcare/databricks_notebooks/8. Training Entity Coding Models (SNOMED example).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# BUGFIX: `sheetname` was deprecated in pandas 0.21 and removed in 1.0; the
# keyword is `sheet_name`.
data = pd.read_excel('data/Governance.xlsx', sheet_name=0)
# Wide -> long: one row per (country, indicator, year).
melt_data = data.melt(id_vars=['Country Name', 'Country Code', 'Indicator Name', 'Indicator Code'], var_name="Year")
melt_data = melt_data[['Country Name', 'Country Code', 'Indicator Name', 'Indicator Code', 'Year', 'value']]
melt_data.head(5)
copy_melt = melt_data.copy()
# Long -> wide again, with one column per indicator name.
copy_melt = pd.pivot_table(copy_melt, values = 'value', index=['Country Name', 'Country Code','Year'], columns = 'Indicator Name').reset_index()
#copy_melt.index = piv_data['Year']
copy_melt.head(5)
# +
#copy_melt.index = copy_melt.Year
# -
copy_melt.head()
# Each WGI indicator ships the same six measures; build the column lists with
# one helper instead of six hand-copied blocks (the originals were identical
# apart from the indicator prefix).
_WGI_MEASURES = ['Estimate', 'Number of Sources', 'Percentile Rank',
                 'Percentile Rank, Lower Bound of 90% Confidence Interval',
                 'Percentile Rank, Upper Bound of 90% Confidence Interval',
                 'Standard Error']

def wgi_columns(indicator):
    """Return ['Year', 'Country Name'] plus the six standard measure columns for *indicator*."""
    return ['Year', 'Country Name'] + ['{}: {}'.format(indicator, m) for m in _WGI_MEASURES]

control_corruption = wgi_columns('Control of Corruption')
control_data = copy_melt[control_corruption]
# (variable-name typo 'effectivesness' kept for backward compatibility)
government_effectivesness = wgi_columns('Government Effectiveness')
government_data = copy_melt[government_effectivesness]
government_data.head(5)
political_stab = wgi_columns('Political Stability and Absence of Violence/Terrorism')
political_data = copy_melt[political_stab]
regulatory_quality = wgi_columns('Regulatory Quality')
regulatory_data = copy_melt[regulatory_quality]
rule_law = wgi_columns('Rule of Law')
rule_data = copy_melt[rule_law]
voice_and_account = wgi_columns('Voice and Accountability')
voice_data = copy_melt[voice_and_account]
# ## Saving to CSV
# Persist each indicator subset for downstream notebooks.
voice_data.to_csv('data/voice_accountability.csv',encoding='utf-8', index=False)
rule_data.to_csv('data/rule_of_law.csv',encoding='utf-8', index=False)
regulatory_data.to_csv('data/regulatory_quality.csv',encoding='utf-8', index=False)
political_data.to_csv('data/political_stability.csv',encoding='utf-8', index=False)
control_data.to_csv('data/control_corruption.csv',encoding='utf-8', index=False)
government_data.to_csv('data/government_effectiveness.csv',encoding='utf-8', index=False)
voice_data.head()
# Round-trip check: re-read one of the saved files.
vvv = pd.read_csv('data/voice_accountability.csv')
vvv.head()
# NOTE(review): groupby with a key function groups by the *index* labels
# converted to datetimes; the result is never assigned — presumably
# exploratory. Confirm whether grouping by the 'Year' column was intended.
voice_data.groupby(lambda x: pd.to_datetime(x))
voice_data.sort_values('Year').head()
# Sub-Saharan Africa country names as spelled in the World Bank data.
# BUGFIX: the original list was missing a comma between "Eritrea" and
# "Namibia", so implicit string concatenation produced the single bogus
# entry "EritreaNamibia" and silently dropped both countries from the filter.
SSA = ["Angola", "Gabon", "Nigeria", "Benin", "Gambia, The", "Rwanda", "Guinea-Bissau", "Botswana",
       "Ghana", "São Tomé and Principe", "Burkina Faso", "Guinea", "Senegal", "Burundi", "Seychelles",
       "Cabo Verde", "Kenya", "Sierra Leone", "Cameroon", "Lesotho", "Somalia", "Central African Republic",
       "Liberia", "South Africa", "Chad", "Madagascar", "Comoros", "Malawi", "Sudan", "Congo, Dem. Rep.",
       "Mali", "Swaziland", "Congo, Rep", "Mauritania", "Tanzania", "Côte d'Ivoire", "Mauritius", "Togo",
       "Equatorial Guinea", "Mozambique", "Uganda", "Eritrea", "Namibia", "Zambia", "Ethiopia", "Niger", "Zimbabwe"]
# Restrict the voice-and-accountability data to Sub-Saharan Africa.
ssa_melt = voice_data[voice_data['Country Name'].isin(SSA)]
ssa_melt['Country Name'].nunique()
ssa_melt.head()
# Frequency of the ten most common estimate values for Somalia (exploratory;
# result is assigned but not used further in this chunk).
est_voice = ssa_melt[ssa_melt['Country Name'] == 'Somalia'].groupby('Voice and Accountability: Estimate').size().head(10).to_frame(name = 'count').reset_index()
# Mean number of underlying sources per country.
ssa_melt.groupby('Country Name')['Voice and Accountability: Number of Sources'].mean()
# NOTE(review): "Country Name" is a string column; depending on the pandas
# version, passing it as a y-series may raise or be dropped — the intent was
# likely one line per country. TODO confirm before relying on this plot.
ax = ssa_melt.plot(x='Year', y=["Country Name","Voice and Accountability: Number of Sources"])
# BUGFIX: the original line was a verbatim pandas-docs snippet
# (`pivot_table(df, values='D', ...)`) referencing undefined names
# `pivot_table` and `df`, which raised NameError. Replaced with an
# equivalent, runnable pivot over this dataset.
table = pd.pivot_table(ssa_melt, values='Voice and Accountability: Estimate',
                       index=['Country Name'], columns=['Year'], aggfunc=np.mean)
# +
# pd.pivot_table?
# -
# Total estimate per country; argsort gives the ordering of the ranks.
estimate = voice_data.groupby('Country Name')['Voice and Accountability: Estimate'].sum()
np.argsort(estimate)
| Data Exploring/institution data/Government Data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="uCCdIFdfeR2u" outputId="cb0b177d-70dd-4387-e3f6-0d9c1d42fa5c"
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="ljMOEjUYfawg" outputId="cea4a4c2-1e1c-4094-89b9-a5b83020910f"
# !ls -l
# + colab={"base_uri": "https://localhost:8080/"} id="tXFvJNZKfgi7" outputId="4b954159-0e0e-4f1e-d1a9-e8b5b6dbacef"
# !pwd
# + colab={"base_uri": "https://localhost:8080/"} id="lAA7Q6Xrfwnn" outputId="ea035d44-ae7c-428a-9c65-f364bf8c388b"
# !ls ./sample_data
# + colab={"base_uri": "https://localhost:8080/"} id="Y_5_p3KvipFe" outputId="3e3aed19-f21f-473b-e7d3-4d05e13c4656"
# !ls -l ./Wholesale_customers_data.csv
# + colab={"base_uri": "https://localhost:8080/", "height": 417} id="AB1Qx1oeiwUn" outputId="69e69d74-3241-4760-c7de-0167752ffee6"
import pandas as pd
# Load the Wholesale Customers dataset previously uploaded to the Colab VM.
df = pd.read_csv('./Wholesale_customers_data.csv')
df
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="erQCQ257mZFy" outputId="62cf8f81-1378-4e15-c143-1eebe7b813de"
mydict = [{'a': 1, 'b': 2, 'c': 3, 'd': 4},
{'a': 100, 'b': 200, 'c': 300, 'd': 400},
{'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 }]
df = pd.DataFrame(mydict)
df
# + colab={"base_uri": "https://localhost:8080/"} id="WDgQUOijnDWi" outputId="22284015-c46f-4189-b8dd-e4882a21396e"
df.iloc[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="GLGVW2GQnMJF" outputId="72df0705-c94d-4cb7-b8ec-61467f4e5d61"
df.iloc[:,1:3]
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="uxeC5rSnnVro" outputId="e2ad5fb4-63e2-4609-dc4c-defb928019b0"
# Label-based indexing demo: a tiny table of snakes, built column-wise.
df = pd.DataFrame(
    {'max_speed': [1, 4, 7], 'shield': [2, 5, 8]},
    index=['cobra', 'viper', 'sidewinder'])
df
# + colab={"base_uri": "https://localhost:8080/"} id="jcXC5D1fqJDe" outputId="b24dc188-9405-40d4-f3c0-fb6ddb3ac309"
# A single scalar by row and column label.
df.loc['cobra','shield']
# + colab={"base_uri": "https://localhost:8080/", "height": 110} id="ZeuFwFHiqPci" outputId="374ca096-6efd-4d68-a5f5-33e4587c662e"
# Unlike positional slices, label slices include *both* endpoints.
df.loc['cobra':'viper','max_speed':'shield']
# + id="5kFgTKuGrRas"
| iloc_pandas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="N6ZDpd9XzFeN"
# ##### Copyright 2018 The TensorFlow Hub Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# + cellView="both" id="KUu4vOt5zI9d"
# Copyright 2018 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# + [markdown] id="CxmDMK4yupqg"
# # オブジェクト検出
#
# + [markdown] id="MfBg1C5NB3X0"
# <table class="tfo-notebook-buttons" align="left">
# <td><a target="_blank" href="https://www.tensorflow.org/hub/tutorials/object_detection"><img src="https://www.tensorflow.org/images/tf_logo_32px.png"> TensorFlow.orgで表示</a></td>
# <td><a target="_blank" href="https://colab.research.google.com/github/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/object_detection.ipynb"><img src="https://www.tensorflow.org/images/colab_logo_32px.png">Run in Google Colab</a></td>
# <td><a target="_blank" href="https://github.com/tensorflow/docs-l10n/blob/master/site/ja/hub/tutorials/object_detection.ipynb"><img src="https://www.tensorflow.org/images/GitHub-Mark-32px.png">GitHub でソースを表示</a></td>
#   <td><a href="https://storage.googleapis.com/tensorflow_docs/docs-l10n/site/ja/hub/tutorials/object_detection.ipynb"><img src="https://www.tensorflow.org/images/download_logo_32px.png">ノートブックをダウンロード</a></td>
# <td><a href="https://tfhub.dev/s?q=google%2Ffaster_rcnn%2Fopenimages_v4%2Finception_resnet_v2%2F1%20OR%20google%2Ffaster_rcnn%2Fopenimages_v4%2Finception_resnet_v2%2F1"><img src="https://www.tensorflow.org/images/hub_logo_32px.png">TF Hub モデルを見る</a></td>
# </table>
# + [markdown] id="Sy553YSVmYiK"
# この Colab では、オブジェクト検出を実行するようにトレーニングされた TF-Hub モジュールの使用を実演します。
# + [markdown] id="v4XGxDrCkeip"
# ## セットアップ
#
# + cellView="both" id="6cPY9Ou4sWs_"
#@title Imports and function definitions
# For running inference on the TF-Hub module.
import tensorflow as tf
import tensorflow_hub as hub
# For downloading the image.
import matplotlib.pyplot as plt
import tempfile
from six.moves.urllib.request import urlopen
from six import BytesIO
# For drawing onto the image.
import numpy as np
from PIL import Image
from PIL import ImageColor
from PIL import ImageDraw
from PIL import ImageFont
from PIL import ImageOps
# For measuring the inference time.
import time
# Print Tensorflow version
print(tf.__version__)
# Check available GPU devices.
print("The following GPU devices are available: %s" % tf.test.gpu_device_name())
# + [markdown] id="ZGkrXGy62409"
# ## 使用例
# + [markdown] id="vlA3CftFpRiW"
# ### 画像のダウンロードと視覚化用のヘルパー関数
#
# 必要最低限の単純な機能性を得るために、[TF オブジェクト検出 API](https://github.com/tensorflow/models/blob/master/research/object_detection/utils/visualization_utils.py) から採用された視覚化コードです。
# + id="D9IwDpOtpIHW"
def display_image(image):
  """Render *image* in the notebook on a large, grid-free figure."""
  plt.figure(figsize=(20, 15))
  plt.grid(False)
  plt.imshow(image)
def download_and_resize_image(url, new_width=256, new_height=256,
                              display=False):
  """Download *url*, crop/resize it to (new_width, new_height), save it as a
  JPEG temp file and return the file path.

  Args:
    url: HTTP(S) URL of the source image.
    new_width: target width in pixels.
    new_height: target height in pixels.
    display: if True, also show the resized image inline.

  Returns:
    Path of the saved temporary JPEG file.
  """
  _, filename = tempfile.mkstemp(suffix=".jpg")
  response = urlopen(url)
  image_data = response.read()
  image_data = BytesIO(image_data)
  pil_image = Image.open(image_data)
  # BUGFIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
  # same resampling filter under its current name.
  pil_image = ImageOps.fit(pil_image, (new_width, new_height), Image.LANCZOS)
  pil_image_rgb = pil_image.convert("RGB")
  pil_image_rgb.save(filename, format="JPEG", quality=90)
  print("Image downloaded to %s." % filename)
  if display:
    display_image(pil_image)
  return filename
def draw_bounding_box_on_image(image,
                               ymin,
                               xmin,
                               ymax,
                               xmax,
                               color,
                               font,
                               thickness=4,
                               display_str_list=()):
  """Adds a bounding box to an image.

  Args:
    image: PIL.Image; drawn on in place.
    ymin, xmin, ymax, xmax: box corners as fractions of the image height and
      width (normalized [0, 1] coordinates, TF detection-API convention).
    color: outline color for the box and fill for the label background.
    font: PIL ImageFont used to render the label strings.
    thickness: line width of the box outline in pixels.
    display_str_list: label strings stacked above (or below) the box.
  """
  draw = ImageDraw.Draw(image)
  im_width, im_height = image.size
  # Convert normalized coordinates to absolute pixel positions.
  (left, right, top, bottom) = (xmin * im_width, xmax * im_width,
                                ymin * im_height, ymax * im_height)
  draw.line([(left, top), (left, bottom), (right, bottom), (right, top),
             (left, top)],
            width=thickness,
            fill=color)
  # If the total height of the display strings added to the top of the bounding
  # box exceeds the top of the image, stack the strings below the bounding box
  # instead of above.
  # NOTE(review): font.getsize was removed in Pillow 10 (use getbbox/getlength
  # there) — confirm the pinned Pillow version before upgrading.
  display_str_heights = [font.getsize(ds)[1] for ds in display_str_list]
  # Each display_str has a top and bottom margin of 0.05x.
  total_display_str_height = (1 + 2 * 0.05) * sum(display_str_heights)
  if top > total_display_str_height:
    text_bottom = top
  else:
    text_bottom = top + total_display_str_height
  # Reverse list and print from bottom to top.
  for display_str in display_str_list[::-1]:
    text_width, text_height = font.getsize(display_str)
    margin = np.ceil(0.05 * text_height)
    # Filled background rectangle behind the label text.
    draw.rectangle([(left, text_bottom - text_height - 2 * margin),
                    (left + text_width, text_bottom)],
                   fill=color)
    draw.text((left + margin, text_bottom - text_height - margin),
              display_str,
              fill="black",
              font=font)
    text_bottom -= text_height - 2 * margin
def draw_boxes(image, boxes, class_names, scores, max_boxes=10, min_score=0.1):
  """Overlay labeled boxes on an image with formatted scores and label names.

  Args:
    image: numpy uint8 array (the drawn result is copied back into it).
    boxes: array of normalized [ymin, xmin, ymax, xmax] rows.
    class_names: byte-string class labels (ascii-decoded for display).
    scores: detection confidences aligned with boxes.
    max_boxes: draw at most this many boxes.
    min_score: skip detections below this confidence.

  Returns:
    The modified image array.
  """
  colors = list(ImageColor.colormap.values())
  try:
    font = ImageFont.truetype("/usr/share/fonts/truetype/liberation/LiberationSansNarrow-Regular.ttf",
                              25)
  except IOError:
    print("Font not found, using default font.")
    font = ImageFont.load_default()
  for i in range(min(boxes.shape[0], max_boxes)):
    if scores[i] >= min_score:
      ymin, xmin, ymax, xmax = tuple(boxes[i])
      display_str = "{}: {}%".format(class_names[i].decode("ascii"),
                                     int(100 * scores[i]))
      # Deterministic per-class color, chosen by hashing the class name.
      color = colors[hash(class_names[i]) % len(colors)]
      image_pil = Image.fromarray(np.uint8(image)).convert("RGB")
      draw_bounding_box_on_image(
          image_pil,
          ymin,
          xmin,
          ymax,
          xmax,
          color,
          font,
          display_str_list=[display_str])
      # Copy the drawn PIL image back into the numpy buffer in place.
      np.copyto(image, np.array(image_pil))
  return image
# + [markdown] id="D19UCu9Q2-_8"
# ## モジュールを適用する
#
# Open Images v4 から公開画像を読み込み、ローカルの保存して表示します。
# + cellView="both" id="YLWNhjUY1mhg"
# By <NAME>, Source: https://commons.wikimedia.org/wiki/File:Naxos_Taverna.jpg
image_url = "https://upload.wikimedia.org/wikipedia/commons/6/60/Naxos_Taverna.jpg"  #@param
downloaded_image_path = download_and_resize_image(image_url, 1280, 856, True)
# + [markdown] id="t-VdfLbC1w51"
# オブジェクト検出モジュールを選択し、ダウンロードされた画像に適用します。モジュールのリストを示します。
#
# - **FasterRCNN+InceptionResNet V2**: 高精度
# - **ssd+mobilenet V2**: 小規模で高速
# + id="uazJ5ASc2_QE"
# Load the chosen TF-Hub detector and grab its 'default' serving signature.
module_handle = "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1" #@param ["https://tfhub.dev/google/openimages_v4/ssd/mobilenet_v2/1", "https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1"]
detector = hub.load(module_handle).signatures['default']
# + id="znW8Fq1EC0x7"
def load_img(path):
  """Read the file at *path* and decode it as a 3-channel JPEG tensor."""
  raw = tf.io.read_file(path)
  return tf.image.decode_jpeg(raw, channels=3)
# + id="kwGJV96WWBLH"
def run_detector(detector, path):
  """Run *detector* on the image at *path* and display the boxed result.

  Args:
    detector: TF-Hub detection signature returning detection_boxes,
      detection_class_entities and detection_scores.
    path: local path to a JPEG image.
  """
  img = load_img(path)
  # The module expects a float32 batch: add a leading batch axis and rescale.
  converted_img  = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]
  start_time = time.time()
  result = detector(converted_img)
  end_time = time.time()
  # Pull every output tensor back to host numpy arrays.
  result = {key:value.numpy() for key,value in result.items()}
  print("Found %d objects." % len(result["detection_scores"]))
  print("Inference time: ", end_time-start_time)
  image_with_boxes = draw_boxes(
      img.numpy(), result["detection_boxes"],
      result["detection_class_entities"], result["detection_scores"])
  display_image(image_with_boxes)
# + id="vchaUW1XDodD"
run_detector(detector, downloaded_image_path)
# + [markdown] id="WUUY3nfRX7VF"
# ### その他の画像
#
# 時間トラッキングを使用して、追加の画像に推論を実行します。
#
# + id="rubdr2JXfsa1"
# Additional Wikimedia Commons sample images for timed inference runs.
image_urls = [
  # Source: https://commons.wikimedia.org/wiki/File:The_Coleoptera_of_the_British_islands_(Plate_125)_(8592917784).jpg
  "https://upload.wikimedia.org/wikipedia/commons/1/1b/The_Coleoptera_of_the_British_islands_%28Plate_125%29_%288592917784%29.jpg",
  # By <NAME>, Source: https://commons.wikimedia.org/wiki/File:Biblioteca_Maim%C3%B3nides,_Campus_Universitario_de_Rabanales_007.jpg
  "https://upload.wikimedia.org/wikipedia/commons/thumb/0/0d/Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg/1024px-Biblioteca_Maim%C3%B3nides%2C_Campus_Universitario_de_Rabanales_007.jpg",
  # Source: https://commons.wikimedia.org/wiki/File:The_smaller_British_birds_(8053836633).jpg
  "https://upload.wikimedia.org/wikipedia/commons/0/09/The_smaller_British_birds_%288053836633%29.jpg",
  ]
def detect_img(image_url):
  """Fetch *image_url* at 640x480, run the global detector, report timing."""
  t0 = time.time()
  local_path = download_and_resize_image(image_url, 640, 480)
  run_detector(detector, local_path)
  print("Inference time:", time.time() - t0)
# + id="otPnrxMKIrj5"
# Run timed detection over each of the sample images.
detect_img(image_urls[0])
# + id="H5F7DkD5NtOx"
detect_img(image_urls[1])
# + id="DZ18R7dWNyoU"
detect_img(image_urls[2])
| site/ja/hub/tutorials/object_detection.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="wquAnPg0p4Y8"
# # Flax seq2seq Example
#
# <a href="https://colab.research.google.com/github/google/flax/blob/main/examples/seq2seq/seq2seq.ipynb" ><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# Demonstration notebook for
# https://github.com/google/flax/tree/main/examples/seq2seq
#
# + [markdown] id="UuqrLz3he_1M"
# The **Flax Notebook Workflow**:
#
# 1. Run the entire notebook end-to-end and check out the outputs.
# - This will open Python files in the right-hand editor!
# - You'll be able to interactively explore metrics in TensorBoard.
# 2. Change `config` and train for different hyperparameters. Check out the
# updated TensorBoard plots.
# 3. Update the code in `train.py`. Thanks to `%autoreload`, any changes you
# make in the file will automatically appear in the notebook. Some ideas to
# get you started:
# - Change the model.
# - Log some per-batch metrics during training.
# - Add new hyperparameters to `configs/default.py` and use them in
# `train.py`.
# 4. At any time, feel free to paste code from `train.py` into the notebook
# and modify it directly there!
# + [markdown] id="2cMTM3W4hcsZ"
# ## Setup
# + id="xVAH-aWN3NzF" colab={"base_uri": "https://localhost:8080/"} outputId="3322393b-f9c1-4e8b-c287-17df2c63d0fa"
# Install CLU & latest Flax version from Github.
# !pip install -q clu git+https://github.com/google/flax
# + id="SwX8bCNEGhJM" tags=[]
# Which example to copy out of the Flax repository, and which files to open
# in the Colab editor pane.
example_directory = 'examples/seq2seq'
editor_relpaths = ('train.py',)
# BUGFIX: Flax's default branch is 'main' (the links elsewhere in this
# notebook point at .../blob/main/...); cloning with '-b master' fails
# against the current repository.
repo, branch = 'https://github.com/google/flax', 'main'
# + id="o65RonwHp4Y9" cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 198} outputId="c0e690bd-142b-495b-91da-04b6bc440587"
# (If you run this code in Jupyter[lab], then you're already in the
# example directory and nothing needs to be done.)
#@markdown **Fetch newest Flax, copy example code**
#@markdown
#@markdown **If you select no** below, then the files will be stored on the
#@markdown *ephemeral* Colab VM. **After some time of inactivity, this VM will
#@markdown be restarted an any changes are lost**.
#@markdown
#@markdown **If you select yes** below, then you will be asked for your
#@markdown credentials to mount your personal Google Drive. In this case, all
#@markdown changes you make will be *persisted*, and even if you re-run the
#@markdown Colab later on, the files will still be the same (you can of course
#@markdown remove directories inside your Drive's `flax/` root if you want to
#@markdown manually revert these files).
# Colab-only setup: fetch the Flax repo and copy the example files either to
# the ephemeral VM or to a mounted Google Drive (persistent).
# NOTE(review): the `# !...` lines are Colab shell magics; this cell is only
# runnable inside a notebook, not as plain Python.
if 'google.colab' in str(get_ipython()):
  import os
  os.chdir('/content')
  # Download Flax repo from Github.
  if not os.path.isdir('flaxrepo'):
    # !git clone --depth=1 -b $branch $repo flaxrepo
  # Copy example files & change directory.
  mount_gdrive = 'no' #@param ['yes', 'no']
  if mount_gdrive == 'yes':
    DISCLAIMER = 'Note : Editing in your Google Drive, changes will persist.'
    from google.colab import drive
    drive.mount('/content/gdrive')
    example_root_path = f'/content/gdrive/My Drive/flax/{example_directory}'
  else:
    DISCLAIMER = 'WARNING : Editing in VM - changes lost after reboot!!'
    example_root_path = f'/content/{example_directory}'
  from IPython import display
  display.display(display.HTML(
      f'<h1 style="color:red;" class="blink">{DISCLAIMER}</h1>'))
  if not os.path.isdir(example_root_path):
    os.makedirs(example_root_path)
    # !cp -r flaxrepo/$example_directory/* "$example_root_path"
  os.chdir(example_root_path)
  from google.colab import files
  # Prepend the disclaimer banner to each editable file, then open it in the
  # Colab editor pane.
  for relpath in editor_relpaths:
    s = open(f'{example_root_path}/{relpath}').read()
    open(f'{example_root_path}/{relpath}', 'w').write(
        f'## {DISCLAIMER}\n' + '#' * (len(DISCLAIMER) + 3) + '\n\n' + s)
    files.view(f'{example_root_path}/{relpath}')
# + id="xcXZ-F3_zBuJ" colab={"base_uri": "https://localhost:8080/"} outputId="4a604681-4c3e-4724-8ee6-6990e5027cca"
# Note : In Colab, above cell changed the working directory.
# !pwd
# + [markdown] id="Tt0rL4ycp4ZB"
# ## Imports
# + id="EdzHCJuop4ZB"
from absl import app
app.parse_flags_with_usage(['seq2seq'])
from absl import logging
logging.set_verbosity(logging.INFO)
import jax
# + tags=[] id="6Y1ru2Ovp4ZI"
# Local imports from current directory - auto reload.
# Any changes you make to train.py will appear automatically.
# %load_ext autoreload
# %autoreload 2
import train
# + [markdown] id="gGi7zcRpp4ZL"
# ## Dataset
# + colab={"base_uri": "https://localhost:8080/"} id="xce4axo5Y9xp" outputId="81cf5931-4252-4d5f-dbe2-c6a4d5bad781"
# Examples are generated on the fly.
list(train.get_examples(5))
# + colab={"base_uri": "https://localhost:8080/"} id="k_ZD70nIYlEq" outputId="b26163a4-1316-4409-9325-6300e422db11"
batch = train.get_batch(5)
# A single query (/answer) is one-hot encoded.
batch['query'][0]
# + colab={"base_uri": "https://localhost:8080/"} id="UF19Nr2zZRQo" outputId="1700d364-c0c2-4901-e71f-a602329851f4"
# Note how CTABLE encodes PAD=0, EOS=1, '0'=2, '1'=3, ...
train.decode_onehot(batch['query'][:1])
# + [markdown] id="KqW8WP5bp4ZS"
# ## Training
# + id="zzBxSXGGyEfw"
# Get a live update during training - use the "refresh" button!
# (In Jupyter[lab] start "tensorboard" in the local directory instead.)
if 'google.colab' in str(get_ipython()):
# %load_ext tensorboard
# %tensorboard --logdir=./workdirs
# + id="LR9apE1dcFy0"
import time
workdir = f'./workdirs/{int(time.time())}'
# + colab={"base_uri": "https://localhost:8080/"} id="HgjiCPuAbZ5m" outputId="59e41d30-4dc4-48f9-8091-1ab76619f938"
# Train 2k steps & log 20 times.
app.parse_flags_with_usage([
'seq2seq',
'--num_train_steps=2000',
'--decode_frequency=100',
])
# + colab={"base_uri": "https://localhost:8080/"} id="az3CUuNacBkS" outputId="cdcd2be0-2dbb-42fa-84bc-d8b162fc0092"
state = train.train_model(workdir=workdir)
# + tags=[] id="mZOKD0Y7p4ZW" colab={"base_uri": "https://localhost:8080/"} cellView="form" outputId="4681faa5-0323-4404-eeef-3cd7baa4995a"
if 'google.colab' in str(get_ipython()):
#@markdown You can upload the training results directly to https://tensorboard.dev
#@markdown
#@markdown Note that everybody with the link will be able to see the data.
upload_data = 'no' #@param ['yes', 'no']
if upload_data == 'yes':
# !tensorboard dev upload --one_shot --logdir ./workdirs --name 'Flax examples/seq2seq (Colab)'
# + [markdown] id="GBh-2D-Wp4ZY"
# ## Inference
# + colab={"base_uri": "https://localhost:8080/"} id="hwi0ylrOgVKT" outputId="2fe44b10-5f9a-42f7-d6c6-3ca99699a250"
inputs = train.encode_onehot(['2+40'])
# batch, max_length, vocab_size
inputs.shape
# + id="hNRtka4Ng61k"
# Using different random seeds generates different samples.
preds = train.decode(state.params, inputs, jax.random.PRNGKey(0))
# + colab={"base_uri": "https://localhost:8080/"} id="2LWKWLyohTt8" outputId="de61eec7-7503-482e-9bdc-b0c29f85cee1"
train.decode_onehot(preds)
| examples/seq2seq/seq2seq.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Listas
# Vamos a empezar por definir una cadena.
myword = "mathematics"
# La función `list` convierte una variable en una lista.
letters = list("mathematics")
# Tenga en cuenta que `myword` es una cadena, y los métodos asociados con esta variable son métodos de cadena, mientras que` letters` es un objeto **list**, por lo que sus métodos son diferentes:
letters
type(letters)
dir(letters)
# También podemos preguntar cuántos elementos hay en una lista:
len(letters)
letters
# Para acceder a cada elemento de una lista, hacemos lo mismo que hicimos para las cadenas (recuerde que los índices de Python comienzan en 0)
letters[0]
# También podemos acceder al último elemento de la lista mediante un índice especial:
letters[-1]
# De hecho, los índices negativos funcionan como esperaríamos:
letters[-3]
# ---
#
# Algunos métodos especiales son muy interesantes:
# El método `append` agrega un nuevo elemento al final de la lista:
sentence = "It's a beautiful day"
pieces = sentence.split()
pieces
pieces.append("!")
pieces
# El método `insert` agrega un elemento a la lista, en la posición indicada por el usuario:
pieces.insert(2,"really")
pieces
# En Python, el *operador* `del` se puede usar para excluir un elemento de una lista, dado su índice:
del pieces[2]
pieces
# Sin embargo, es más común excluir un elemento de una lista utilizando el método `pop` (que excluye un elemento seleccionado por su índice, como el operador` del`) o el método `remove` (que elimina un elemento de la lista seleccionado por su *valor*):
pieces.remove("beautiful")
pieces
# El método `pop` es interesante porque devuelve, al final de la ejecución, el valor para el elemento eliminado:
pieces.pop(3)
# (el valor de retorno se mostró en la salida de la celda anterior)
# También podemos recuperar el índice de un elemento en la lista dado su valor:
pieces.index("day")
# Para averiguar si un elemento determinado está en la lista, usamos la siguiente sintaxis:
"!" in pieces
# Tenga en cuenta que en la lista `piezas`, ¡cada entrada también puede verse como una lista!
pieces[2]
pieces[2][0]
# Tómese un tiempo para entender la sintaxis de arriba ...
# ## Cortar
#
# Podemos seleccionar partes de una lista (o cadena) fácilmente usando el concepto de *rebanar*.
numbers = [1,3,6,9,12,15,18,21]
numbers[0]
numbers[-1]
# Para seleccionar artículos del índice 3 al 5, usamos
numbers[3:6]
# ¡Lo sé! Pero hay una razón por la que esa salida no incluye el elemento con el índice 6: cuando decimos `list [start: end]`, el resultado es una lista con elementos `end-start`.
numbers[3:4]
# Tenga en cuenta que con un parámetro adicional podemos seleccionar todos los demás elementos de la lista:
numbers[0:11:2]
# Podemos generar una nueva lista con piezas de la lista original:
numbers = [numbers[4:6], numbers[3:8]]
print(numbers)
# Desafortunadamente, el resultado anterior es una lista de listas: cada elemento de la lista es, por sí mismo, otra lista:
numbers[0]
# Ahora, para acceder a un elemento individual de la lista `numbers` necesitamos usar un índice para la lista externa, y otro para la lista interna:
numbers[0][1]
# Para transformar esta lista en una lista plana (simple), podemos usar el siguiente comando (es bastante mágico, pero funciona! ;) )
mylist = [item for sublist in numbers for item in sublist]
mylist
# Si desea comprender este último ejemplo, revise el notebook extra - Lista de Comprensiones
| content/es/Notebook 03 - Listas.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="u88YQ_g5_E10"
# You now know the following
#
# 1. Generate open-loop control from a given route
#
# 2. Simulate vehicular robot motion using bicycle/ unicycle model
#
# Imagine you want to make an utility for your co-workers to try and understand vehicle models.
# Dashboards are common way to do this.
#
# There are several options out there : Streamlit, Voila, Observable etc
#
# Follow this
# <a href="https://medium.com/plotly/introducing-jupyterdash-811f1f57c02e">Medium post</a> on Jupyter Dash and see how to package what you learnt today in an interactive manner
#
# Here is a <a href="https://stackoverflow.com/questions/53622518/launch-a-dash-app-in-a-google-colab-notebook">stackoverflow question </a> on how to run dash applications on Collab
# + [markdown] id="dCSchex3_E12"
# What can you assume?
# # + Fix $v,\omega$ or $v,\delta$ depending on the model (users can still pick the actual value)
# # + fixed wheelbase for bicycle model
#
# Users can choose
# # + unicycle and bicycle models
# # + A pre-configured route ("S", "inverted-S", "figure-of-eight" etc)
# # + 1 of 3 values for $v, \omega$ (or $\delta$)
# + id="hDAEnWqh_E13" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1624638724448, "user_tz": -330, "elapsed": 14444, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03894933400923877588"}} outputId="e09c8e38-3488-460b-ed3b-80c25f126ec9"
pip install jupyter-dash -q
# + id="Pu6KAYvXl4SR" colab={"base_uri": "https://localhost:8080/", "height": 671} executionInfo={"status": "ok", "timestamp": 1624641282362, "user_tz": -330, "elapsed": 1059, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03894933400923877588"}} outputId="de7eefea-1512-4a3a-f731-80d92dc4032f"
from jupyter_dash import JupyterDash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# Load Data
# df = px.data.tips()
# Build App
app = JupyterDash(__name__)
app.layout = html.Div([
html.H1("Vehicle Simulation models"),
dcc.Graph(id='graph'),
html.Br(),
html.Label([
"model",
dcc.RadioItems(
id='model-options',
value='unicycle',options=[{'label':'unicycle model','value':'unicycle'},{'label':'bicycle model','value':'bicycle'}]
)
]),
html.Br(),
html.Label([
"route",
dcc.RadioItems(
id='route-options',
value='s',options=[
{'label':'S','value':'s'},
{'label':'inverted-S','value':'smirror'},
{'label':'figure-of-eight','value':'eight'}]
)
]),
html.Br(),
html.Label([
"straight_line_velocity",
dcc.Slider(
id='velocity-value-straight', value = 1,
min=0,max=5,step=0.5,
marks={i: '{}'.format(i) for i in np.arange(0,5,0.5)}
)
]),
html.Br(),
html.Label([
"turn_velocity",
dcc.Slider(
id='velocity-value-turn', value = 0.5,
min=0,max=3,step=0.2,
marks={i: '{}'.format(i) for i in np.arange(0,3,0.2)}
)
]),
html.Br(),
html.Label([
"step_angular_velocity",
dcc.Slider(
id='angular-velocity-value', value = 0.2,
min=0,max=2,step=0.1,
marks={i: '{}'.format(i) for i in np.arange(0,2,0.1)}
)
])
# html.Div(id='graph')
])
# , style={'columnCount': 2}
# Define callback to update graph
@app.callback(
Output('graph', 'figure'),
Input('model-options', 'value'),
Input('route-options', 'value'),
Input('velocity-value-straight', 'value'),
Input('velocity-value-turn', 'value'),
Input('angular-velocity-value', 'value')
)
def update_figure(model_choice, route_choice, vc_fast, vc_slow, ang_vel):
    """Dash callback: simulate the selected vehicle model along the chosen
    pre-configured route and return a plotly figure of the resulting path."""
    dt = 0.1
    start_pose = (0, 0, np.pi / 2)  # start at the origin, heading "up"
    leg = 10  # straight-segment length shared by every route
    # Each route is a sequence of (maneuver, value) commands: turns are in
    # degrees, straights are distances.
    s_shape = [
        ("right", 90), ("straight", leg), ("left", 90), ("straight", leg),
        ("left", 90), ("straight", leg), ("right", 90), ("straight", leg),
        ("right", 90), ("straight", leg),
    ]
    s_mirrored = [
        ("left", 90), ("straight", leg), ("right", 90), ("straight", leg),
        ("right", 90), ("straight", leg), ("left", 90), ("straight", leg),
        ("left", 90), ("straight", leg),
    ]
    routes = {
        "s": s_shape,
        "smirror": s_mirrored,
        # Figure-of-eight = the "S" followed by the mirrored "S" minus its
        # leading turn/straight pair (identical command list to spelling it out).
        "eight": s_shape + s_mirrored[2:],
    }
    route = routes[route_choice]
    if model_choice == 'unicycle':
        trajectory = unicycle_model(start_pose, route, vc_fast, vc_slow, ang_vel, dt)
    elif model_choice == 'bicycle':
        trajectory = bicycle_model(start_pose, route, vc_fast, vc_slow, ang_vel, dt)
    fig = px.scatter(x=trajectory[:, 0], y=trajectory[:, 1])
    fig.update_traces(mode='lines+markers')
    fig.layout.yaxis.scaleanchor = 'x'  # equal aspect ratio so the path isn't distorted
    fig.layout.height = 600
    fig.layout.width = 600
    fig.layout.title = 'vehicle path'
    return fig
def unicycle_model(pose, route, vcfast, vcslow, wc, dt=0.01):
    """Integrate a unicycle-model robot along an open-loop route.

    Parameters
    ----------
    pose : (x, y, theta) start pose, theta in radians.
    route : list of (maneuver, value) commands; "straight" takes a distance,
        "left"/"right" take a turn angle in degrees.
    vcfast : linear speed on straight segments.
    vcslow : linear speed while turning.
    wc : turn-rate magnitude (rad per unit time).
    dt : integration time step.

    Returns
    -------
    np.ndarray of shape (N, 3): the pose held *before* each integration step
    (the final post-step pose is not included, matching the original code).
    """
    omega_for = {"straight": 0, "left": wc, "right": -wc}
    # Expand the route into one (v, w) command per integration step.
    commands = []
    for maneuver, value in route:
        if maneuver == "straight":
            n_steps = int(np.ceil(value / (vcfast * dt)))
            commands.extend([(vcfast, omega_for[maneuver])] * n_steps)
        else:
            n_steps = int(np.ceil(np.deg2rad(value) / (wc * dt)))
            commands.extend([(vcslow, omega_for[maneuver])] * n_steps)
    # Forward-integrate the kinematics, recording the pose at the start of
    # each step.
    x, y, theta = pose
    visited = []
    for v, w in commands:
        visited.append((x, y, theta))
        x += v * np.cos(theta) * dt
        y += v * np.sin(theta) * dt
        theta += w * dt
        theta = np.arctan2(np.sin(theta), np.cos(theta))  # wrap to (-pi, pi]
    return np.array(visited)
def bicycle_model(pose, route, vcfast, vcslow, deltac, dt=0.01, wheelbase=0.9):
    """Integrate a kinematic-bicycle-model robot along an open-loop route.

    Parameters
    ----------
    pose : (x, y, theta) start pose, theta in radians.
    route : list of (maneuver, value) commands; "straight" takes a distance,
        "left"/"right" take a turn angle in degrees.
    vcfast : linear speed on straight segments.
    vcslow : linear speed while turning.
    deltac : steering-angle magnitude used for turns (radians).
    dt : integration time step.
    wheelbase : axle-to-axle distance. Previously hard-coded as 0.9 in two
        places; now a backward-compatible parameter (default unchanged).

    Returns
    -------
    np.ndarray of shape (N, 3): the pose held *before* each integration step.
    """
    steer_for = {"straight": 0, "left": deltac, "right": -deltac}
    all_delta = []
    all_v = []
    for maneuver, value in route:
        u = steer_for[maneuver]
        if maneuver == "straight":
            n_steps = int(np.ceil(value / (vcfast * dt)))
            v_cmd = vcfast
        else:
            # Steps needed to sweep `value` degrees at the turn rate
            # (vcslow / wheelbase) * tan(deltac).
            n_steps = int(np.ceil(np.deg2rad(value) * wheelbase / (vcslow * np.tan(deltac) * dt)))
            v_cmd = vcslow
        all_delta += [u] * n_steps
        all_v += [v_cmd] * n_steps
    robot_trajectory = []
    for v, delta in zip(all_v, all_delta):
        x, y, theta = pose
        x += v * np.cos(theta) * dt
        y += v * np.sin(theta) * dt
        theta += (v / wheelbase) * np.tan(delta) * dt
        theta = np.arctan2(np.sin(theta), np.cos(theta))  # wrap to (-pi, pi]
        robot_trajectory.append(pose)  # record the pose before this step
        pose = (x, y, theta)
    return np.array(robot_trajectory)
# Run app and display result inline in the notebook
app.run_server(mode='inline')
# + id="DiVQcG3zmL0G"
| week1/akshathaj/Q5 - Q/Attempt1_filesubmission_dashboard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Problem-1
# Visualize a PDB structure, set styles, and add labels.
import py3Dmol
# ### TODO-1
# Instantiate py3Dmol viewer with PDB structure 1NCA (Neuraminidase-FAB complex)
... your code here ...
# ### TODO-2
# Apply the following styles to this structure:
# * chain N (Neuraminidase): orange cartoon
# * chain H (Heavy chain): blue sphere
# * chain L (Light chain): lightblue sphere
#
... your code here ...
# ### TODO-3: Add text labels to the three chains
... your code here ...
# ### Bonus: Set the style for sugar residues MAN, BMA, and NAG to stick and color by a greenCarbon colorscheme.
... your code here ...
| 1-3D-visualization/Problem-1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # pyABF-APisolation - Code Walkthrough
# pyABF-APisolation is a simple module built to quickly extract action potentials and their raw features from abf files. It is built on top of SWharden's excellent 'pyABF'. Features are defined by the values specified in the Allen Institute's electrophysiology white papers.
# The goal of this document is to show exactly how the code goes about extracting the features.
# ### Step 0: import the dependencies, import the ABF
# To begin with we need to import the dependencies. This code utilises numpy, and pandas for data storage and output. Additionally, we utilise the previously written abfderivative tools found here: https://github.com/smestern/abfderivative.
import numpy as np
from numpy import genfromtxt
import matplotlib.pyplot as plt
from abfderivative import *
from nuactionpotential import *
import pyabf
from pyabf.tools import *
from pyabf import filter
import os
import pandas as pd
import statistics
# Now we simply import our ABF file
abf = pyabf.ABF('./processed/1971003-p.abf')
# After importing the ABF file, we can take a look to ensure it's working.
abf.setSweep(4) #Set to sweep number 4
plt.plot(abf.sweepX, abf.sweepY, 'b-') #plot our sweep
# Now, we can simply call apisolate. This is the function you will want to use most often. Ap isolate returns both a feature array and the raw traces for all the action potentials found in a file.
aparray, featarray, abf = apisolate(abf, 0, 'tag', False, True, plot=1)
display(featarray.head())
# As you can see, apisolate() returns a reasonably complete feature array. Now lets look at how it determines these values.
# ### Step 1: Isolate the raw traces
# First, apisolate() calls appreprocess(). This function serves to isolate the 'raw' traces, as well as some feature data.
# appreprocess begins by constructing arrays to fill. Then it calls thresholdavg() to find the 5% threshold avg.
# + slideshow={"slide_type": "-"}
thresholdsl = (thresholdavg(abf, 4) * 0.05)
print(thresholdsl)
# -
# This function utilizes SWhardens pyabf.tools.ap.ap_points_currentSweep() function. The gist of this function is to look for points where the derivative of the sweep crosses 15 mV/mS.
# It accomplishes this, roughly, by fitting a line and looking for crossings, then centring on the dV/dT peak.
# + tags=["hide_input"]
slopex, slopey = derivative(abf,4)
index = pyabf.tools.ap.ap_points_currentSweep(abf)
plt.plot(abf.sweepX[0:700], slopey[0:700])
plt.plot(abf.sweepX[0:700], np.full(700, 20))
plt.plot(abf.sweepX[index], slopey[index], 'rx')
plt.xlim(0.020, 0.025)
plt.xlabel('time (S)')
plt.ylabel('dV/dT')
# -
# thresholdavg() simply tabulates the mean of these returned indices.
# From here, appreprocess() takes each index and steps backwards until it reaches below 5% of the threshold. This is marked as our (single) action potential threshold, and the start of our ap.
# Now find the point where DVDT falls below the 5% threshold
apstrt = (int(index[0] - (abf.dataPointsPerMs * 2)))
thresholdslloc = index[0]
indexloc = np.nonzero(np.where(slopey[apstrt:thresholdslloc] < thresholdsl, 1, 0))[0]
if indexloc.size < 1:
idx = apstrt
else:
indexloc += apstrt
idx = indexloc[-1]
apstrt = idx
# + tags=["hide_input"]
plt.plot(abf.sweepX[0:700], slopey[0:700])
plt.plot(abf.sweepX[0:700], np.full(700, thresholdsl))
plt.axvspan(abf.sweepX[int(index[0] - (abf.dataPointsPerMs * 2))], abf.sweepX[thresholdslloc], facecolor='#2ca02c', alpha=0.25, label='search area')
plt.plot(abf.sweepX[apstrt], slopey[apstrt], 'gx', label='Intersection at threshold')
plt.xlim(0.020, 0.025)
plt.xlabel('time (S)')
plt.ylabel('dV/dT')
plt.legend()
# -
# Next, the algorithm searches forwards for the absolute peak of the Action potential
apstrt = (int(index[0] - (abf.dataPointsPerMs * 2)))
if apstrt < 0:
apstrt=0
apend = int(index[0] + (abf.dataPointsPerMs * 3))
aploc = np.argmax(abf.sweepY[apstrt:apend]) + apstrt
# + tags=["hide_input"]
plt.plot(abf.sweepX[0:700], abf.sweepY[0:700])
plt.axvspan(abf.sweepX[apstrt], abf.sweepX[apend], facecolor='#2ca02c', alpha=0.25, label='search area')
plt.plot(abf.sweepX[aploc], abf.sweepY[aploc], 'gx', label='Peak mV')
plt.xlim(0.020, 0.0275)
plt.xlabel('time (S)')
plt.ylabel('mV')
plt.legend()
# -
# Now that we have a mostly established peak, we need to cap off the action potential. We cap at either 10ms post peak or the next ap, whichever is sooner.
## Now we check to ensure the action potentials do not over lap
if((index[0] - index[1]) > (abf.dataPointsPerMs * 10)): ##if the next ap is over 10ms away then we simple cap off at 10ms
apend = abs(int(aploc + abf.dataPointsPerMs * 10))
elif apend > index[1]:
apend = index[1] #otherwise we cap the end at the next threshold
aploc = np.argmax(abf.sweepY[apstrt:apend]) + apstrt #and re-find the peak
else:
apend = abs(int(aploc + abf.dataPointsPerMs * 10)) #if this is the last ap in the sweep we cap at 10ms
k, = abf.sweepY.shape
print(index)
if apend > k:
apend = int(k) - 1
plt.plot(abf.sweepX[0:apend+200], abf.sweepY[0:apend+200])
plt.axvspan(abf.sweepX[apstrt], abf.sweepX[(abf.dataPointsPerMs * 10)+aploc], facecolor='#2ca02c', alpha=0.25, label='search area')
plt.plot(abf.sweepX[apend], abf.sweepY[apend], 'gx', label='Peak mV')
plt.xlim(0.020, 0.035)
plt.xlabel('time (S)')
plt.ylabel('mV')
plt.legend()
| bin/pyABF-APisolation - Code Walkthrough.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import logging
import os
import matplotlib
import matplotlib.pyplot as plt
from matplotlib import offsetbox
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from statsmodels.stats.multicomp import MultiComparison
import divisivenormalization.analysis as analysis
import divisivenormalization.utils as helpers
from divisivenormalization.data import Dataset, MonkeySubDataset
helpers.config_ipython()
logging.basicConfig(level=logging.INFO)
sns.set()
sns.set_style("ticks")
# adjust sns paper context rc parameters
font_size = 8
rc_dict = {
"font.size": font_size,
"axes.titlesize": font_size,
"axes.labelsize": font_size,
"xtick.labelsize": font_size,
"ytick.labelsize": font_size,
"legend.fontsize": font_size,
"figure.figsize": (helpers.cm2inch(8), helpers.cm2inch(8)),
"figure.dpi": 300,
"pdf.fonttype": 42,
"savefig.transparent": True,
"savefig.bbox_inches": "tight",
}
sns.set_context("paper", rc=rc_dict)
class args:
    """Namespace of analysis settings and file locations for this notebook."""
    num_best = 10  # number of top models (by validation performance) to analyze
    num_val = 10  # presumably size of a validation model subset — not used in this chunk, confirm
    fname_best_csv = "df_best.csv"  # cache file for the best-models dataframe
    fname_val_csv = "df_val.csv"  # cache file for the validation dataframe (not used in this chunk)
    weights_path = "weights"  # directory where per-run pickles (e.g. feve.pkl) are stored
    train_logs_path = "train_logs"  # directory with training logs/checkpoints used to load models
    orientation_binsize = np.deg2rad(10)  # presumably orientation-histogram bin width in radians — confirm
    stim_full_size = 140  # full size of stimulus w/o subsampling and cropping
    stim_subsample = 2  # stimulus subsampling factor (matches MonkeySubDataset(subsample=2) above)
    oriented_threshold = 0.125  # presumably threshold for classifying units as orientation-tuned — confirm
# -
# ### Load data
# +
results_df = pd.read_csv("results.csv")
# Save a simplified version of the csv file, sorted by validation set performance
df_plain = helpers.simplify_df(results_df)
df_plain.to_csv("results_plain.csv")
data_dict = Dataset.get_clean_data()
data = MonkeySubDataset(data_dict, seed=1000, train_frac=0.8, subsample=2, crop=30)
# -
# ### Get and save FEV performance on test set
# Use the 10 best models for analysis.
# Split the csv files accordingly. Also, extract some weights to be used for later analysis and save
# them as pickle. As this operation requires model loading, we do it only if it was not done before.
# +
try:
df_best = pd.read_csv(args.fname_best_csv)
logging.info("loaded data from " + args.fname_best_csv)
except FileNotFoundError:
df_best = df_plain[0 : args.num_best].copy()
fev_lst = []
for i in range(args.num_best):
run_no = df_best.iloc[i]["run_no"]
logging.info("load run no " + str(run_no))
model = helpers.load_subunit_model(run_no, results_df, data, args.train_logs_path)
fev = model.evaluate_fev_testset()
fev_lst.append(fev)
feve = model.evaluate_fev_testset_per_neuron()
helpers.pkl_dump(feve, run_no, "feve.pkl", args.weights_path)
df_best["fev"] = fev_lst
df_best.to_csv(args.fname_best_csv)
# -
fev = df_best.fev.values * 100
print("Mean FEV", fev.mean())
print("SEM", stats.sem(fev, ddof=1))
print("max FEV", fev.max())
print("FEV of model with max correlation on validation set", fev[0])
| subunit_net/analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # abc
# +
doc_a = "Brocolli is good to eat. My brother likes to eat good brocolli, but not my mother."
doc_b = "My mother spends a lot of time driving my brother around to baseball practice."
doc_c = "Some health experts suggest that driving may cause increased tension and blood pressure."
doc_d = "I often feel pressure to perform well at school, but my mother never seems to drive my brother to do better."
doc_e = "Health professionals say that brocolli is good for your health."
# compile sample documents into a list
doc_set = [doc_a, doc_b, doc_c, doc_d, doc_e]
# -
# # Tokenization
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
raw = doc_a.lower()
tokens = tokenizer.tokenize(raw)
print(tokens)
# # Stop words
from nltk.corpus import stopwords
# create English stop words list
en_stop = stopwords.words('english')
# +
# remove stop words from tokens
stopped_tokens = [i for i in tokens if not i in en_stop]
print(stopped_tokens)
# -
# # Stemming
# +
from nltk.stem.porter import PorterStemmer
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# -
# stem token
texts = [p_stemmer.stem(i) for i in stopped_tokens]
print(texts)
# # Constructing a document-term matrix
# +
# #!pip install gensim
# -
a = []
a.append(texts)
a
# +
from gensim import corpora, models
dictionary = corpora.Dictionary(a)
# -
corpus = [dictionary.doc2bow(text) for text in a]
print(corpus[0])
# # Applying the LDA model
ldamodel = models.ldamodel.LdaModel(corpus, num_topics=3, id2word = dictionary, passes=20)
# # Examining the result
print(ldamodel.print_topics(num_topics=3, num_words=3))
# +
ldamodel = models.ldamodel.LdaModel(corpus, num_topics=2, id2word = dictionary, passes=20)
b = ldamodel.print_topics(num_topics=2, num_words=4)
# -
len(b)
for i in b:
print(i)
# -----
# # My Example
doc_f = "The decision to ban lawmaker <NAME> from running in a rural representative election was based on a shaky argument that could be struck down in court, according to leading legal scholars, who also called on Hong Kong’s courts to clarify the vagueness in election laws. <NAME>, the former law dean of the University of Hong Kong, was speaking on Sunday after Chu was told he would not be allowed to run for a post as a local village’s representative. Returning officer <NAME> pointed to Chu’s stance on Hong Kong independence and said the lawmaker had dodged his questions on his political beliefs. Yuen took this to imply that Chu supported the possibility of Hong Kong breaking with Beijing in the future. Chan, however, said Chu’s responses to the returning officer were open to interpretation. The legal scholar did not believe they met the standard of giving the election officer “cogent, clear and compelling” evidence as required by the precedent set in the case of And<NAME> Ho-tin. <NAME> was barred from standing in a Legislative Council by-election in New Territories West in 2016 because of his political beliefs. According to Section 24 of the Rural Representative Election Ordinance, candidates are required to declare their allegiance to the Hong Kong Special Administrative Region and to state they will uphold the Basic Law, Hong Kong’s mini-constitution, when filing their application. The allegiance requirement was written into law in 2003, mirroring clauses in the rules for the Legco and district council elections, but it had never been applied by an election officer. The situation changed after separatist <NAME> lost his election appeal in February this year, with the courts saying returning officers could ban candidates who held political views that ran contrary to the Basic Law. 
While the landmark ruling was concerned only with Legco elections, <NAME> said, after Chu’s case, returning officers for other elections could have similar powers to ban candidates from running, including in the district council elections next year. <NAME>, the lawyer who represented <NAME>, said the ruling would be binding on returning officers for other elections. <NAME>, another legal scholar at HKU, said Yuen had provided weak reasons for disqualifying Chu. He agreed that there will be room for Chu to launch an appeal. “The logic has become – if your interpretation of the Basic Law is different from the government’s, it means you have no intention of upholding the Basic Law,” Cheung said. He also said Hong Kong courts must clarify the vagueness in election laws and process such appeals more quickly. <NAME>, the former deputy home affairs secretary who led the government’s effort to formalise rural representative elections under the ordinance, said it was “common sense” that rural representatives had to uphold allegiance to Hong Kong. “The village representatives are also elected by people, and they are empowered to identify who the indigenous villagers are,” Fisher said before Chu’s disqualification. “So it’s normal that the legal drafting [of the ordinance] follows the law on Legislative Council and district council elections.” Fisher, who would not comment on Chu’s case, said it would have been “unthinkable” for anyone back then to have imagined a candidate being disqualified for their political views. “The requirement was written there, but it was never contentious,” Fisher said. Chu was disqualified by Yuen because he had “defended independence as an option to Hongkongers” in a statement in 2016. Pressed twice by the returning officer to clarify his position, Chu would say only that he did not support Hong Kong’s independence, but added that he would support another’s right to peacefully advocate it. 
Johannes Chan said Chu’s political stance was open to interpretation, and the election officer could hardly fulfil the criteria for providing “cogent, clear and compelling” evidence to disqualify him. “At best, we could argue Chu’s reply to the officer was vague about self-determination – even the returning officer himself confessed Chu was only ‘implicitly’ confirming independence as an option,” he said. “But we can’t take a candidate’s silence as his stance. That would have jumped many, many steps.” The decision on Sunday would also create a “conflicting” situation over Chu's political allegiance, Chan added, since the lawmaker remained in office but was disqualified in a separate election. Both Chan and Li said how the returning officer had come to the disqualification might require clarification in any future court ruling. “It was as if they [government officials] could read your mind,” Li said. “The court still has not clarified how far back election officials can look – such as in this case, could we go back to statements Chu made two years ago?” Chan asked."
# # Tokenization
# Tokenise the raw document: keep runs of word characters, dropping punctuation.
from nltk.tokenize import RegexpTokenizer
tokenizer = RegexpTokenizer(r'\w+')
my_raw = doc_f.lower()  # doc_f: raw document text built earlier in the notebook (not shown here)
my_tokens = tokenizer.tokenize(my_raw)
#print(my_tokens)
# # Stop words
from nltk.corpus import stopwords
# create English stop words list
eng_stop = stopwords.words('english')
# +
# remove stop words from tokens
my_stopped_tokens = [i for i in my_tokens if not i in eng_stop]
#print(my_stopped_tokens)
# -
# # Stemming
# +
from nltk.stem.porter import PorterStemmer
# Create p_stemmer of class PorterStemmer
p_stemmer = PorterStemmer()
# -
# stem token
my_texts = [p_stemmer.stem(i) for i in my_stopped_tokens]
#print(texts)
my_texts_list = []
#my_texts_list.append(my_texts)
# NOTE(review): the stemmed tokens (my_texts) are computed but the *unstemmed*
# tokens are what feeds the dictionary below — confirm this is intended.
my_texts_list.append(my_stopped_tokens)
#my_texts_list
# +
from gensim import corpora, models
# Map each token to an integer id; the corpus below is the bag-of-words form.
my_dictionary = corpora.Dictionary(my_texts_list)
# -
my_corpus = [my_dictionary.doc2bow(text) for text in my_texts_list]
# Preview the bag-of-words representation of the (single) document.
# Fix: the original referenced an undefined name `corpus` (NameError); the
# list built above is called `my_corpus`.
my_corpus[0]
# # Applying the LDA model
# Fit a 3-topic LDA model and show the top 3 words per topic.
# NOTE(review): the corpus holds a single document, so the learned topics are
# unlikely to be meaningful — presumably this is a demo run; confirm.
my_ldamodel = models.ldamodel.LdaModel(my_corpus, num_topics=3, id2word = my_dictionary, passes=20)
result = my_ldamodel.print_topics(num_topics=3, num_words=3)
result
| notebook/LDA.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.2 64-bit (''base'': conda)'
# name: python3
# ---
# Load the test image with OpenCV and convert BGR -> single-channel grayscale.
import cv2
fileName = "test"
mg_cv = cv2.imread(fileName + ".png")
gray=cv2.cvtColor(mg_cv,cv2.COLOR_BGR2GRAY)
import matplotlib.pyplot as plt
plt.imshow(gray, cmap='gray')
gray.shape
# Load the native SIFT implementation compiled as a shared library.
from ctypes import cdll
test = cdll.LoadLibrary("./lib/libtest.so")
type(gray)
import ctypes
import numpy as np
n = gray.shape[0]
m = gray.shape[1]
n,m
# Output buffer the C code fills in place; the C side works on doubles, so the
# grayscale image is converted up front.
res = np.empty((gray.shape[0], gray.shape[1]), dtype='float64')
gray = gray.astype('float64') # double
# Parameters handed to the C SIFT implementation.
# NOTE(review): kr/ks/ko are presumably kernel radius / scales / orientation
# bins — confirm against the C header; `sigma` is never passed to `test.sift`.
sigma = 10.0
kr = 16
ks = 4
ko = 8
S = 1
sigma_init = 0.5
contrast_threshold = 0.03
edge_response_threshold = 10.0
max_iterpolation = 10
# Receives per-stage timing measurements back from the C code.
time_arr4 = np.empty(4, dtype='float64')
# +
# res_num = 20
# res_img = np.empty(gray.shape[0] * gray.shape[1] * 4 * res_num, dtype='float64')
# res_n = np.empty(res_num, dtype='int32')
# res_m = np.empty(res_num, dtype='int32')
# # gray_t* gray_img, int n, int m, gray_t* res_img, int* res_n, int* res_m, int res_num, int S, double sigma_init
# test.test.argtypes = [np.ctypeslib.ndpointer(dtype=gray.dtype, ndim=2, shape=gray.shape, flags='C_CONTIGUOUS'),
# ctypes.c_int, # n
# ctypes.c_int, # m
# np.ctypeslib.ndpointer(dtype=res_img.dtype, ndim=1, shape=res_img.shape, flags='C_CONTIGUOUS'),
# np.ctypeslib.ndpointer(dtype=res_n.dtype, ndim=1, shape=res_n.shape, flags='C_CONTIGUOUS'),
# np.ctypeslib.ndpointer(dtype=res_m.dtype, ndim=1, shape=res_m.shape, flags='C_CONTIGUOUS'),
# ctypes.c_int, # res_num
# ctypes.c_int, # S
# ctypes.c_double # sigma_init
# ]
# test.test(gray, n, m, res_img, res_n, res_m, res_num, S, sigma_init)
# cur = 0
# show_all = True
# for i in range(res_num):
# img = res_img[cur:cur+res_n[i] * res_m[i]]
# if show_all or ((i % (S + 3)) > 0 and (i % (S + 3)) < S + 1):
# plt.xlim([0, 2 * gray.shape[1]])
# plt.ylim([2 * gray.shape[0], 0])
# plt.imshow(img.reshape(res_n[i], res_m[i]), cmap='gray')
# plt.savefig("dog{}.png".format(i))
# #plt.show()
# cur += res_n[i] * res_m[i]
# print(res_img[res_n[0] * res_m[0]:res_n[0] * res_m[0]+res_n[1] * res_m[1]])
# -
# Declare the C function's signature for ctypes: two 2-D image buffers, the
# SIFT parameters, and a 4-slot timing array.  All numpy buffers must be
# C-contiguous so the C code can index them directly.
test.sift.argtypes = [np.ctypeslib.ndpointer(dtype=gray.dtype, ndim=2, shape=gray.shape, flags='C_CONTIGUOUS'),
                      np.ctypeslib.ndpointer(dtype=res.dtype, ndim=2, shape=res.shape, flags='C_CONTIGUOUS'),
                      ctypes.c_int, # n
                      ctypes.c_int, # m
                      ctypes.c_int, # kr
                      ctypes.c_int, # ks
                      ctypes.c_int, # ko
                      ctypes.c_int, # S
                      ctypes.c_double, # sigma_init
                      ctypes.c_double, # contrast_threshold
                      ctypes.c_double, # edge_response_threshold
                      ctypes.c_int, # max_iterpolation
                      np.ctypeslib.ndpointer(dtype=time_arr4.dtype, ndim=1, shape=time_arr4.shape, flags='C_CONTIGUOUS')
                      ]
# Run the native SIFT; `res` and `time_arr4` are filled in place.
test.sift(gray, res, n, m, kr, ks, ko, S,
          sigma_init, contrast_threshold,
          edge_response_threshold, max_iterpolation, time_arr4)
time_arr4
n,m
plt.imshow(res, cmap="gray")
plt.savefig(fileName+"_res.png")
res.shape
import numpy as np

# Build a symmetric 3x3 test matrix, stored row-major in a flat length-9 array.
he = np.arange(9, dtype='float64')
he[1], he[2], he[5] = he[3], he[6], he[7]
he_inv = np.arange(9, dtype='float64')
# +
# 0a1 1b1 2c1
# 3a2 4b2 5c2
# 6a3 7b3 8c3
# -
def _minor(a, row, col):
    """2x2 minor of the flat 3x3 matrix `a` with `row` and `col` removed."""
    r0, r1 = [i for i in range(3) if i != row]
    c0, c1 = [j for j in range(3) if j != col]
    return a[3 * r0 + c0] * a[3 * r1 + c1] - a[3 * r0 + c1] * a[3 * r1 + c0]

# Determinant by cofactor expansion down the first column.
det = sum((-1) ** r * he[3 * r] * _minor(he, r, 0) for r in range(3))
# Inverse = adjugate / determinant: entry (r, c) is the signed (c, r) minor.
for r in range(3):
    for c in range(3):
        he_inv[3 * r + c] = (-1) ** (r + c) * _minor(he, c, r) / det
print(he.reshape(3,3))
print(he_inv.reshape(3,3))
# Reference run: OpenCV's own SIFT with the same parameters, for comparison
# against the native implementation above.
import time
# https://docs.opencv.org/4.5.2/d7/d60/classcv_1_1SIFT.html
sift = cv2.SIFT_create(nOctaveLayers=S, contrastThreshold=contrast_threshold,
                       edgeThreshold=edge_response_threshold, sigma=sigma_init)
t = time.time()
kp1, des1 = sift.detectAndCompute(mg_cv, None) # des1 holds the descriptors
t = time.time() - t
img3 = cv2.drawKeypoints(mg_cv,kp1,mg_cv,color=(255,0,255))
plt.imshow(img3)
t
np.array([1]).dtype
| src/.ipynb_checkpoints/Untitled-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # broadening the scope of our classes
# So far, we've used ImageNet data as the basis for teaching our machine about the relationship between language and visual features. The 200 classes of tiny ImageNet and the 1000 classes of ImageNet are aggregations of images described by a number of WordNet nouns - that's where all of our `wordnet_id`s come from.
# In this notebook, we test the hypothesis that we needn't confine ourselves to the 1000 classes of ImageNet. Instead of a large number of images associated with a small number of classes, we can invert the relationship to obtain a small number of images for each of a large number of classes, thereby mapping the word-vector space more completely. When using ImageNet, we precisely define the points in word vector space which map to certain visual features, but the rest of the space must be geometrically inferred. By reducing the precision but increasing the breadth, the hope is that we'll see a more informed network.
# +
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
plt.rcParams["figure.figsize"] = (20, 20)
import os
import io
import requests
import numpy as np
import pandas as pd
from PIL import Image
from scipy.spatial.distance import cdist
from scipy.io import loadmat
from bs4 import BeautifulSoup
import torch
from torch import nn, optim
from torch.utils.data import Dataset, DataLoader
from torchvision import models, transforms
from tqdm._tqdm_notebook import tqdm_notebook as tqdm
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# -
# # get wordnet nouns
# +
# Download the WordNet id -> noun mapping.  Lines that do not split into
# exactly "<id> <word>" (blank or malformed rows) are skipped.
id_to_word = {}
wordnet_url = "http://files.fast.ai/data/classids.txt"
for line in requests.get(wordnet_url).text.split("\n"):
    try:
        wn_id, word = line.split()  # renamed: `id` shadowed the builtin
        id_to_word[wn_id] = word
    except ValueError:
        # Fix: was a bare `except: pass`, which also swallowed real errors
        # (including KeyboardInterrupt); only the expected unpacking failure
        # on malformed lines is ignored now.
        pass
# -
# Normalise to the word-vector vocabulary convention: lower-case, hyphenated.
wordnet_nouns = [word.lower().replace("_", "-") for word in id_to_word.values()]
# # load word vectors
# +
wv_path = "/mnt/efs/nlp/word_vectors/fasttext/crawl-300d-2M.vec"
# Parse the fastText .vec file into {word: 300-d float vector}.
# Fixes: `np.float` was deprecated in NumPy 1.20 and removed in 1.24 — use the
# concrete np.float64 dtype; each line is now split only once; and the file
# handle is closed via a context manager (the original leaked it).
with io.open(wv_path, "r", encoding="utf-8", newline="\n", errors="ignore") as wv_file:
    word_vectors = {
        parts[0]: np.asarray(parts[1:], dtype=np.float64)
        for parts in (line.split() for line in tqdm(list(wv_file)))
    }
# +
word_vector_set = set(word_vectors.keys())
wordnet_set = set(wordnet_nouns)
valid_queries = list(word_vector_set.intersection(wordnet_set))
# -
# # get images of the valid wordnet nouns from google
# We'll use google images to obtain the corresponding image sets for our wordnet nouns. By concatenating the wordnet noun with a google search query string and parsing the response with beautifulsoup, we can build up a broad set of small images relatively quickly, as demonstrated below with a random query.
# +
# Demo: fetch the Google Images result page for one random valid noun and
# build a strip of 64x64 RGB thumbnails from the inline <img> sources.
query = np.random.choice(valid_queries)
base_url = "https://www.google.com/search?tbm=isch&q="
soup = BeautifulSoup(requests.get(base_url + query).content)
urls = [img["src"] for img in soup.findAll("img")]
print(query)
images = [
    (
        Image.open(io.BytesIO(requests.get(url).content))
        .resize((64, 64), resample=Image.BILINEAR)
        .convert("RGB")
    )
    for url in urls
]
Image.fromarray(np.concatenate(images, axis=1))
# -
# We can wrap up that functionality for convenience
def image_search(query):
    """Return a list of RGB PIL images scraped from Google Images.

    Parameters:
        query (str): search term appended to the image-search URL.
    Returns:
        list[PIL.Image.Image]: one image per <img> tag found on the page.
    """
    base_url = "https://www.google.com/search?tbm=isch&q="
    # Fix: name the parser explicitly — without it BeautifulSoup picks
    # whatever is installed (emitting GuessedAtParserWarning) and parse
    # results can differ between environments.
    soup = BeautifulSoup(requests.get(base_url + query).content, "html.parser")
    urls = [img["src"] for img in soup.findAll("img")]
    images = [
        Image.open(io.BytesIO(requests.get(url).content)).convert("RGB") for url in urls
    ]
    return images
images = [i.resize((224, 224)) for i in image_search("dog")]
Image.fromarray(np.concatenate(images, axis=1))
# # save the data
# Let's churn through our wordnet nouns and save a collection of images for each
# +
save_path = "/mnt/efs/images/google_scraping/"
# Scrape thumbnails for 2000 randomly chosen nouns, saved as "<word>_<i>.jpg".
# NOTE(review): np.random.choice samples WITH replacement, so repeated queries
# re-download and overwrite the same files — confirm that is acceptable.
for query in tqdm(np.random.choice(valid_queries, 2000)):
    images = image_search(query)
    for i, image in enumerate(images):
        image.save(save_path + "{}_{}.jpg".format(query, i))
# -
# from here onwards, the process is much the same as before. We'll define our data loading processes, build a simple model with a pre-trained feature-extracting backbone and train it until the loss bottoms out. Then we'll evaluate how well it has generalised against a pre-defined test set and run some test queries using out-of-vocabulary words.
#
# # datasets and dataloaders
# +
# Index the scraped files into a DataFrame of (path, word) pairs; the word is
# encoded in the file name as "<word>_<i>.jpg".
df = {}
for file_name in os.listdir(save_path):
    df[save_path + file_name] = file_name.split("_")[0]
df = pd.Series(df).to_frame().reset_index()
df.columns = ["path", "word"]
# -
# Shuffle before splitting so train and test draw from the same distribution.
df = df.sample(frac=1).reset_index(drop=True)
# +
split_ratio = 0.8
train_size = int(split_ratio * len(df))
# Fix: `.loc[:train_size]` slices by label and is INCLUSIVE, so the row at
# position `train_size` appeared in BOTH frames, leaking one sample into the
# test set.  `.iloc` uses the usual half-open positional semantics.
train_df = df.iloc[:train_size]
test_df = df.iloc[train_size:]
# -
class ImageDataset(Dataset):
    """Dataset yielding (transformed image tensor, word-vector target) pairs.

    Args:
        dataframe: DataFrame with "path" (image file) and "word" columns.
        word_vectors: mapping word -> embedding vector (e.g. fastText).
        transform: callable applied to the PIL image (default: ToTensor()).
    """

    def __init__(self, dataframe, word_vectors, transform=transforms.ToTensor()):
        self.image_paths = dataframe["path"].values
        self.words = dataframe["word"].values
        self.word_vectors = word_vectors
        self.transform = transform

    def __getitem__(self, index):
        image = Image.open(self.image_paths[index]).convert("RGB")
        if self.transform is not None:
            image = self.transform(image)
        # Fix: look the vector up on self.word_vectors — the original read the
        # *global* word_vectors, silently ignoring the constructor argument.
        target = torch.Tensor(self.word_vectors[self.words[index]])
        return image, target

    def __len__(self):
        return len(self.words)
# +
# Training-time augmentation: random crop/flip/rotation/grayscale, then tensor.
train_transform = transforms.Compose(
    [
        transforms.RandomResizedCrop(224, scale=[0.6, 0.9]),
        transforms.RandomHorizontalFlip(),
        transforms.RandomRotation(15),
        transforms.RandomGrayscale(0.25),
        transforms.ToTensor(),
    ]
)
# NOTE(review): the test transform also uses a *random* crop, so evaluation is
# not deterministic — confirm this is intended.
test_transform = transforms.Compose(
    [transforms.RandomResizedCrop(224, scale=[0.6, 0.9]), transforms.ToTensor()]
)
# -
train_dataset = ImageDataset(train_df, word_vectors, train_transform)
test_dataset = ImageDataset(test_df, word_vectors, test_transform)
# +
batch_size = 128
train_loader = DataLoader(
    dataset=train_dataset, batch_size=batch_size, num_workers=5, shuffle=True
)
test_loader = DataLoader(dataset=test_dataset, batch_size=batch_size, num_workers=5)
# -
# # building the model
# Pre-trained VGG16-BN convolutional stack as a frozen feature extractor;
# only the new projection head will be trained.
backbone = models.vgg16_bn(pretrained=True).features
for param in backbone.parameters():
    param.requires_grad = False
class DeViSE(nn.Module):
    """Project images into a `target_size`-dimensional word-vector space.

    A (typically frozen) convolutional `backbone` extracts visual features,
    which a small fully-connected head maps down to `target_size` dims.
    """

    def __init__(self, backbone, target_size=300):
        super(DeViSE, self).__init__()
        self.backbone = backbone
        hidden = target_size * 2
        layers = [
            nn.Linear(in_features=25088, out_features=hidden),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(in_features=hidden, out_features=target_size),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(in_features=target_size, out_features=target_size),
        ]
        self.head = nn.Sequential(*layers)

    def forward(self, x):
        features = self.backbone(x)
        flattened = features.view(features.size(0), -1)
        projected = self.head(flattened)
        # NOTE: scales by the max over the whole batch, not per-sample.
        return projected / projected.max()
devise_model = DeViSE(backbone).to(device)
# # train loop
# +
# Per-batch training losses, appended across all epochs (plotted later).
losses = []
def train(model, train_loader, loss_function, optimiser, n_epochs):
    """Train `model` with a cosine-embedding loss, recording losses globally.

    NOTE(review): calls .cuda() directly, so a GPU is required even though a
    CPU `device` fallback was computed earlier — confirm intended.
    """
    for epoch in range(n_epochs):
        model.train()
        loop = tqdm(train_loader)
        for images, targets in loop:
            images = images.cuda(non_blocking=True)
            targets = targets.cuda(non_blocking=True)
            # flags == 1 tells CosineEmbeddingLoss to pull each prediction
            # towards its target vector (no negative pairs are used).
            flags = torch.ones(len(targets)).cuda(non_blocking=True)
            optimiser.zero_grad()
            predictions = model(images)
            loss = loss_function(predictions, targets, flags)
            loss.backward()
            optimiser.step()
            loop.set_description("Epoch {}/{}".format(epoch + 1, n_epochs))
            loop.set_postfix(loss=loss.item())
            losses.append(loss.item())
# +
# Only the head's parameters still require grad (backbone frozen above).
trainable_parameters = filter(lambda p: p.requires_grad, devise_model.parameters())
# With flags == 1 this minimises 1 - cos(prediction, target).
loss_function = nn.CosineEmbeddingLoss()
optimiser = optim.Adam(trainable_parameters, lr=0.001)
# -
train(
    model=devise_model,
    n_epochs=3,
    train_loader=train_loader,
    loss_function=loss_function,
    optimiser=optimiser,
)
# +
# Smooth the per-batch loss curve with a 15-point rolling mean and plot it.
loss_data = pd.Series(losses).rolling(window=15).mean()
ax = loss_data.plot()
ax.set_xlim(
    0,
)
ax.set_ylim(0, 1);
# -
# # evaluate on test set
# +
# Evaluate on the held-out set: accumulate predicted embeddings and losses.
preds = []
test_loss = []
devise_model.eval()
with torch.no_grad():
    test_loop = tqdm(test_loader)
    for images, targets in test_loop:
        images = images.cuda(non_blocking=True)
        targets = targets.cuda(non_blocking=True)
        # flags == 1: every (prediction, target) pair is a positive pair.
        flags = torch.ones(len(targets)).cuda(non_blocking=True)
        predictions = devise_model(images)
        loss = loss_function(predictions, targets, flags)
        preds.append(predictions.cpu().data.numpy())
        test_loss.append(loss.item())
        test_loop.set_description("Test set")
        test_loop.set_postfix(loss=np.mean(test_loss[-5:]))
# -
# Stack the per-batch predictions into one (n_samples, 300) matrix.
preds = np.concatenate(preds).reshape(-1, 300)
np.mean(test_loss)
# # run a search on the predictions
preds.shape
def search(query, n=5):
    """Return the n test-set images whose predicted embeddings lie closest
    (by Euclidean distance) to the word vector of `query`, concatenated
    horizontally into a single strip image."""
    image_paths = test_df["path"].values
    distances = cdist(word_vectors[query].reshape(1, -1), preds)
    closest_n_paths = image_paths[np.argsort(distances)].squeeze()[:n]
    close_images = [
        np.array(Image.open(image_path).convert("RGB").resize((224, 224)))
        for image_path in closest_n_paths
    ]
    return Image.fromarray(np.concatenate(close_images, axis=1))
search("bridge")
# again, this works! We're getting somewhere now, and making significant changes to the established theory set out in the original DeViSE paper.
| notebooks/devise/notebooks/03 - broadening the scope of our classes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Convergence Plots
# The Convergence Plots consist of two Plotly FigureWidget Subplots, the `plasma_plot` and the `t_inner_luminosities_plot`. The plots can be displayed by setting the `show_convergence_plots` option in the `run_tardis` function to `True`. The plots are stored in the `convergence_plots` attribute of the simulation object `sim` and can be accessed using `sim.convergence_plots.plasma_plot` and `sim.convergence_plots.t_inner_luminosities_plot`.
# <div class="alert alert-info">
#
# Note
#
# You only need to include `export_convergence_plots=True` in the `run_tardis` function when you want to share the notebook. The function shows the plot using the Plotly `notebook_connected` renderer, which helps display the plot online. You don't need to do it when running the notebook locally.
#
# </div>
# +
from tardis import run_tardis
from tardis.io.atom_data.util import download_atom_data
# We download the atomic data needed to run the simulation
download_atom_data('kurucz_cd23_chianti_H_He')
# We run a simulation; the convergence plots update live while it iterates.
sim = run_tardis('tardis_example.yml', show_convergence_plots=True, export_convergence_plots=True)
# -
# ## Displaying Convergence Plots
# You can also call the plots outside of `run_tardis` function.
# Re-display the stored figures; "notebook_connected" loads plotly.js from a
# CDN so the exported notebook stays small.
sim.convergence_plots.plasma_plot.show(renderer="notebook_connected")
sim.convergence_plots.t_inner_luminosities_plot.show(renderer="notebook_connected")
# ## Changing Line Colors
# The default line-colors of the plasma plots can be changed by passing the name of the cmap in the `plasma_cmap` option.
#
# ```py
# sim = run_tardis("tardis_example.yml", show_convergence_plots=True, plasma_cmap="viridis")
# ```
#
# Alongwith the cmap name, one can also provide a list of colors in rgb, hex or css-names format in the `t_inner_luminosities_colors` option to change the default colors of the luminosity and inner boundary temperature plots.
# ```py
# # hex colors example list
# colors = [
# '#8c564b', # chestnut brown
# '#e377c2', # raspberry yogurt pink
# '#7f7f7f', # middle gray
# '#bcbd22', # curry yellow-green
# '#17becf' # blue-teal
# ]
#
# # rgb colors example list
# colors = ['rgb(31, 119, 180)',
# 'rgb(255, 127, 14)',
# 'rgb(44, 160, 44)',
# 'rgb(214, 39, 40)',
# 'rgb(148, 103, 189)',]
#
# # css colors
# colors = ["indigo","lightseagreen", "midnightblue", "pink", "teal"]
# ```
# For more css-names please see [this](https://www.w3schools.com/colors/colors_names.asp).
# Re-run with a Viridis colormap for the plasma plot and custom rgb colours
# for the luminosity / inner-boundary-temperature traces.
sim = run_tardis(
    "tardis_example.yml",
    show_convergence_plots=True,
    plasma_cmap= "viridis",
    t_inner_luminosities_colors = ['rgb(102, 197, 204)',
                                   'rgb(246, 207, 113)',
                                   'rgb(248, 156, 116)',
                                   'rgb(220, 176, 242)',
                                   'rgb(135, 197, 95)'],
    export_convergence_plots = True
)
# ## Changing the default layout
# You can override the default layout by passing dictionaries as arguments in `t_inner_luminosities_config` and `plasma_plot_config` in the `run_tardis` function. The dictionaries should have the format of `plotly.graph_objects.FigureWidget().to_dict()`. For more information on the structure of the dictionary, please see the [plotly documentation](https://plotly.com/python/figure-structure/).
#
# For sake of simplicity, all properties in the data dictionary are applied equally across all traces, meaning traces-specific properties can't be changed from the function. They however be changed after the simulation has finished, for example:
# ```py
# sim.convergence_plots.t_inner_luminosities_plot.data[0].line.dash = "dashdot"
# ```
#
# You can investigate more about the layout/data of any plots by calling `sim.convergence_plots.t_inner_luminosities_plot.layout` or `sim.convergence_plots.t_inner_luminosities_plot.data`.
#
# Here is an example:
# Override the default figure layout/data with plotly figure-dict fragments
# (see the markdown above); "data" entries apply equally to every trace.
sim = run_tardis(
    "tardis_example.yml",
    show_convergence_plots=True,
    plasma_plot_config={
        "layout": {
            "template": "ggplot2",
            "xaxis1": {
                "nticks": 20
            },
            "xaxis2": {
                "title": {"text": "new changed title of x axis2"},
                "nticks": 20
            },
        },
    },
    t_inner_luminosities_config={
        "data": {
            "line":{
                "dash":"dot"
            },
            "mode": "lines+markers",
        },
        "layout": {
            "template": "plotly_dark",
            "hovermode":"x",
            "xaxis":{"showgrid":False},
            "xaxis2":{"showgrid":False},
            "xaxis3":{"showgrid":False},
        },
    },
    export_convergence_plots = True)
# Built-in API documentation for the plotting class used above.
from tardis.visualization import ConvergencePlots
help(ConvergencePlots)
| docs/io/visualization/convergence_plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Python notebooks
# * interactive
# * contain code and presentation
# * facilitate collaboration
# * easy to write and test code
# * provide quick results
# * easy to display graphs
# #### Start coding
"""
MyNewShinyDataType - A class for demonstration purposes.
The class has 2 attributes:
- attribute1 - text (str)
- attribute2 - numeric (int or float)
The class allows for the update of the numeric attribute.
- method1 updates attribute2
"""
class MyNewShinyDataType:
    """Demonstration container holding a text and a numeric attribute.

    attribute1 -- text (str)
    attribute2 -- numeric (int or float), updatable via method1
    """

    def __init__(self, parameter1="default value", parameter2=0):
        self.attribute1 = parameter1
        self.attribute2 = parameter2

    def __str__(self):
        return "MyNewShinyDataType object: attribute1 = '{}', attribute2 = {}".format(
            self.attribute1, self.attribute2
        )

    def __repr__(self):
        return "MyNewShinyDataType('{}',{})".format(self.attribute1, self.attribute2)

    def method1(self, parameter1=0):
        """Add parameter1 to attribute2, falling back to +2 on a type error.

        Keyword arguments:
            numeric: parameter1 - the number to add (0)
        Returns:
            the updated attribute2
        """
        previous = self.attribute2
        try:
            self.attribute2 = self.attribute2 + parameter1
        except TypeError:
            self.attribute2 = self.attribute2 + 2
            print(f"'{parameter1}' is not a numeric value, we added 2 instead")
        finally:
            print(f"Old value was {previous}, new value is {self.attribute2}")
        return self.attribute2
# #### Do more coding
# +
"""
EnhancedNewShinyDataType - A class for demonstration purposes.
The class extends the MyNewShinyDataType:
- method2 - updates attribute1
- method3 - a len-based update of attribute2
"""
class EnhancedNewShinyDataType(MyNewShinyDataType):
    """Extension of MyNewShinyDataType.

    method2 -- appends text to attribute1, guaranteeing 'test' appears in it
    method3 -- a len-based update of attribute2 (left as an exercise)
    """

    def method2(self, parameter1=""):
        """Append parameter1 to attribute1 and ensure it contains 'test'.

        Keyword arguments:
            str: parameter1 - the string to add ("")
        Returns:
            str: updated attribute1
        """
        previous = self.attribute1
        try:
            self.attribute1 = self.attribute1 + " " + parameter1
            # Raises ValueError when 'test' is absent (return value unused).
            self.attribute1.index("test")
        except TypeError:
            self.attribute1 = self.attribute1 + " " + str(parameter1)
            print(f"'{parameter1}' is not a string, we made the conversion and added it")
        except ValueError:
            self.attribute1 = self.attribute1 + " test"
            print(f"'{self.attribute1}' does not contain 'test', we added 'test' to it")
        finally:
            print(f"Old value was '{previous}', new value is '{self.attribute1}'")
        return self.attribute1

    def method3(self, parameter1=""):
        """
        Add parameter length to attribute2.
        """
        pass # implement this method
# -
# ### From exploration work to production
# ### Python scripts
# Create an executable python script from the notebook via shell commands.
# !touch test.py
# !echo '#!/usr/bin/env python' > test.py
# !echo 'print("This is a python script")' >> test.py
# !chmod u+x test.py
# Importing the script runs its top-level code once; re-imports are cached,
# which is why the markdown below keeps asking for a kernel restart.
import test
# !python test.py
# !./test.py
# #### Adding a function
def test_function():
    print("This is a function in a python script")
test_function()
import test as t
dir(t)
t.test_function()
# add a test variable, restart kernel, import
import test as t
dir(t)
t.test_variable
# ### __main__ — Top-level script environment
#
# '__main__' is the name of the scope in which top-level code executes. A module’s __name__ is set equal to '__main__' when read from standard input, a script, or from an interactive prompt.
#
# A module can discover whether or not it is running in the main scope by checking its own __name__, which allows a common idiom for conditionally executing code in a module when it is run as a script or with python -m but not when it is imported.
#
# ```python
# if __name__ == "__main__":
# # execute only if run as a script
# main() # function that contais the code to execute
# ```
#
# https://docs.python.org/3/library/__main__.html
# Every object carries its definition name in __name__ (here, the builtin list).
list.__name__
def main():
    """Entry-point-style demo function: prints a local variable."""
    test_variable = 10
    print(f'The test variable value is {test_variable}')
main()
# add main, restart kernel, import
import test as t
dir(t)
# !python test.py
# #### `sys.argv`
#
# The list of command line arguments passed to a Python script. argv[0] is the script name (it is operating system dependent whether this is a full pathname or not). <br>
# If the command was executed using the -c command line option to the interpreter, argv[0] is set to the string '-c'. <br>
# If no script name was passed to the Python interpreter, argv[0] is the empty string.
# The Python sys module provides access to any command-line arguments using the sys.argv object.
#
# The sys.argv is the list of all the command-line arguments.<br>
# len(sys.argv) is the total number of length of command-line arguments.
# Add to the script
#
# ```python
# import sys
#
# print('Number of arguments:', len(sys.argv))
# print ('Argument List:', str(sys.argv))
# ```
# !./test.py
# #### Give some arguments
# !./test.py [1,2,4] message 1
# ```import numpy as np```
import numpy as np
# Parse an argv-style "[1, 2 , 3]" string: strip the brackets, split on
# commas, and let numpy coerce the pieces to ints.
np.array("[1, 2 , 3]".strip('][').split(','), dtype = int)
#
# #### Argument parsing
# `import getopt`
#
# `opts, args = getopt.getopt(argv, 'a:b:', ['foperand', 'soperand'])`
#
# The signature of the getopt() method looks like:
#
# `getopt.getopt(args, shortopts, longopts=[])`
#
# * `args` is the list of arguments taken from the command-line.
# * `shortopts` is where you specify the option letters. If you supply a:, then it means that your script should be supplied with the option a followed by a value as its argument. Technically, you can use any number of options here. When you pass these options from the command-line, they must be prepended with '-'.
# * `longopts` is where you can specify the extended versions of the shortopts. They must be prepended with '--'.
#
# https://www.datacamp.com/community/tutorials/argument-parsing-in-python
# https://docs.python.org/2/library/getopt.html
# https://www.tutorialspoint.com/python/python_command_line_arguments.htm
# ```python
# try:
# # Define the getopt parameters
# opts, args = getopt.getopt(sys.argv[1:], 'l:s:n:', ['list','string',"number"])
# print(len(opts))
# if len(opts) != 3:
# print ('usage: test.py -l <list_operand> -s <string_operand> -n <number_operand>')
# else:
# print(opts)
# test_array = np.array(opts[0][1].strip('][').split(','), dtype = int)
# string_text = opts[1][1]
# number_text = int(opts[2][1])
# test_array = test_array * number_text
# print(f'Info {string_text}, for updated list {test_array}')
# except getopt.GetoptError:
# print ('usage: test.py -l <list_operand> -s <string_operand> -n <number_operand>')
# ```
# !./test.py -l [1,2,4] -s message
#
# #### `argparse` -increased readability
# `import argparse`
#
# `class argparse.ArgumentParser(prog=None, usage=None, description=None, epilog=None, parents=[], formatter_class=argparse.HelpFormatter, prefix_chars='-', fromfile_prefix_chars=None, argument_default=None, conflict_handler='error', add_help=True, allow_abbrev=True)`<br>
# https://docs.python.org/3/library/argparse.html#argumentparser-objects
#
# Argument definition<br>
# `ArgumentParser.add_argument(name or flags...[, action][, nargs][, const][, default][, type][, choices][, required][, help][, metavar][, dest])`<br>
# https://docs.python.org/3/library/argparse.html#the-add-argument-method
#
# `ap.add_argument("-i", "--ioperand", required=True, help="important operand")`
#
# * -i - letter version of the argument
# * --ioperand - extended version of the argument
# * required - whether the argument or not
# * help - maningful description
#
# https://www.datacamp.com/community/tutorials/argument-parsing-in-python
# https://docs.python.org/3/library/argparse.html
# https://realpython.com/command-line-interfaces-python-argparse/
# ```python
# ap = argparse.ArgumentParser()
#
# # Add the arguments to the parser
# ap.add_argument("-l", "--list_operand", required=True, help="list operand")
# ap.add_argument("-s", "--string_operand", required=True, help="string operand")
# ap.add_argument("-n", "--number_operand", required=True, help="number operand")
#
# args = vars(ap.parse_args())
# print(args)
# test_array = np.array(args['list_operand'].strip('][').split(','), dtype = int)
# string_text = args['string_operand']
# number_text = int(args['number_operand'])
# test_array = test_array * number_text
#
# print(f'With argparse. Info {string_text}, for updated list {test_array}')
# ```
#
# !./test.py -h
# !./test.py -l [1,2,4] --string_operand message -n 3
#
# ##### `action` parameter - count example
# https://docs.python.org/3/library/argparse.html#action
# 'count' - This counts the number of times a keyword argument occurs. For example, this is useful for increasing verbosity levels:
#
# `ap.add_argument("-v", "--verbose", action='count', default=0)`
#
# !./test.py -l [1,2,4] --string_operand message -n 3 -vvv
# ### Modules
#
# https://docs.python.org/3/tutorial/modules.html
# https://www.python.org/dev/peps/pep-0008/#package-and-module-names
# If you want to write a somewhat longer program, you are better off <b>using a text editor to prepare the input for the interpreter and running it with that file as input instead. This is known as creating a script.</b>
#
# As your program gets longer, you may want to split it into several files for easier maintenance. You may also want to use a handy function that you’ve written in several programs without copying its definition into each program.
#
# A module is a file containing Python definitions and statements. <b>The file name is the module name with the suffix .py appended</b>. Within a module, the module’s name (as a string) is available as the value of the global variable `__name__`.
#Let's create a module for our classes
# !touch base_shiny_type.py
# (the class definitions are copied into these files before importing)
import base_shiny_type as bst
bst.MyNewShinyDataType()
# !touch enhanced_shiny_type.py
import enhanced_shiny_type as est
est.EnhancedNewShinyDataType()
# ### Packages
#
# https://docs.python.org/3/tutorial/modules.html#packages
# <b>Packages are a way of structuring</b> Python’s module namespace by using “dotted module names”. <b>For example, the module name A.B designates a submodule named B in a package named A</b>. Just like the use of modules saves the authors of different modules from having to worry about each other’s global variable names, the use of dotted module names saves the authors of multi-module packages like NumPy from having to worry about each other’s module names.
# Build a package: a directory holding our modules plus an __init__.py.
# !mkdir demoCM
# !cp test.py demoCM
# !cp base_shiny_type.py demoCM
# !cp enhanced_shiny_type.py demoCM
# !touch demoCM/__init__.py
from demoCM import test as tt
tt.test_function()
from demoCM import base_shiny_type as bst1
bst1.MyNewShinyDataType()
dir(bst)
from demoCM import enhanced_shiny_type as est1
# +
# restart kernel
#dir()
# -
# Star-import binds the package's submodule names (not their classes) here.
from demoCM import *
# +
#dir()
# -
base_shiny_type
base_shiny_type.MyNewShinyDataType()
# https://towardsdatascience.com/5-advanced-features-of-python-and-how-to-use-them-73bffa373c84
# #### A <b>`lambda` function</b> is a small, anonymous function - it has no name
# https://docs.python.org/3/reference/expressions.html#lambda<br>
# https://www.geeksforgeeks.org/python-lambda-anonymous-functions-filter-map-reduce/<br>
# https://realpython.com/python-lambda/<br>
# `lambda arguments : expression`
# A lambda function can take <b>any number of arguments</b>, but must always have <b>only one expression</b>.
nameless_function = lambda x: x**3
nameless_function(4)
import numpy as np
import pandas as pd
test_series = pd.Series([1,2,3,4])
test_series
# Apply the lambda element-wise: cube each value, then test parity.
test_series.apply(lambda x: x**3)
test_series.apply(lambda x:True if x % 2 == 0 else False)
test_df = pd.DataFrame([[1,2,3,4],[5,6,7,8]])
test_df
# Fix: this markdown line lost its leading "#" during conversion and was a
# syntax error.  Per column, compute (first row)**2 * (second row).
test_df.apply(lambda x:x[0]**2*x[1], axis = 0)
# #### Useful functions
# https://docs.python.org/3/library/functions.html
# `zip` - make an iterator that aggregates elements from each of the iterables.
# https://docs.python.org/3/library/functions.html#zip
#
#
# `zip(*iterables)`
#
# Returns an iterator of tuples, where the i-th tuple contains the i-th element from each of the argument sequences or iterables. The iterator stops when the shortest input iterable is exhausted. With a single iterable argument, it returns an iterator of 1-tuples. With no arguments, it returns an empty iterator.
combined_res = zip([1,2,3],["A","B","C"],[True,False,True])
combined_res
# NOTE: zip returns a lazy iterator — list() below consumes it, so a second
# list(combined_res) would come back empty.
list(combined_res)
dict(zip([1,2,3],["A","B","C"]))
# +
#try unequal sizes
# -
# unzip list: zip(*...) transposes a sequence of pairs back into two tuples
x, y = zip(*zip([1,2,3],[4,5,6]))
print(x,y)
x, y = zip(*[(1,4),(2,5),(3,6)])
print(x,y)
# `map` - apply function to every element of an iterable
# https://docs.python.org/3/library/functions.html#map
#
#
# `map(function, iterable, ...)`
#
# Return an iterator that applies function to every item of iterable, yielding the results. If additional iterable arguments are passed, function must take that many arguments and is applied to the items from all iterables in parallel. With multiple iterables, the iterator stops when the shortest iterable is exhausted.
map(abs,[-2,3,-5,6,-7])
for i in map(abs,[-2,3,-5,6,-7]):
    print(i)
# https://www.geeksforgeeks.org/python-map-function/
# +
# Element-wise sum of two lists via a two-argument lambda.
numbers1 = [1, 2, 3]
numbers2 = [4, 5, 6]
result = map(lambda x, y: x + y, numbers1, numbers2)
list(result)
# -
# Use a lambda function and the map function to compute a result from the following 3 lists.<br>
# If the element in the third list is divisible by 3 return the sum of the elements from the first two lists, otherwise return the difference.
# +
numbers1 = [1, 2, 3, 4, 5, 6]
numbers2 = [7, 8, 9, 10, 11, 12]
numbers3 = [13, 14, 15, 16, 17, 18]
# -
# `filter` - keep the elements of an iterable for which a function returns true
# https://docs.python.org/3/library/functions.html#filter
#
# `filter(function, iterable)`
#
# Construct an iterator from those elements of iterable for which function returns true. iterable may be either a sequence, a container which supports iteration, or an iterator. If function is None, the identity function is assumed, that is, all elements of iterable that are false are removed.
test_list = [3,4,5,6,7]
result = filter(lambda x: x>4, test_list)
result
list(result)
# +
# Python Program to find all anagrams of str in
# a list of strings.
from collections import Counter
word_list = ["spear", "print", "spare", "practice", "parse"]
word = "pears"
# use anonymous function to filter anagrams of x.
# Please refer below article for details of reversed
# https://www.geeksforgeeks.org/anagram-checking-python-collections-counter/
# Two words are anagrams iff their letter multisets (Counter) are equal.
result = list(filter(lambda x: (Counter(word) == Counter(x)), word_list))
# printing the result
print(result)
# -
# Return all the elements with a value divisible by 7 and a key that starts with A in the following dictionary.
# +
d = {"ACE": 21, "BAC":7, "AML":5, "ABL":14, "MAP":3}
# -
# `reduce` - cumulatively combine the elements of an iterable into a single value
# https://docs.python.org/3/library/functools.html#functools.reduce
#
# `functools.reduce(function, iterable[, initializer])`
#
# <b>Apply function of two arguments cumulatively to the items of iterable, from left to right, so as to reduce the iterable to a single value</b>. For example, reduce(lambda x, y: x+y, [1, 2, 3, 4, 5]) calculates ((((1+2)+3)+4)+5). The left argument, x, is the accumulated value and the right argument, y, is the update value from the iterable. If the optional initializer is present, it is placed before the items of the iterable in the calculation, and serves as a default when the iterable is empty. If initializer is not given and iterable contains only one item, the first item is returned.
from functools import reduce
# evaluates as ((47+11)+42)+13 = 113
reduce(lambda x,y: x+y, [47,11,42,13])
# <img src = https://www.python-course.eu/images/reduce_diagram.png width=300/>
#
# https://www.python-course.eu/lambda.php
# https://www.geeksforgeeks.org/reduce-in-python/
# https://www.tutorialsteacher.com/python/python-reduce-function
test_list = [1,2,3,4,5,6]
# compute factorial of n
# n! = 1*2*...*n expressed as a left fold over range(1, n+1)
n=5
reduce(lambda x,y: x*y, range(1,n+1))
# +
#intersection of multiple lists
#https://stackoverflow.com/questions/15995/useful-code-which-uses-reduce
test_list = [[1, 2, 3, 4, 5], [2, 3, 4, 5, 6], [3, 4, 5, 6, 7]]
# map(set, ...) turns every sub-list into a set; reduce then intersects them pairwise
result = reduce(set.intersection, map(set, test_list))
result
# -
| classes/advanced_scripting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# [GeeksForGeeks](https://www.geeksforgeeks.org/file-objects-python/)<br>
# 'r' : Opens a file for reading only <br>
# 'r+': Opens a file for both reading and writing<br>
# 'w' : Opens a file for writing only<br>
# 'w+': Open a file for writing and reading.<br>
# 'a' : Opens a file for appending <br>
# 'a+': Opens a file for both appending and reading<br>
# +
# Modes that contain "w" or "a" create the file if it does not already exist;
# adding "+" to a mode additionally opens the file for both reading and
# writing.
# -
# NOTE(review): "data.txt" must already exist for the "r"/"r+" calls below,
# otherwise they raise FileNotFoundError — run the "w"/"a" variants first.
open("data.txt", "r")
open("data.txt", "r+")
# "w"/"w+" truncate the file (creating it if missing)
open("data.txt", "w")
open("data.txt", "w+")
# "a"/"a+" open for appending at the end (creating the file if missing)
open("data.txt", "a")
open("data.txt", "a+")
# **1. Exploring the file modes**
# +
# open file and print all the content
# -
# "r+" starts with the cursor at the beginning, so read() returns the whole file
data = open("data.txt", "r+")
text = data.read()
print(text)
data.close()
# +
# open file and write "This is Sixth line" into 'data2.txt'
# +
data = open("data2.txt", "a+")
# text = data.read()
# print(text)
data.write("\nThis is sixth line")
# NOTE(review): in "a+" mode the cursor sits at the end of the file after the
# write, so this read() returns an empty string; call data.seek(0) first to
# actually see the file content.
text = data.read()
print(text)
data.close()
# -
# **2. Printing the mode in which the file is opened**
# the mode is available via the `mode` attribute of the file object
data = open("new_file.txt", "a+")
print(data.mode)
# **Exercise. Find the mode and perform the respective action**
#
# 1. if mode is read, read the entire file
# 2. if mode is write, write the "Hello World" into it
# 3. if mode is append, write the "This is next line" into it
#
# create file if doesn't exist
# Open (creating if necessary) and dispatch on the mode the file was opened
# with.  Since open() is called with "a+", only the append branch matches at
# runtime; the other branches cover the general exercise.
# Bug fix: the original tested 'a+' in the FIRST branch (reading instead of
# appending), which also made the real append branch unreachable.
f = open("text.txt", "a+")
print(f.mode)
if f.mode in ('r', 'r+'):
    # read mode: print the entire file
    print(f.read())
elif f.mode in ('w', 'w+'):
    # write mode
    f.write("Hello World")
elif f.mode in ('a', 'a+'):
    # append mode
    f.write("This is next line")
f.close()  # close (and thereby flush) so the write actually reaches disk
| 16. Adv Python File Handling/03. File Different Modes and Object Attributes.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.7.0
# language: julia
# name: julia-1.7
# ---
# # Part 1
# +
# function main(args)
#     return args
# end
# Load inline arguments for worker unit
# num_workers, this = main(ARGS)
# Worker split: normally parsed from ARGS; hard-coded here for interactive use.
num_workers = 4 #parse(Int64, num_workers)
this = 1 #parse(Int64, this)
#
# Important directories
simdir = "/remote/ceph2/group/gerda/data/mpik/gerda-simulations/gerda-gems-db/pss-dev"
simdir_old = "/remote/ceph/group/gerda/data/simulation/gerda-mage-sim"
mapping_file = simdir_old*"/UTILS/det-data/ged-mapping.json"
parameters_file = simdir_old*"/UTILS/det-data/ged-parameters.json"
config_dir = "/res/Impurity_Scan/config-dep/"
#
# Load packages and functions
include("../src/init.jl")
include("../src/fct.jl")
include("../src/worker_fct.jl")
#
# Output paths and filter settings
plots_base_path = "../../waveforms/sim/plots/raw/"
base_path_raw = "../../waveforms/sim/raw-test/"
base_path = "../../waveforms/sim/processed/"
n_sim_events = 5000
# Selection flags — set here but not referenced later in this notebook chunk.
isTP = 0;
isBL = 0;
mult = 1;
sampling_time = 1u"ns"
hits_threshold = 0.005; # MeV
# Energy threshold in keV (the event filter below divides by 1000 to
# compare against hits_edep, which is in MeV).
E = 400;
# -
# Electronics parameters per detector; index one entry as a sanity check.
parameters = JSON.parsefile("../dicts/electronics_parameters.json")
parameters[channel_to_name[24]]["par"]
# +
#
# Load workers log file
# Each worker keeps a JSON log of files it has already processed so that a
# restarted job can skip finished work.
# NOTE: the name `log` shadows Base.log (the logarithm) for this session.
log_file = base_path_raw * "log-" * string(this) * ".json"
if !isdir(base_path_raw)
    mkpath(base_path_raw)
end
if !isfile(log_file)
    # first run: create an empty log both on disk and in memory
    open(log_file, "w") do f
        JSON.print(f, Dict(), 4)
    end
    global log = Dict()
else
    global log = JSON.parsefile(log_file)
end;
#
# Collect simulation filepaths
# cd into simdir so the glob pattern can stay relative, then cd back.
current_dir = pwd()
cd(simdir)
filenames = glob("calib/*/*/*/*.root")
cd(current_dir)
# keep only this worker's share of the file list
filenames = get_share_for_worker(filenames, num_workers, this)
# -
# Part 1 main loop: convert each MaGe ROOT simulation file into per-detector
# HDF5 hit tables (energy deposits + hit positions relative to the detector
# centre), skipping files already marked "Done!" in the worker log.
data = nothing
for filename in filenames
    E_lim = E
    output_path = base_path_raw*basename(filename)*"/";
    if !haskey(log, basename(filename))
        Base.run(`clear`)
        @info(string(findfirst(x->x == filename, filenames)) * " of " * string(length(filenames)) * " in progress!")
        file = TFile(joinpath(simdir, filename))
        tree = file["fTree"];
        # Positions are converted from cm to mm on read-in.
        tt = Table(eventnumber = tree.eventnumber[:],
                hits_iddet = tree.hits_iddet[:],
                hits_edep = tree.hits_edep[:],
                hits_xpos = tree.hits_xpos[:] .* 10, # mm
                hits_ypos = tree.hits_ypos[:] .* 10, # mm
                hits_zpos = tree.hits_zpos[:] .* 10) # mm
        # Keep events with at least one hit, total deposited energy above
        # E_lim (keV -> MeV) and all hits in a single detector.
        tt = tt |> @filter(length(_.hits_edep) != 0 && sum(_.hits_edep) >= E_lim/1000 && length(unique(_.hits_iddet)) == 1) |> Table;
        data = Table(
            evtno = [],
            multiplicity = [],
            detno = [],
            hits_totnum = [],
            edep = [],
            pos = []
        )
        prog = Progress(size(tt,1), dt=0.5,
                barglyphs=BarGlyphs('|','█', ['▁' ,'▂' ,'▃' ,'▄' ,'▅' ,'▆', '▇'],' ','|',),
                barlen=10)
        for i in eachindex(tt)
            multi = 1
            det = tt[i].hits_iddet[1]
            # Detectors mounted upside down get their z axis flipped.
            parameters[sim_to_channel[det][2]]["upside_down"] == true ? upside_down = -1 : upside_down = 1
            # Shift each hit into detector-local coordinates (centre at the
            # detector centre, z measured from half-height).
            append!(data, Table(
                    evtno = [tt[i].eventnumber],
                    multiplicity = [multi],
                    detno = [Array{Int64,1}(zeros(length(tt[i].hits_iddet)) .+ sim_to_channel[det][1])],
                    hits_totnum = [length(tt[i].hits_edep)],
                    edep = [tt[i].hits_edep],
                    pos = [[ SVector{3}(([ tt[i].hits_xpos[k] .- parameters[sim_to_channel[det][2]]["detcenter_x"],
                                    tt[i].hits_ypos[k] .- parameters[sim_to_channel[det][2]]["detcenter_y"],
                                    upside_down .* (tt[i].hits_zpos[k] .- parameters[sim_to_channel[det][2]]["detcenter_z"] .+ upside_down * parameters[sim_to_channel[det][2]]["height"]/2)
                                    ] * u"mm")...) for k in eachindex(tt[i].hits_xpos) ]]
                )
            )
            next!(prog)
        end
        tt = nothing
        # Collect the list of detectors that actually appear in this file.
        dets = []
        for detno in data.detno
            push!(dets, unique(detno)[1])
        end
        dets = unique(dets)
        prog = Progress(length(dets), dt=0.5,
                barglyphs=BarGlyphs('|','█', ['▁' ,'▂' ,'▃' ,'▄' ,'▅' ,'▆', '▇'],' ','|',),
                barlen=10)
        @info("Creating files for each detector")
        # Write one HDF5 file per detector with that detector's events only.
        for detno in dets
            det = channel_to_name[detno]
            t = data |> @filter(unique(_.detno)[1] == detno) |> Table;
            !isdir(output_path) ? mkpath(output_path) : ""
            HDF5.h5open(output_path * lpad(detno, 2, "0") * "-" * det * ".h5", "w") do h5f
                LegendHDF5IO.writedata( h5f, "data", Table(
                        evtno = t.evtno,
                        event_mult = t.multiplicity,
                        detno = VectorOfArrays(t.detno),
                        hits_totnum = t.hits_totnum,
                        edep = VectorOfArrays(t.edep),
                        pos = VectorOfArrays(t.pos)))
            end
            next!(prog)
        end
        # Log the progress
        log[basename(filename)] = "Done!"
        open(log_file, "w") do f
            JSON.print(f, log, 4)
        end
    end
end
# # Part 2
# +
# function main(args)
#     return args
# end
# Load inline arguments for worker unit
# num_workers, this = main(ARGS)
# Worker split: normally parsed from ARGS; hard-coded here for interactive use.
num_workers = 3 #parse(Int64, num_workers)
this = 1 #parse(Int64, this)
#
# Important directories
simdir = "/remote/ceph2/group/gerda/data/mpik/gerda-simulations/gerda-gems-db/pss-dev"
simdir_old = "/remote/ceph/group/gerda/data/simulation/gerda-mage-sim"
mapping_file = simdir_old*"/UTILS/det-data/ged-mapping.json"
parameters_file = simdir_old*"/UTILS/det-data/ged-parameters.json"
config_dir = "../../../2020-02-06_8380701d_st_ffischer/res/Impurity_Scan/config-dep/"
#
# Load packages and functions
include("../src/init.jl")
include("../src/fct.jl")
include("../src/worker_fct.jl")
#
# Output paths and filter settings
plots_base_path = "../../waveforms/sim/plots/raw/"
base_path_raw = "../../waveforms/sim/raw/"
base_path = "../../waveforms/sim/raw_wf/"
n_sim_events = 10000;
sampling_time = 1u"ns";
# Toggle which waveform sets are produced: plain hits vs. spatially
# clustered hits (only the clustered variant is enabled here).
generate_wf = false;
generate_cl_wf = true;
no_new_files = 0
# Keep only channels for which Part 1 actually produced raw hit files.
channels = []
for c in 0:1:36
    files = glob(base_path_raw * "raw*/" * lpad(c, 2, "0") * "-" * channel_to_name[c] * ".h5");
    if length(files) > 0
        push!(channels, c)
    end
end
channels = get_share_for_worker(channels, num_workers, this)
# -
# Poll loop: repeatedly scan for raw hit files and simulate detector
# waveforms for them with SolidStateDetectors; terminates after three
# consecutive passes without new work (5-minute sleep between passes).
while no_new_files <= 3
    for ch in channels
        @info("Start Ch"*lpad(ch, 2, "0")*" | "*channel_to_name[ch])
        @info(">---------------------------------------------------------------<")
        files = glob(base_path_raw*"*/*-"*channel_to_name[ch]*".h5")
        # Read the SSD simulation file of depleted detector
        config_file = glob(config_dir * channel_to_name[ch] * "*.config")[1]
        sim_output = glob(config_dir * "../output/"*channel_to_name[ch]*"*/" )[1]
        @info("Load SSD simulation")
        @time sim = @suppress readcreateh5(config_file, sim_output);
        for file in files
            output_dir = base_path * lpad(ch, 2, "0") * "-" * channel_to_name[ch] * "/cl-wf/"
            output_dir *= split(file, "/")[end-1] * "/"
            @time events = h5open(file, "r") do h5f
                LegendHDF5IO.readdata(h5f, "data")
            end;
            # A file counts as done when its last output chunk exists, is
            # non-empty, and its trailing event index covers all events.
            last_file = glob(output_dir * "*.h5")
            if length(last_file) == 0
                existance_check = false
            else
                last_file = last_file[end]
                last_event = parse(Int64,split(split(last_file,"-")[end], ".h5")[1])
                existance_check = isfile(last_file) && last_event == size(events,1) && stat(last_file).size > 0
            end
            if existance_check
                @info("This file has already been simluated")
            else
                no_new_files = 0
                # NOTE(review): this ternary is a no-op — it neither creates
                # the directory nor uses its result; mkpath(output_dir) was
                # presumably intended here. TODO confirm.
                isdir(output_dir) ? "Directory exists" : output_dir
                file_id = findall(x->x == file, files)[1]
                if generate_wf
                    # IJulia.clear_output(true)
                    Base.run(`clear`)
                    @info("File $file_id | " * string(length(files)))
                    @info("Start simulating waveforms for ch" * lpad(ch, 2, "0") * " | " * channel_to_name[ch])
                    @info(">---------------------------------------------------------------<")
                    output_basename = lpad(ch, 2, "0") * "-" * channel_to_name[ch]*"-wf-"*split(file, "/")[end-1]
                    t_with_waveforms = @suppress SSD.simulate_waveforms(events, sim, output_dir, output_basename, chunk_n_physics_events = n_sim_events, Δt = sampling_time);
                end
                if generate_cl_wf
                    # IJulia.clear_output(true)
                    Base.run(`clear`)
                    @info("File $file_id | "*string(length(files)))
                    @info("Start simulating clustered waveforms for ch" * lpad(ch, 2, "0") * " | " * channel_to_name[ch])
                    @info(">---------------------------------------------------------------<")
                    @info("$(sum(length.(events.edep))) hits before clustering")
                    # Merge hits closer than 0.2 mm before waveform simulation.
                    events_clustered = SSD.cluster_detector_hits(events, 0.2u"mm")
                    @info("$(sum(length.(events_clustered.edep))) hits after clustering")
                    output_basename = lpad(ch, 2, "0") * "-" * channel_to_name[ch] * "-cl-wf-"*split(file, "/")[end-1]
                    t_clustered_with_waveforms = SSD.simulate_waveforms(events_clustered, sim, output_dir, output_basename, chunk_n_physics_events = n_sim_events, Δt = sampling_time);
                end
            end
        end
    end
    no_new_files += 1
    sleep(300)
end
# # Part 3
# +
# function main(args)
#     return args
# end
# Load inline arguments for worker unit
# num_workers, this = main(ARGS)
# Worker split: normally parsed from ARGS; hard-coded here for interactive use.
num_workers = 1 #parse(Int64, num_workers)
this = 1 #parse(Int64, this)
#
# Important directories
simdir = "/remote/ceph2/group/gerda/data/mpik/gerda-simulations/gerda-gems-db/pss-dev"
simdir_old = "/remote/ceph/group/gerda/data/simulation/gerda-mage-sim"
mapping_file = simdir_old*"/UTILS/det-data/ged-mapping.json"
parameters_file = simdir_old*"/UTILS/det-data/ged-parameters.json"
config_dir = "../../../2020-02-06_8380701d_st_ffischer/res/Impurity_Scan/config-dep/"
#
# Load packages and functions
include("../src/init.jl")
include("../src/fct.jl")
include("../src/worker_fct.jl")
#
# Output paths and filter settings
plots_base_path = "../../waveforms/sim/plots/raw/"
base_path_raw = "../../waveforms/sim/raw_wf/"
base_path = "../../waveforms/sim/raw_wf/"
sampling_time = 1u"ns";
no_new_files = 0
# Per-channel electronics parameters used by the filter below.
parameters = JSON.parsefile("../dicts/electronics_parameters.json")
# Keep only channels for which Part 2 produced clustered waveforms.
channels = []
for c in 0:1:36
    files = glob(base_path_raw * lpad(c, 2, "0") * "-" * channel_to_name[c] * "/cl-wf/raw*/*.h5");
    if length(files) > 0
        push!(channels, c)
    end
end
channels = get_share_for_worker(channels, num_workers, this);
# -
# Part 3: apply the electronics response to the simulated clustered
# waveforms, downsample 1 ns -> 10 ns and cut an 800-sample window around
# the pulse half-maximum.
# NOTE(review): iterates over `[0]` instead of `channels` — looks like a
# single-channel debug run; restore `channels` for production.
for ch in [0]
    Base.run(`clear`)
    @info("Start Ch" * lpad(ch, 2, "0") * " | " * channel_to_name[ch])
    @info(">---------------------------------------------------------------<")
    files = glob(base_path_raw * lpad(ch, 2, "0") * "-" * channel_to_name[ch] * "/cl-wf/raw*/*.h5")
    pro = Progress(length(files), dt=0.5,
            barglyphs=BarGlyphs('|','█', ['▁' ,'▂' ,'▃' ,'▄' ,'▅' ,'▆', '▇'],' ','|',),
            barlen=10)
    for file in files
        file_nr = findall(x->x==file, files)[1]
        # Derive the output path: same tree, "cl-wf" -> "w_filter", and the
        # basename tagged with "_w_filter".
        filename = split(file, basename(file))[1]
        filename *= channel_to_name[ch] * "_w_filter" * split(basename(file), channel_to_name[ch])[2]
        filename = split(filename, "/cl-wf/")[1] * "/w_filter/" * split(filename, "/cl-wf/")[2]
        if !isfile(filename)
            data_raw = HDF5.h5open(file, "r") do h5f
                LegendHDF5IO.readdata(h5f, "generated_waveforms")
            end
            # Only the first half of the generated waveforms is processed here.
            first_half = 1:Int(size(data_raw,1) / 2)
            waveform = add_baseline_and_extend_tail.(data_raw.waveform[first_half], 6000, 12000)
            #
            # Reduce time resolution to fit data
            waveform_w_bl = []
            map(x-> push!(waveform_w_bl, RDWaveform(x.time[1:10:end], x.value[1:10:end])), waveform)
            # Electronics parameters, converted to SI units.
            GBP = parameters[channel_to_name[ch]]["par"]["GBP"]*1e6
            tau = parameters[channel_to_name[ch]]["par"]["tau"]*1e-6
            Cd = parameters[channel_to_name[ch]]["par"]["Cd"]*1e-12
            Cf = parameters[channel_to_name[ch]]["par"]["Cf"]*1e-12
            data = Table(
                energy = [],
                multiplicity = [],
                timestamp = [],
                run = [],
                channel = [],
                waveform = []
            )
            for i in eachindex(waveform_w_bl)
                pulse = waveform_w_bl[i].value
                if sum(pulse) > 0
                    # Apply the preamp transfer function, then cut 800 samples
                    # (8 µs at 10 ns) centred on the half-maximum crossing.
                    filtered_pulse = applyElectronics(pulse; Ts = 10e-9, GBP = GBP, tau = tau, Kv = 150e3, Cd = Cd, Cf = Cf, Rf = 500e6)
                    inter = find_intersect(filtered_pulse, maximum(filtered_pulse)/2, 5)
                    cut = filtered_pulse[(inter-399):1:(inter+400)]
                    time = (0:10:7990)u"ns"
                    cut_pulse = RDWaveform(time, cut)
                    append!(data, Table(
                            energy = [data_raw[i].edep],
                            multiplicity = [data_raw[i].event_mult],
                            timestamp = [data_raw[i].evtno],
                            run = [0],
                            channel = [unique(data_raw[i].detno)[1]],
                            waveform = [cut_pulse]
                        )
                    )
                end
            end
            # Convert the accumulated columns to concrete types and write out.
            events = Table(
                energy = VectorOfArrays(Array{typeof(data[1].energy),1}(data.energy)),
                multiplicity = Int32.(data.multiplicity),
                timestamp = Int32.(data.timestamp),
                run = Int32.(data.run),
                channel = Int32.(data.channel),
                waveform = ArrayOfRDWaveforms( Array{typeof(data[1].waveform), 1}(data.waveform) )
            )
            !isdir(dirname(filename)) ? mkpath(dirname(filename)) : ""
            HDF5.h5open(filename, "w") do h5f
                LegendHDF5IO.writedata(h5f, "data", events)
            end
        end
        next!(pro)
    end
end
# # Part 4.1 - get valid range for baseline rms
# +
# function main(args)
#     return args
# end
# Load inline arguments for worker unit
# num_workers, this = main(ARGS)
# Part 4.1: derive a per-channel baseline-RMS acceptance window.
# Worker split: normally parsed from ARGS; hard-coded here for interactive use.
num_workers = 1 #parse(Int64, num_workers)
this = 1 #parse(Int64, this)
#
# Load packages and functions
include("../src/init.jl")
include("../src/fct.jl")
include("../src/worker_fct.jl")
# Normalised Gaussian with amplitude `scale`, width `σ` and centre `μ`,
# evaluated element-wise over `x`.  Entries of `par` may be plain numbers
# or one-element arrays (as some fit routines return), so each one is
# unwrapped defensively before use.
function gauss(x, par)
    unwrap(i) = try par[i] catch; par[i][1] end
    scale = unwrap(1)
    σ = unwrap(2)
    μ = unwrap(3)
    return @. scale * exp(-0.5 * ((x - μ)^2) / (σ^2)) / (sqrt(2 * π * σ^2))
end
#
# Output paths and filter settings
plots_base_path = "../../waveforms/sim/plots/"
base_path_raw = "../../waveforms/sim/raw_wf/"
base_path = "../../waveforms/sim/wf/"
plots_bl_path = "../../waveforms/baselines/plots/"
base_bl_path = "../../waveforms/baselines/wf/"
base_bl_path_filter = "../../waveforms/baselines/wf_filter/"
sampling_time = 1u"ns";
parameters = JSON.parsefile("../dicts/electronics_parameters.json")
# Keep only channels for which Part 3 produced filtered waveforms.
channels = []
for c in 0:1:36
    files = glob(base_path_raw * lpad(c, 2, "0") * "-" * channel_to_name[c] * "/w_filter/raw*/*.h5");
    if length(files) > 0
        push!(channels, c)
    end
end
channels = get_share_for_worker(channels, num_workers, this);
# +
# Calibration metadata: per-channel run lists and dataset definitions.
calib_filepath = "../dicts/calib.json"
calib = JSON.parsefile(calib_filepath);
# Run-range labels of the calibration datasets.
datasets_str = [
    "0053-0064",
    "0065-0079",
    "0080-0092",
    "0093-0113"
]
det_lib = JSON.parsefile("../dicts/det_lib.json");
# +
# For each BEGe channel: collect measured baselines from the runs used in
# its calibration dataset, histogram their RMS, fit a Gaussian, and store a
# ±3σ acceptance window (plus fit parameters) in bl_rms_limits.json.
for ch in 5:1:36
    # Skip non-BEGe channels and known-bad channels.
    if channel_to_bege[ch] == false || ch in [5,6,7,13]
        continue
    end
    ch_str = lpad(ch, 2, "0")
    IJulia.clear_output(true)
    @info("Start Ch" * ch_str * " | " * channel_to_name[ch])
    @info(">---------------------------------------------------------------<")
    bl_files = glob(base_bl_path * "*/" * ch_str* "-" * channel_to_name[ch] * "/*.h5");
    # Keep only baseline files from runs belonging to this channel's dataset.
    filtered = []
    for file in bl_files
        run_str = split(basename(file), "-")[2]
        run = parse(Int64, split(run_str, "run")[end])
        ds_str = det_lib[channel_to_name[ch]]["run_str"]
        if run in calib[channel_to_name[ch]]["data"][ds_str]["used_runs"]
            push!(filtered, file)
        end
    end
    bl_files = filtered
    if length(bl_files) > 0
        bl_data = Table(run = [], datasetID = [], waveform = [])
        @showprogress 1 "Collecting baselines for Ch $ch ..." for file in bl_files
            tmp_data = HDF5.h5open(file, "r") do h5f
                LegendHDF5IO.readdata(h5f, "data")
            end
            # Guard against files with an unexpected `run` column type.
            if typeof(tmp_data.run) == Array{Int64,1}
                # tmp_data = tmp_data |> @filter(_.run >= 80 && _.run <= 92) |> Table
                if size(tmp_data,1) > 0
                    append!(bl_data, Table(run = tmp_data.run, datasetID = tmp_data.datasetID, waveform = tmp_data.waveform))
                end
            else
                println(typeof(tmp_data.run))
            end
        end
        # RMS of each baseline, with a coarse 1..15 sanity window applied.
        bl_rms = []
        map(x-> 1 < rms(x.value) < 15 ? push!(bl_rms, rms(x.value)) : nothing, bl_data.waveform);
        # Histogram within mean ± 2σ (0.05-wide bins) and fit a Gaussian.
        hist = fit(Histogram, bl_rms, mean(bl_rms)-2*std(bl_rms):0.05:mean(bl_rms)+2*std(bl_rms))
        guess = [
            sum(hist.weights)/10,
            0.2,
            mean(bl_rms)
        ]
        x = Array{Float64, 1}(midpoints(hist.edges[1]))
        y = Array{Float64, 1}(hist.weights)
        y_guess = gauss(x, guess)
        p = plot(x, y, st=:step, label="Baseline RMS")
        p = plot!(x, y_guess)
        #
        #
        #
        #
        f = curve_fit(gauss, x, y, guess)
        y_fit = gauss(x, f.param)
        p = plot(hist, st=:step, label="Baseline RMS")
        p = plot!(x, y_fit, label="LsqFit")
        p = vline!([f.param[3] - 3 * f.param[2], f.param[3] + 3 * f.param[2]], label="3 Sigma")
        p = plot!(size=(800,600), xlabel="Baseline RMS", ylabel="Samples", title=channel_to_name[ch] * " | Ch" * lpad(ch, 2, "0"))
        display(p)
        #
        #
        #
        #
        filename = joinpath(plots_bl_path, "RMS/Ch" * lpad(ch, 2, "0") * "-" * channel_to_name[ch] * ".png")
        savefig(p, filename)
        # return p
        # Persist the ±3σ acceptance window and fit results per channel.
        bl_dict_path = "../dicts/bl_rms_limits.json"
        bl_dict = isfile(bl_dict_path) ? JSON.parsefile(bl_dict_path) : Dict()
        !haskey(bl_dict, channel_to_name[ch]) ? bl_dict[channel_to_name[ch]] = Dict() : ""
        bl_dict[channel_to_name[ch]]["low_rms_limit"] = f.param[3] - 3 * f.param[2]
        bl_dict[channel_to_name[ch]]["high_rms_limit"] = f.param[3] + 3 * f.param[2]
        bl_dict[channel_to_name[ch]]["method"] = "lsqfit"
        bl_dict[channel_to_name[ch]]["fit_params"] = f.param
        bl_dict[channel_to_name[ch]]["fit_params_err"] = margin_error(f)
        # Count how many baselines fall inside the accepted window.
        bl_dict[channel_to_name[ch]]["bl_num"] = 0
        for bl in bl_rms
            if bl_dict[channel_to_name[ch]]["low_rms_limit"] < bl < bl_dict[channel_to_name[ch]]["high_rms_limit"]
                bl_dict[channel_to_name[ch]]["bl_num"] += 1
            end
        end
        open(bl_dict_path, "w") do f
            JSON.print(f, bl_dict, 4)
        end
    end
end
# +
### Again -----------------------------------
# 16, 17
# -
# +
# check 12, 14!
# -
# Summary plot: mean fitted baseline RMS (± fitted σ) for every BEGe
# channel, with the coax-detector channel ranges shaded in gray.
bl_dict_path = "../dicts/bl_rms_limits.json"
bl_dict = JSON.parsefile(bl_dict_path);
channels = []
m = []
e = []
for ch in 0:1:36
    if channel_to_bege[ch]
        push!(channels, ch)
        # fit_params = [scale, σ, μ] from the Gaussian fit above.
        push!(m, bl_dict[channel_to_name[ch]]["fit_params"][3])
        push!(e, bl_dict[channel_to_name[ch]]["fit_params"][2])
        # push!(e, bl_dict[channel_to_name[ch]]["fit_params_err"][3])
    end
end
p = scatter(channels, m, err=e, label="")
p = plot!(framestyle=:box, xlabel="Channel Number", ylabel="Mean Baseline RMS", xticks=0:2:36)
# Shade the channel ranges occupied by coax detectors.
p = plot!([7.5,10.5], [2, 2], ribbon=[-10000,5], label="Coax", color=:gray)
p = plot!([26.5,29.5], [2, 2], ribbon=[-10000,5], label="", color=:gray)
p = plot!([35.5,36.5], [2, 2], ribbon=[-10000,5], label="", color=:gray, ylim=(3,9.5))
savefig(p, "mean-baseline-rms.pdf")
p
# # Part 4.2 - add baselines to waveforms
# +
# function main(args)
#     return args
# end
# Load inline arguments for worker unit
# num_workers, this = main(ARGS)
# Part 4.2: overlay measured noise baselines onto simulated pulses.
# Worker split: normally parsed from ARGS; hard-coded here for interactive use.
num_workers = 1 #parse(Int64, num_workers)
this = 1 #parse(Int64, this)
#
# Load packages and functions
include("../src/init.jl")
include("../src/fct.jl")
include("../src/worker_fct.jl")
# Calibration metadata, detector library, baseline-RMS acceptance windows
# (from Part 4.1) and electronics parameters.
calib_filepath = "../dicts/calib.json"
calib = JSON.parsefile(calib_filepath);
datasets_str = [
    "0053-0064",
    "0065-0079",
    "0080-0092",
    "0093-0113"
]
det_lib = JSON.parsefile("../dicts/det_lib.json");
bl_dict = JSON.parsefile("../dicts/bl_rms_limits.json");
parameters = JSON.parsefile("../dicts/electronics_parameters.json")
#
# Output paths and filter settings
plots_base_path = "../../waveforms/sim/plots/"
base_path_raw = "../../../2020-02-06_8380701d_st_ffischer/pulses/sim/raw_2vbb/"
base_path = "../../waveforms/sim/2vbb_check/"
plots_bl_path = "../../waveforms/baselines/plots/"
base_bl_path = "../../waveforms/baselines/wf/"
sampling_time = 1u"ns";
# BEGe channels only, excluding the known-bad ones.
channels = []
for ch in 1:1:36
    if channel_to_bege[ch] == false || ch in [5,6,7,13]
        continue
    end
    push!(channels, ch)
end
channels = get_share_for_worker(channels, num_workers, this);
# +
# NOTE(review): single-channel run — `ch = 0` with the worker loop
# commented out; restore `for ch in channels` for production.
ch = 0
# for ch in channels
ds_str = det_lib[channel_to_name[ch]]["run_str"]
str_ch = lpad(ch, 2, "0");
@info("Start Ch$str_ch | " * channel_to_name[ch])
@info(">---------------------------------------------------------------<")
@info("Read in baselines")
bl_files = glob(joinpath(base_bl_path, "*/" * str_ch*"-"*channel_to_name[ch]*"/*.h5"))
# Keep only baseline files from runs used in this channel's calibration set.
filtered = []
for file in bl_files
    run = parse(Int64, split(split(basename(file), "-")[2], "run")[end])
    if run in calib[channel_to_name[ch]]["data"][ds_str]["used_runs"]
        push!(filtered, file)
    end
end
bl_files = filtered
bl = Table(run = [], datasetID = [], waveform = [])
@showprogress 1 "Filtering baselines for Ch$str_ch ..." for file in bl_files
    tmp_data = HDF5.h5open(file, "r") do h5f
        LegendHDF5IO.readdata(h5f, "data")
    end
    # Accept only baselines inside the RMS window derived in Part 4.1.
    tmp_data = tmp_data |> @filter(bl_dict[channel_to_name[ch]]["low_rms_limit"] < rms(_.waveform.value) < bl_dict[channel_to_name[ch]]["high_rms_limit"]) |> Table
    if size(tmp_data,1) > 0
        append!(bl, Table(run = tmp_data.run, datasetID = tmp_data.datasetID, waveform = tmp_data.waveform))
    end
end
# NOTE(review): this re-applies the same RMS filter already applied per file
# above — redundant but harmless.
bl = bl |> @filter(bl_dict[channel_to_name[ch]]["low_rms_limit"] < rms(_.waveform.value) < bl_dict[channel_to_name[ch]]["high_rms_limit"]) |> Table
@info("Number of baselines: " * string(size(bl,1)))
# -
@info(">---------------------------------------------------------------<")
# Count the total number of simulated pulses from the trailing event index
# encoded in each file group's last chunk filename.
files = glob(joinpath(base_path_raw, string(ch) * "-" * channel_to_name[ch] * "/*w_filter-cl-wf*.h5"));
number_of_pulses = 0;
key_str = []
for file in files
    push!(key_str, split(basename(file), "_evts_")[1])
end
key_str = unique(key_str)
for k in key_str
    IJulia.clear_output(true)
    last_file = basename(glob(joinpath(base_path_raw, string(ch) * "-" * channel_to_name[ch] * "/" * string(k) * "*.h5"))[end])
    number_of_pulses += parse(Int64, split(split(split(last_file, "_evts_")[end], "-")[end], ".h5")[1])
end
@info("Found number of simulation pulses: " * string(number_of_pulses))
@info("Found number of noise baselines: " * string(size(bl,1)))
# get first_range
# If there are fewer baselines than pulses, reuse each baseline several
# times with different window start offsets (spread over samples 1..201).
if number_of_pulses <= size(bl,1)
    first_range = [1]
else
    mult_factor = ceil(number_of_pulses/size(bl,1))
    first = 1
    last = 201
    step = Int(round((last-first)/(mult_factor)))
    first_range = first:step:last
end
@info("This results in " * string( size(bl, 1) * length(first_range) ) * " baselines for " * string(number_of_pulses) * " pulses")
@info(">---------------------------------------------------------------<")
@info("Indexing the baselines")
# indices = Table(bl=[], first=[])
# for i in 0:1:number_of_pulses
#     bl_id = i%size(bl,1) + 1
#     first = first_range[Int(ceil((i + 1) / size(bl,1)))]
#     append!(indices, Table(bl=[bl_id], first=[first]))
# end
# Build one (baseline id, window start) pair per available baseline slot,
# then shuffle so the assignment to pulses is randomised.
indices = []
for i in 0:1:(size(bl, 1) * length(first_range)-1)
    bl_id = i%size(bl,1) + 1
    first = first_range[Int(ceil((i+1) / size(bl,1)))]
    push!(indices, [bl_id, first])
end
indices = shuffle(indices)
@showprogress "Adding baselines..." for file in files[1:2]
create_file = false
ch_str = lpad(ch, 2, "0");
filename = joinpath(joinpath(base_path, str_ch * "-" * channel_to_name[ch]), basename(file))
!isdir(dirname(filename)) ? mkpath(dirname(filename)) : ""
if isfile(filename)
if stat(filename).size <= stat(file).size/2
create_file = true
end
else
create_file = true
end
if true#create_file
data_w_bl = Table(energy=[], multiplicity=[], timestamp=[], run=[], channel=[], waveform=[], evtno=[], pos=[])
file_no_filter = joinpath(dirname(file), split(basename(file), "w_filter")[1] * "w_bl" * split(basename(file), "w_filter")[end])
data = HDF5.h5open(file, "r") do h5f
LegendHDF5IO.readdata(h5f, "data")
end
data_no_filter = HDF5.h5open(file_no_filter, "r") do h5f
LegendHDF5IO.readdata(h5f, "data")
end
for i in eachindex(data)
event = data[i]
time = event.waveform.time
pulse = event.waveform.value ./ get_avg_maximum(event.waveform.value, 10)
pulse .*= sum(event.energy).val #* 1000 # Energy is stored in MeV
return plot(pulse)
bl_id = indices[pulse_id][1]
first = indices[pulse_id][2]
last = first + length(pulse) - 1
pulse .+= bl[bl_id].waveform.value[first:1:last]
# return plot(RDWaveform(time, pulse))
append!(data_w_bl, Table(energy=[event.energy],
multiplicity=[event.multiplicity],
timestamp=[event.timestamp],
run=[event.run],
channel=[event.channel],
waveform=[RDWaveform(time, pulse)],
evtno=[data_no_filter[i].evtno],
pos=[data_no_filter[i].pos])
);
pulse_id += 1
end
HDF5.h5open(filename, "w") do h5f
LegendHDF5IO.writedata(h5f, "data", Table(energy = VectorOfArrays(Array{typeof(data_w_bl[1].energy),1}(data_w_bl.energy)),
multiplicity = Int32.(data_w_bl.multiplicity),
timestamp = Int32.(data_w_bl.timestamp),
run = Int32.(data_w_bl.run),
channel = Int32.(data_w_bl.channel),
waveform = ArrayOfRDWaveforms( Array{typeof(data_w_bl[1].waveform), 1}(data_w_bl.waveform) ),
evtno = Int32.(data_w_bl.evtno),
pos = VectorOfArrays(Array{typeof(data_w_bl[1].pos), 1}(data_w_bl.pos))))
end
else
pulse_id += size(HDF5.h5open(file, "r") do h5f
LegendHDF5IO.readdata(h5f, "data")
end, 1)
end
end
# Final pulse counter, displayed for inspection.
pulse_id
# NOTE(review): ordered (bl, first) index table — appears to be a leftover
# alternative to the shuffled `indices` list built above; it overwrites
# `indices` after the fact. TODO confirm it is still needed.
indices = Table(bl=[], first=[])
for i in 0:1:number_of_pulses
    bl_id = i%size(bl,1) + 1
    first = first_range[Int(ceil((i + 1) / size(bl,1)))]
    append!(indices, Table(bl=[bl_id], first=[first]))
end
# # Calc A & E
# +
# Calc A & E setup: paths plus the calibration, cut, A/E-calibration and
# survival-fraction dictionaries used below.
base_path = "../../waveforms/sim/2vbb_check/"
output_path = "../../waveforms/sim/2vbb_AoE/"
include("../src/fct.jl")
include("../src/fitting-fct.jl")
set = "sim"
calib_filepath = "../dicts/calib.json"
calib = JSON.parsefile(calib_filepath)
cut_lib_filepath = "../dicts/cut_lib.json"
cut_lib = JSON.parsefile(cut_lib_filepath)
AE_cal_filepath = "../dicts/AE_cal.json"
AE_cal = JSON.parsefile(AE_cal_filepath)
sf_lib_filepath = "../dicts/sf_lib.json"
sf_lib = JSON.parsefile(sf_lib_filepath);
# -
# -
# Compute the A/E pulse-shape parameter for the simulated waveforms:
# A = max of the 5-sample-smoothed current (derivative), E = max of the
# 201-sample-smoothed current; then calibrate, smear the energy to match
# data resolution, normalise A/E, and write one HDF5 file per channel.
# NOTE(review): loops over `[0]` only — the commented `0:1:36` suggests a
# single-channel debug run.
for ch in [0]#0:1:36
    if ch in [5,6,7,13] || channel_to_bege[ch] == false
        continue
    end
    ch_str = lpad(ch, 2, "0")
    IJulia.clear_output(true)
    @info("Start with Ch$ch_str")
    files = glob(joinpath(base_path, ch_str * "-" * channel_to_name[ch] * "/*.h5"));
    # Moving-average window bookkeeping for the 5- and 201-sample filters.
    numofele = 5
    BackDelta5 = div(numofele,2)
    ForwardDelta5 = isodd(numofele) ? div(numofele,2) : div(numofele,2) - 1
    numofele = 201
    BackDelta201 = div(numofele,2)
    ForwardDelta201 = isodd(numofele) ? div(numofele,2) : div(numofele,2) - 1
    data = Table(A=[], E=[], E_unsmeared=[], energy=[], evtno=[], pos=[])
    @showprogress "Calculating A & E " for file in files
        tmp = HDF5.h5open(file, "r") do h5f
            LegendHDF5IO.readdata(h5f, "data")
        end
        A = []
        E = []
        for wf in tmp.waveform
            push!(A, maximum(movingaverage(diff(wf.value),5,BackDelta5,ForwardDelta5,3)))
            push!(E, maximum(movingaverage(diff(wf.value),201,BackDelta201,ForwardDelta201,13)))
        end
        # Linear energy calibration for this channel/dataset.
        E_cal = linmodel(E, calib[channel_to_name[ch]][set]["lin_cal"][1])
        # True simulated energy (strip the unit to a plain number).
        energy = []
        for e in sum.(tmp.energy)
            push!(energy, e.val)
        end
        append!(data, Table(A=A, E=E, E_unsmeared=E_cal, energy=energy, evtno=tmp.evtno, pos=tmp.pos))
    end
    # Smear the calibrated energy with a Gaussian whose width follows the
    # per-channel resolution model (sqrt_fct with fitted parameters).
    smearing_dict = JSON.parsefile("../dicts/smearing.json")["Gauss"]
    p0 = [smearing_dict[string(ch)]["params"]["0"], smearing_dict[string(ch)]["params"]["1"], smearing_dict[string(ch)]["params"]["2"]]
    E = []
    @showprogress "Smearing energy " for e in data.E_unsmeared
        d = Normal(0, sqrt_fct(e,p0))
        push!(E, e + rand(d))
    end
    # A/E normalisation chain: overall norm, energy-dependent linear
    # correction, DEP centring, shift to 0, and sigma scaling.
    AoE = data.A ./ E
    AoE ./= calib[channel_to_name[ch]][set]["AE_norm"]
    AoE ./= linmodel(E, AE_cal[channel_to_name[ch]][set]["lin_fit"])
    AoE ./= AE_cal[channel_to_name[ch]][set]["DEP_fit"][1]["μ"]
    AoE .-= 1
    AoE ./= hypmodel(E, AE_cal[channel_to_name[ch]][set]["sig_fit"])
    filename = joinpath(output_path, ch_str * "-" * channel_to_name[ch] * "-AE_smeared.h5")
    !isdir(dirname(filename)) ? mkpath(dirname(filename)) : ""
    HDF5.h5open(filename, "w") do h5f
        LegendHDF5IO.writedata(h5f, "data", Table(A = Array{Float64,1}(data.A),
                E = Array{Float64,1}(E),
                AoE = Array{Float64,1}(AoE),
                E_unsmeared = Array{Float64,1}(data.E_unsmeared),
                energy = Array{Float64,1}(data.energy),
                evtno = Array{Int64}(data.evtno),
                pos = VectorOfArrays(Array{typeof(data.pos[1])}(data.pos))
            )
        )
    end
end
| calib/SimPrep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Quiz 1
# Quiz trace: x starts at 10, so the first condition (x>5 and x<9) is False
# and the second (x>=10 and x<15) is True -> prints 'hello' and bumps x to 11.
# The third branch is never tested: an elif chain stops at the first match.
x=10
if(x>5 and x<9):
    print('hi')
    x+=1
elif(x>=10 and x<15):
    print('hello')
    x+=1
elif(x>9 and x<=20):
    print('python')
    x+=1
# Output: hello, then 11
print(x)
# Quiz: this cell never runs at all — the `break` on Line 7 sits outside any
# loop, which is a SyntaxError raised at compile time (before Line 1 executes),
# even though the else branch would not be taken at runtime.
x='Edureka'
if(type(x) is str):
    x=x+' '+"rocks" # Line 3
    print('hello')
else:
    x=x+' '+'Python'
    break # Line 7
    print('hi')
print(x)
# Quiz trace (a for-loop's else runs when the loop finishes without `break`):
#   i=1: odd -> outer else -> inner loop over range(1,3) completes -> prints 1
#   i=2: even -> inner loop over range(2,2) is empty, completes -> prints 'hello'
# Output: 1, then hello
for i in range(1,3):
    if(i%2==0):
        for j in range(i,2):
            print('hi')
        else:
            print('hello')
    else:
        for j in range(i,3):
            pass
        else:
            print(i)
| Module_01/Untitled.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6 - AzureML
# language: python
# name: python3-azureml
# ---
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# Licensed under the MIT License.
# # Deploy an image classification model in Azure Container Instance (ACI)
#
# This tutorial is **part two of a two-part tutorial series**. In the [previous tutorial](img-classification-part1-training.ipynb), you trained machine learning models and then registered a model in your workspace on the cloud.
#
# Now, you're ready to deploy the model as a web service in [Azure Container Instances](https://docs.microsoft.com/azure/container-instances/) (ACI). A web service is an image, in this case a Docker image, that encapsulates the scoring logic and the model itself.
#
# ACI is a great solution for testing and understanding the workflow. For scalable production deployments, consider using Azure Kubernetes Service. For more information, see [how to deploy and where](https://docs.microsoft.com/azure/machine-learning/service/how-to-deploy-and-where).
# ## Set up the environment
# + tags=["check version"]
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import azureml.core
# display the core SDK version number (useful for reproducing notebook runs)
print("Azure ML SDK Version: ", azureml.core.VERSION)
# -
from azureml.core import Workspace

# Load the workspace from the local config.json created in part one of the tutorial.
ws = Workspace.from_config()
print(ws.name, ws.location, ws.resource_group, sep='\t')
# ## Register a local model
# + tags=["register model from file"]
from azureml.core.model import Model

# Register the locally trained sklearn MNIST model file with the workspace
# model registry so it can be versioned and later deployed.
model = Model.register(model_path="sklearn_mnist_model.pkl",  # local file to upload
                       model_name="sklearn_mnist_local",       # name in the registry
                       tags={"data": "mnist", "model": "classification"},
                       description="Mnist handwriting recognition",
                       workspace=ws)
# -
# ## Retrieve the trained model from your Machine Learning Workspace
#
# You registered a model in your workspace in the previous tutorial. Now, load this workspace and download the model to your local directory.
# + tags=["load workspace", "download model"]
import os
from azureml.core.model import Model

# Fetch the model registered in the previous tutorial ('sklearn_mnist')
# and download it into the current working directory.
model=Model(ws, 'sklearn_mnist')
model.download(target_dir=os.getcwd(), exist_ok=True)

# verify the downloaded model file exists (os.stat raises FileNotFoundError otherwise)
file_path = os.path.join(os.getcwd(), "sklearn_mnist_model.pkl")
os.stat(file_path)
# -
print(model.name, model.description, model.version, sep = '\t')
# ## Deploy as web service
#
# Once you've tested the model and are satisfied with the results, deploy the model as a web service hosted in ACI.
#
# To build the correct environment for ACI, provide the following:
# * A scoring script to show how to use the model
# * An environment file to show what packages need to be installed
# * A configuration file to build the ACI
# * The model you trained before
#
# ### Create scoring script
#
# Create the scoring script, called score.py, used by the web service call to show how to use the model.
#
# You must include two required functions into the scoring script:
# * The `init()` function, which typically loads the model into a global object. This function is run only once when the Docker container is started.
#
# * The `run(input_data)` function uses the model to predict a value based on the input data. Inputs and outputs to the run typically use JSON for serialization and de-serialization, but other formats are supported.
#
# +
# %%writefile score.py
import json
import numpy as np
import os
import pickle
from sklearn.externals import joblib
from sklearn.linear_model import LogisticRegression
from azureml.core.model import Model
def init():
    """Called once when the scoring container starts: load the model into a global."""
    global model
    # retrieve the path to the model file using the model name registered in the workspace
    model_path = Model.get_model_path('sklearn_mnist')
    model = joblib.load(model_path)
def run(raw_data):
    """Score a JSON request body of the form {"data": [[...], ...]}.

    Returns the model's predictions as a plain list, which (unlike an
    ndarray) serializes cleanly to JSON.
    """
    features = np.array(json.loads(raw_data)['data'])
    predictions = model.predict(features)
    return predictions.tolist()
# -
# ### Create environment file
#
# Next, create an environment file, called myenv.yml, that specifies all of the script's package dependencies. This file is used to ensure that all of those dependencies are installed in the Docker image. This model needs `scikit-learn` and `azureml-sdk`.
# + tags=["set conda dependencies"]
from azureml.core.conda_dependencies import CondaDependencies

# Declare the scoring image's conda dependencies; the model only needs
# scikit-learn on top of the base environment.
myenv = CondaDependencies()
myenv.add_conda_package("scikit-learn")

# Persist the spec so the deployment step below can reference it by filename.
with open("myenv.yml","w") as f:
    f.write(myenv.serialize_to_string())
# -
# Review the content of the `myenv.yml` file.
with open("myenv.yml","r") as f:
    print(f.read())
# ### Create configuration file
#
# Create a deployment configuration file and specify the number of CPUs and gigabyte of RAM needed for your ACI container. While it depends on your model, the default of 1 core and 1 gigabyte of RAM is usually sufficient for many models. If you feel you need more later, you would have to recreate the image and redeploy the service.
# + tags=["configure web service", "aci"]
from azureml.core.webservice import AciWebservice

# ACI container sizing: 1 CPU core / 1 GB RAM is enough for this small
# sklearn model; tags and description show up in the workspace UI.
aciconfig = AciWebservice.deploy_configuration(cpu_cores=1,
                                               memory_gb=1,
                                               tags={"data": "MNIST", "method" : "sklearn"},
                                               description='Predict MNIST with sklearn')
# -
# ### Deploy in ACI
# Estimated time to complete: **about 7-8 minutes**
#
# Configure the image and deploy. The following code goes through these steps:
#
# 1. Build an image using:
# * The scoring file (`score.py`)
# * The environment file (`myenv.yml`)
# * The model file
# 1. Register that image under the workspace.
# 1. Send the image to the ACI container.
# 1. Start up a container in ACI using the image.
# 1. Get the web service HTTP endpoint.
# + tags=["configure image", "create image", "deploy web service", "aci"]
# %%time
from azureml.core.webservice import Webservice
from azureml.core.model import InferenceConfig

# Bundle the scoring script and the conda environment into an inference config.
inference_config = InferenceConfig(runtime= "python",
                                   entry_script="score.py",
                                   conda_file="myenv.yml")

# Build the image, push it to ACI and start a container (takes ~7-8 minutes).
service = Model.deploy(workspace=ws,
                       name='sklearn-mnist-svc',
                       models=[model],
                       inference_config=inference_config,
                       deployment_config=aciconfig)

# Block until the deployment succeeds or fails, streaming progress output.
service.wait_for_deployment(show_output=True)
# -
# Get the scoring web service's HTTP endpoint, which accepts REST client calls. This endpoint can be shared with anyone who wants to test the web service or integrate it into an application.
# + tags=["get scoring uri"]
print(service.scoring_uri)
# -
# ## Test deployed service
#
# Earlier you scored all the test data with the local version of the model. Now, you can test the deployed model with a random sample of 30 images from the test data.
#
# The following code goes through these steps:
# 1. Send the data as a JSON array to the web service hosted in ACI.
#
# 1. Use the SDK's `run` API to invoke the service. You can also make raw calls using any HTTP tool such as curl.
#
# 1. Print the returned predictions and plot them along with the input images. Red font and inverse image (white on black) is used to highlight the misclassified samples.
#
# Since the model accuracy is high, you might have to run the following code a few times before you can see a misclassified sample.
# +
from utils import load_data
import os

data_folder = os.path.join(os.getcwd(), 'data')
# note we also shrink the intensity values (X) from 0-255 to 0-1. This helps the neural network converge faster
X_test = load_data(os.path.join(data_folder, 'test-images.gz'), False) / 255.0
y_test = load_data(os.path.join(data_folder, 'test-labels.gz'), True).reshape(-1)
# + tags=["score web service"]
import json

# find 30 random samples from test set
n = 30
sample_indices = np.random.permutation(X_test.shape[0])[0:n]

# The service expects a JSON body of the form {"data": [...]} (see score.py's run()).
test_samples = json.dumps({"data": X_test[sample_indices].tolist()})
test_samples = bytes(test_samples, encoding='utf8')

# predict using the deployed model (remote call to the ACI endpoint)
result = service.run(input_data=test_samples)

# compare actual value vs. the predicted values:
i = 0
plt.figure(figsize = (20, 1))
for s in sample_indices:
    plt.subplot(1, n, i + 1)
    plt.axhline('')
    plt.axvline('')
    # use different color for misclassified sample
    font_color = 'red' if y_test[s] != result[i] else 'black'
    clr_map = plt.cm.gray if y_test[s] != result[i] else plt.cm.Greys
    plt.text(x=10, y =-10, s=result[i], fontsize=18, color=font_color)
    plt.imshow(X_test[s].reshape(28, 28), cmap=clr_map)
    i = i + 1
plt.show()
# -
# You can also send raw HTTP request to test the web service.
# + tags=["score web service"]
import requests

# send a random row from the test set to score
random_index = np.random.randint(0, len(X_test)-1)
input_data = "{\"data\": [" + str(list(X_test[random_index])) + "]}"

headers = {'Content-Type':'application/json'}
# for AKS deployment you'd need to include the service key in the header as well
# api_key = service.get_key()
# headers = {'Content-Type':'application/json', 'Authorization':('Bearer '+ api_key)}

resp = requests.post(service.scoring_uri, input_data, headers=headers)

print("POST to url", service.scoring_uri)
#print("input data:", input_data)
print("label:", y_test[random_index])
print("prediction:", resp.text)
# -
# ## Clean up resources
#
# To keep the resource group and workspace for other tutorials and exploration, you can delete only the ACI deployment using this API call:
# + tags=["delete web service"]
# Tear down only the ACI web service; the workspace and registered models are kept.
service.delete()
# -
#
# If you're not going to use what you've created here, delete the resources you just created with this quickstart so you don't incur any charges. In the Azure portal, select and delete your resource group. You can also keep the resource group, but delete a single workspace by displaying the workspace properties and selecting the Delete button.
# 
| notebooks/Image Classification/03 - Model Deployment as Web Service.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
# - スタッキングを試してみる
# # Import everything I need :)
import warnings
warnings.filterwarnings('ignore')
import time
import multiprocessing
import glob
import gc
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
from plotly.offline import init_notebook_mode, iplot
import plotly.graph_objs as go
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.model_selection import KFold, train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn import linear_model
from fastprogress import progress_bar
# # Preparation
# Notebook/run configuration --------------------------------------------------
nb = 75                  # notebook number, used in output file names
isSmallSet = False       # set True to work on a `length`-row subsample for debugging
length = 10000
model_name = 'ridge'     # stacker model name, used in output file names
pd.set_option('display.max_columns', 200)
file_path = '../input/champs-scalar-coupling/'
glob.glob(file_path + '*')
# +
# train
path = file_path + 'train.csv'
if isSmallSet:
    train = pd.read_csv(path) [:length]
else:
    train = pd.read_csv(path)
type_train = train.type.values
# +
# test  (original comment said "train" — this cell loads test.csv)
path = file_path + 'test.csv'
if isSmallSet:
    test = pd.read_csv(path) [:length]
else:
    test = pd.read_csv(path)
type_test = test.type.values
# -
# ## myFunc
# **metrics**
def kaggle_metric(df, preds):
    """Competition score: mean over coupling types of log(MAE).

    NOTE: intentionally mutates *df* by attaching a 'prediction' column;
    a later cell relies on that side effect and drops the column itself.
    """
    df["prediction"] = preds
    log_maes = []
    for coupling_type in df.type.unique():
        subset = df[df.type == coupling_type]
        abs_errors = np.abs(subset.scalar_coupling_constant.values
                            - subset.prediction.values)
        log_maes.append(np.log(abs_errors.mean()))
    return np.mean(log_maes)
# # Stacking
# Outputs of the level-0 base models. Each entry is a filename template where
# {} is replaced by 'oof' (out-of-fold train predictions) or 'submission'
# (test predictions); the trailing number is the model's CV score.
path_list = [
    'nb54_{}_random_forest_regressor_-1.45569.csv',
    'nb60_{}_lgb_-1.5330660525700779.csv',
    'nb79_{}_extra_trees_regressor_-1.56760.csv',
    'nb80_{}_extra_trees_regressor_-1.48000.csv',
    'nb81_{}_bagging_regressor_-1.44452.csv',
    'nb84_{}_extra_trees_regressor_-1.60943.csv',
    'nb85_{}_extra_trees_regressor_-1.52972.csv',
    'nb88_{}_lgb_-1.547953965914086.csv',
    'nb91_{}_extra_trees_regressor_-1.47467.csv',
]
# oof: load out-of-fold predictions (row-aligned with the training set)
oofs = []
for path in path_list:
    if isSmallSet:
        df = pd.read_csv('./../output/' + path.format('oof'))[:length]
    else:
        df = pd.read_csv('./../output/' + path.format('oof'))
    oofs.append(df)
# sub: load test-set predictions, dropping the shared 'id' column
subs = []
for path in path_list:
    if isSmallSet:
        df = pd.read_csv('./../output/' + path.format('submission')).drop(['id'], axis=1)[:length]
    else:
        df = pd.read_csv('./../output/' + path.format('submission')).drop(['id'], axis=1)
    subs.append(df)
# # Create train, test data
# LabelEncode
# - `type` = {2JHC, ...}
# Fit on the union of train+test types so the integer ids are consistent.
lbl = LabelEncoder()
lbl.fit(list(type_train) + list(type_test))
type_train_lbl = lbl.transform(type_train)
type_test_lbl = lbl.transform(type_test)

# Stacking design matrix: one column per base model's predictions.
y = train.scalar_coupling_constant
X = pd.concat(oofs, axis=1)
X = pd.DataFrame(X.values, columns=path_list)
X_test = pd.concat(subs, axis=1)
X_test = pd.DataFrame(X_test.values, columns=path_list)
# # Training model
# **params**
# +
# Configuration
MAX_ITER = 10000
RANDOM_STATE = 0

# One ridge alpha per coupling type, indexed by the LabelEncoder id of the
# type (see the per-type training loop below). Presumably tuned offline.
model_params_list = [
    {'alpha': 0.016490786888660246},
    {'alpha': 0.01948297047624237},
    {'alpha': 0.01181445773799895},
    {'alpha': 0.01},
    {'alpha': 0.17022936642711317},
    {'alpha': 0.9019291300867383},
    {'alpha': 1.2589254117941673},
    {'alpha': 0.5469291042181513}]
# -
n_folds = 6
# NOTE(review): no random_state is set, so fold splits differ between runs.
folds = KFold(n_splits=n_folds, shuffle=True)
def train_model(X, X_test, y, folds, model_params):
    """Cross-validated ridge training for one coupling type.

    Parameters
    ----------
    X, X_test : pd.DataFrame  stacked base-model predictions (train / test rows)
    y : array-like            targets aligned with X
    folds : KFold             CV splitter
    model_params : dict       kwargs for linear_model.Ridge (e.g. {'alpha': ...})

    Returns
    -------
    dict with 'oof' (out-of-fold predictions for X), 'prediction'
    (test predictions averaged over folds) and 'scores' (per-fold valid MAE).
    """
    model = linear_model.Ridge(**model_params, max_iter=MAX_ITER, random_state=RANDOM_STATE) # <=================
    n_splits = folds.get_n_splits()
    scores = []
    oof = np.zeros(len(X))
    # BUGFIX: this was np.zeros(len(X)) and then *reassigned* inside the loop,
    # so only the last fold's model contributed to the test predictions.
    # Now sized to X_test and averaged across folds (standard CV ensembling).
    prediction = np.zeros(len(X_test))
    result_dict = {}
    for fold_n, (train_idx, valid_idx) in enumerate(folds.split(X)):
        print(f'Fold {fold_n + 1} started at {time.ctime()}')
        model.fit(X.iloc[train_idx, :], y[train_idx])
        y_valid_pred = model.predict(X.iloc[valid_idx, :])
        y_train_pred = model.predict(X.iloc[train_idx, :])
        # accumulate the fold-averaged test prediction
        prediction += model.predict(X_test) / n_splits
        oof[valid_idx] = y_valid_pred
        score = mean_absolute_error(y[valid_idx], y_valid_pred)
        score_train = mean_absolute_error(y[train_idx], y_train_pred)
        scores.append(score)
        print(f'fold {fold_n+1} train:{score_train :.5f} \t valid: {score :.5f}')
    print('')
    print('CV mean score : {0:.4f}, std: {1:.4f}.'.format(np.mean(scores), np.std(scores)))
    print('kaggle mean score: {0:.4f}'.format(np.log(np.mean(scores))))
    print('')
    result_dict['oof'] = oof
    result_dict['prediction'] = prediction
    result_dict['scores'] = scores
    return result_dict
# +
# %%time
# Train one stacker model per coupling type.
X_short = pd.DataFrame({'ind': list(X.index), 'type': type_train, 'oof': [0] * len(X), 'target': y.values})
X_short_test = pd.DataFrame({'ind': list(X_test.index), 'type': type_test, 'prediction': [0] * len(X_test)})
for i, t in enumerate(np.unique(type_train)):
    # pick the alpha tuned for this coupling type (indexed by encoder id)
    idx = lbl.transform([t])[0]
    model_params = model_params_list[idx]
    print('*'*80)
    print(f'Training of type {t}')
    print('*'*80)
    X_t = X.loc[type_train == t]
    X_test_t = X_test.loc[type_test == t]
    y_t = X_short.loc[type_train == t, 'target'].values
    result_dict = train_model(X_t, X_test_t, y_t, folds, model_params)
    # scatter the per-type results back into the full-length frames
    X_short.loc[type_train == t, 'oof'] = result_dict['oof']
    X_short_test.loc[type_test == t, 'prediction'] = result_dict['prediction']
print('')
print('===== finish =====')
# Score the stacked OOF predictions with the competition metric.
# NOTE: kaggle_metric also attaches a 'prediction' column to X; it is
# dropped again two lines below.
X['scalar_coupling_constant'] = y
X['type'] = type_train
metric = kaggle_metric(X, X_short['oof'])
X = X.drop(['scalar_coupling_constant', 'prediction', 'type'], axis=1)
print('CV mean score(group log mae): {0:.4f}'.format(metric))
prediction = X_short_test['prediction']
# -
# # Save
# **submission**
# Write the test-set predictions in sample_submission format.
# path_submittion = './output/' + 'nb{}_submission_lgb_{}.csv'.format(nb, metric)
path_submission = f'../output/nb{nb}_stack_submission_{model_name}_{metric :.5f}.csv'
print(f'save path: {path_submission}')  # typo fix: message said "save pash"
submission = pd.read_csv('../input/champs-scalar-coupling/sample_submission.csv')
# submittion = pd.read_csv('./input/champs-scalar-coupling/sample_submission.csv')[::100]
submission['scalar_coupling_constant'] = prediction
if isSmallSet:
    pass  # don't write output files for subsampled debug runs
else:
    submission.to_csv(path_submission, index=False)
# ---
# **result**
# Persist the out-of-fold predictions so they can feed later stacking rounds.
path_oof = f'../output/nb{nb}_stack_oof_{model_name}_{metric :.5f}.csv'
print(f'save path: {path_oof}')  # typo fix: message said "save pash"
oof = pd.DataFrame(X_short['oof'])
if isSmallSet:
    pass  # don't write output files for subsampled debug runs
else:
    oof.to_csv(path_oof, index=False)
# # analysis
# +
# Per-type scatter of predicted vs. true values for the OOF predictions.
plot_data = pd.DataFrame(y)
plot_data.index.name = 'id'
plot_data['yhat'] = X_short['oof']
plot_data['type'] = type_train

def plot_oof_preds(ctype, llim, ulim):
    """Scatter true vs. predicted scalar coupling for one coupling type.

    llim/ulim bound both axes; the diagonal marks perfect prediction.
    """
    plt.figure(figsize=(6,6))
    sns.scatterplot(x='scalar_coupling_constant',y='yhat',
                    data=plot_data.loc[plot_data['type']==ctype,
                    ['scalar_coupling_constant', 'yhat']]);
    plt.xlim((llim, ulim))
    plt.ylim((llim, ulim))
    plt.plot([llim, ulim], [llim, ulim])  # identity line
    plt.xlabel('scalar_coupling_constant')
    plt.ylabel('predicted')
    plt.title(f'{ctype}', fontsize=18)
    plt.show()

# axis limits chosen per type to match each type's value range
plot_oof_preds('1JHC', 0, 250)
plot_oof_preds('1JHN', 0, 100)
plot_oof_preds('2JHC', -50, 50)
plot_oof_preds('2JHH', -50, 50)
plot_oof_preds('2JHN', -25, 25)
plot_oof_preds('3JHC', -25, 60)
plot_oof_preds('3JHH', -20, 20)
plot_oof_preds('3JHN', -10, 15)
# -
| src/.ipynb_checkpoints/93_stacking_03_redge-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Berpikir dengan peluang
import pandas as pd
import numpy as np
import dc_stat_think as dcst
import seaborn as sns
import matplotlib.pyplot as plt
iris = pd.read_csv('iris.csv')
iris.head()
sns.set()
# %matplotlib inline
len(iris)

# Draw three independent random samples of 50 petal lengths
# (each permutation reshuffles the whole column, so the samples differ).
pertama_50, kedua_50, ketiga_50 = (np.random.permutation(iris['petal_length'].values)[0:50],
                                   np.random.permutation(iris['petal_length'].values)[0:50],
                                   np.random.permutation(iris['petal_length'].values)[0:50])

# ECDF of the first sample.
plt.plot(*dcst.ecdf(pertama_50), marker='.', linestyle='none')
plt.xlabel('Panjang Petal')
plt.ylabel('ECDF')

# Overlay all three sample ECDFs plus their means (vertical lines) to show
# sampling variability.
plt.plot(*dcst.ecdf(pertama_50), marker='.', linestyle='none')
plt.plot(*dcst.ecdf(kedua_50), marker='.', linestyle='none',alpha=0.5)
plt.plot(*dcst.ecdf(ketiga_50), marker='.', linestyle='none', alpha=0.5)
plt.axvline(np.mean(pertama_50), alpha=0.5)
plt.axvline(np.mean(kedua_50), alpha=0.5)
plt.axvline(np.mean(ketiga_50), alpha=0.5)
plt.xlabel('Panjang Petal')
plt.ylabel('ECDF')

# Repeat with 100 resamples to visualize the sampling distribution.
for _ in range(100):
    data = np.random.permutation(iris['petal_length'].values)[0:50]
    plt.plot(*dcst.ecdf(data), marker='.', linestyle='none',alpha=0.5)
    plt.axvline(np.mean(data), alpha=0.5)
plt.xlabel('Panjang Petal')
plt.ylabel('ECDF')
# # Remember Hacker Statistics ?
# Simulate randomness with np.random instead of analytic formulas.
np.random.random()
np.random.seed(42)  # fix the seed so the demo is reproducible
random_numbers = np.random.random(size=4)
random_numbers
# Treat draws below 0.5 as "heads" — each draw models one fair coin flip.
heads = random_numbers < 0.5
heads
np.sum(heads)  # number of heads among the 4 flips
# ## Simulating 4 coins
# +
# Count how often all four coins land heads across 10,000 simulated trials.
n_all_heads = 0
for _ in range(10000):
    flips = np.random.random(size=4) < 0.5
    if np.sum(flips) == 4:
        n_all_heads += 1
# -
# Empirical probability of four heads (theoretical value: 1/16 = 0.0625).
n_all_heads / 10000
# - How to simulate the data
# - Repeat it many times
# - Use the resulting probabilities to approximate the outcome of interest
# # Story in Probability: the Binomial Distribution
# - Probability distribution: a mathematical description of the possible outcomes
# Binomial: the number of successes r out of n Bernoulli trials, each with
# success probability p, is binomially distributed.
np.random.binomial(4, 0.5)           # one draw: heads out of 4 fair flips
np.random.binomial(4, 0.5, size=10)  # ten draws at once
samples = np.random.binomial(60, 0.1, size=10000)
# +
# Empirical PMF: relative frequency of each success count.
plt.plot(pd.Series(samples).value_counts() / len(pd.Series(samples)), marker='.', linestyle='none')
plt.xlabel('Jumlah Sukses')
plt.ylabel('Probability')
plt.title('Binomial PMF')
plt.xticks([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
# -
# Empirical CDF of the same samples.
x, y = dcst.ecdf(samples)
plt.plot(x, y, marker='.', linestyle='none')
plt.margins(0.02)
plt.xlabel('number of successes')
plt.ylabel('CDF')
plt.title('Binomial CDF')
# # Story in Probability: the Poisson Distribution
np.random.poisson(4)           # one draw with rate (mean) 4
np.random.poisson(4, size=10)  # ten draws
samples_poisson = np.random.poisson(6, size=10000)
# Empirical PMF: relative frequency of each count.
plt.plot(pd.Series(samples_poisson).value_counts() / len(pd.Series(samples_poisson)),
         marker='.', linestyle='none')
plt.xlabel('Jumlah Sukses')
plt.ylabel('Probability')
plt.title('Poisson PMF')
plt.xticks([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
# Empirical CDF of the same samples.
x_poi, y_poi = dcst.ecdf(samples_poisson)
plt.plot(x_poi, y_poi, marker='.', linestyle='none')
plt.margins(0.02)
plt.xlabel('Jumlah Sukses')
plt.ylabel('CDF')
plt.xticks([0, 2, 4, 6, 8, 10, 12, 14, 16, 18])
plt.title('Poisson CDF')
| Bagian 5 - Statistik dan Python 1/3. Probabilistik and Statistik Inferensia.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="3MRxbFKLsD95" colab_type="text"
# <h1><center> <font color='black'> Business Data Analytics - Practice Session_13 </font></center></h1>
# <h2><center> <font color='black'> Fairness and Explainability</font></center></h2>
# <h2><center> <font color='black'> University of Tartu - Spring 2020</font></center></h2>
# + [markdown] id="Sdh3olu5sD-c" colab_type="text"
# # Introduction:
#
# * Machine learning models are still black box capable of performing magic to give people what they want. However, the harsh reality is that without a `reasonable understanding of how machine learning models or the data science pipeline works, real-world projects rarely succeed.`
#
#
# * Considering any data science project in the real world, you will typically have a `business aspect` and the `technical or solution aspect.`
#
# * Now, data scientists typically work to build models and provide solutions for the business. However, the business may not know the details of how a model might work. But since this very model will be making a lot of decisions for them in the end, they do have a right to pose the question, **How can I trust your model?** or **How does your model really make its decisions?** Answering these questions is something data science practitioners and researchers have been trying over several years now.
#
#
# ## Understanding Machine Learning Model Interpretation
#
# * Machine Learning has seen widespread industry adoption only in the last couple of years. Hence, `model interpretation` as a concept is still mostly theoretical and subjective.
#
# * Any machine learning model at its heart has a `target function` which tries to map and explain relationships and patterns between the `independent (input) variables and the dependent (target or response) variable(s).`
#
# * When a model predicts or finds our insights, it takes certain decisions and choices. **Model interpretation** tries to understand and explain these decisions taken by the response function i.e., `the what, why and how.`
#
# * The key to model interpretation is transparency, the ability to question, and the ease of understanding model decisions by humans. The three most important aspects of model interpretation are explained as follows.
#
# * `What drives model predictions?` We should have the ability to query our model and find out latent feature interactions to get an idea of `which features might be important` in the decision-making policies of the model. This ensures **fairness** of the model.
#
# * `Why did the model take a certain decision?` We should also be able to validate and justify why certain key features were responsible in driving certain decisions taken by a model during predictions. This ensures **accountability and reliability** of the model.
#
# * `How can we trust model predictions?` We should be able to evaluate and validate any data point and how a model takes decisions on it. This should be demonstrable and easy to understand for key stakeholders that the model works as expected. This ensures **transparency** of the model.
#
#
# * **Interpretability** also popularly known as `human-interpretable interpretations (HII)` of a machine learning model is the extent to which a human (including non-experts in machine learning) can understand the choices taken by models in their decision-making process `(the how, why and what).`
#
#
# * Some of us might argue if a model is working great why bother digging deeper? Always remember that when solving data science problems in the real-world, for `the business to trust your model predictions and decisions, they will keep asking the question,` **Why should I trust your model?** and this makes perfect sense.
#
#
# * Would you be satisfied with a model just predicting and taking decisions **(the what)** like if a person has cancer or diabetes, if a person might be a risk to society or even if a customer will churn? Maybe not, we might prefer it more if we could know more about the model’s decision process **(the why and how).** This gives us more transparency into why the model makes certain decisions, what might go wrong in certain scenarios and over time it helps us build a certain amount of trust on these machine learning models.
#
#
# * **Model Interpretation** is something which can make or break a real-world machine learning project in the industry and helps us come one step closer to `explainable artificial intelligence (XAI).`
#
#
# + [markdown] id="EH2OyPaKsD-n" colab_type="text"
# ## Traditional Techniques for Model Interpretation
#
# * Model interpretation at heart, is to find out ways to understand model decision making policies better. This is to enable `fairness, accountability and transparency` which will give humans enough confidence to use these models in real-world problems which a lot of impact to business and society.
#
#
# * Hence, there are techniques which have existed for a long time now, which can be used to understand and interpret models in a better way. These can be grouped under the following two major categories.
# * `Exploratory analysis and visualization techniques` like clustering and dimensionality reduction.
# * `Model performance evaluation metrics` like accuracy and the AUC (for classification models) and root mean-square error, mean absolute error (for regression models)
#
#
# ## Limitations of Traditional Techniques and Motivation for Better Model Interpretation
#
# * The techniques we discussed in the previous section are definitely helpful in trying to understand more about our data and features, as well as which models might be effective. However, they are quite limiting in terms of discerning human-interpretable explanations of how a model works.
#
#
# ## Model Interpretation Techniques
#
# * There are a wide variety of new model interpretation techniques which try to address the limitations and challenges of traditional model interpretation techniques and try to combat the classic Intepretability vs. Model Performance Trade-off.
# * **Using Interpretable Models:** The easiest way to get started with model interpretation is to use models which are interpretable out of the box! This typically includes your regular parametric models like `linear regression, logistic regression, tree-based models`
# * **Feature Importance:** is generic term for the degree to which a predictive model relies on a particular feature. Typically, a feature’s importance is the increase in the model’s prediction error after we permuted the feature’s values like `Skater`, and `Shap` methods `(will be discussed in this lab session)`
# * **Partial Dependence Plots:** Partial Dependence describes the marginal impact of a feature on model prediction, holding other features in the model constant. The partial dependence plot `(PDP or PD plot)` shows the marginal effect of a feature on the predicted outcome of a previously fit model. PDPs can show if the relationship between the target and a feature is linear, monotonic or more complex. `(will be discussed in this lab session)` using `Skater and SHAP`
# * **Global Surrogate Models:** A global surrogate model is an interpretable model that is trained to approximate the predictions of a black box model which can essentially be any model regardless of its complexity or training algorithm. `Skater`
# * **Local Interpretable Model-agnostic Explanations (LIME):** LIME is a novel algorithm designed by Ribeiro, Singh, and Guestrin to assess the behavior of any base estimator (model) using local interpretable surrogate models (e.g. linear classifier/regressor). Basically, LIME explanations are based on local surrogate models. These surrogate models are interpretable models (like a linear model or decision tree) that are learned on the predictions of the original black box model. But instead of trying to fit a global surrogate model, LIME focuses on fitting local surrogate models to explain why single predictions were made. `LIME`.
# * **Shapley Values and SHapley Additive exPlanations (SHAP):** is a unified approach to explain the output of any machine learning model. SHAP connects game theory with local explanations, uniting several previous methods and representing the only possible consistent and locally accurate additive feature attribution method based on what they claim! (do check out the SHAP NIPS paper for details).
#
#
#
#
#
#
#
# + [markdown] id="0PJoTyVRsD_B" colab_type="text"
# # Outline:
#
# * In Lab session, we will be looking at a comprehensive guide to `building and interpreting machine learning models` using all the new techniques we learnt in the introduction. We will be using several state-of-the-art model interpretation frameworks for this.
#
# 1. Hands-on guides on using the latest state-of-the-art model interpretation frameworks
# 2. Features, concepts and examples of using frameworks like **ELI5**, **Skater** and **SHAP**
# 3. Explore concepts and see them in action — `Feature importances, partial dependence plots, surrogate models, interpretation and explanations` with `LIME`, `SHAP values`
# 4. Hands-on Machine Learning Model Interpretation on a supervised learning example
# + [markdown] id="h5yL1BmnsD_U" colab_type="text"
# # Load necessary dependencies
# + id="D6roMu7fsD_a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 627} outputId="08a46e3e-c3c3-4d98-abe2-b1d8ab8e59e8" executionInfo={"status": "ok", "timestamp": 1589186816979, "user_tz": -120, "elapsed": 18331, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# !pip install eli5
# !pip install shap
# + id="vKXyA6slsEAt" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 115} outputId="10b987cf-f34d-49e5-9924-1193bdc24bd6" executionInfo={"status": "ok", "timestamp": 1589186820656, "user_tz": -120, "elapsed": 21980, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Core data wrangling and plotting stack
import pandas as pd
import numpy as np
import model_evaluation_utils as meu  # local helper module: classification report utilities
import matplotlib.pyplot as plt
from collections import Counter
# Model interpretation frameworks used throughout this lab
import shap
import eli5
import warnings
warnings.filterwarnings('ignore')  # keep library warnings out of the notebook output
plt.style.use('fivethirtyeight')
# %matplotlib inline
shap.initjs()  # load the JS runtime that SHAP's interactive plots require
# + [markdown] id="IRF15NrRsEBw" colab_type="text"
# # Load the Census Income Dataset
# + id="YCdNgncdsEB5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="0e1b4f85-83c7-443b-906c-7b89e0284fe7" executionInfo={"status": "ok", "timestamp": 1589186821332, "user_tz": -120, "elapsed": 22635, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Fetch the UCI Adult census dataset bundled with SHAP; display=True keeps the
# human-readable categorical values in the dataframe.
data, labels = shap.datasets.adult(display=True)
print(labels)
# The raw target is boolean (income > $50K); cast to a 0/1 integer array.
labels = np.asarray(labels).astype(int)
data.shape, labels.shape
# + [markdown] id="avCqYP7OsECV" colab_type="text"
# # Understanding the Census Income Dataset
#
# Let's now take a look at our dataset attributes and understand their meaning and significance.
#
#
# | Attribute Name | Type | Description |
# |-----------------------|----------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|
# | Age | Continuous | Represents age of the person |
# | Workclass | Categorical | Represents the nature of working class\category<br>(Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, <br>Local-gov, State-gov, Without-pay, Never-worked) |
# | Education-Num | Categorical | Numeric representation of educational qualification.<br>Ranges from 1-16.<br>(Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, <br>9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool) |
# | Marital Status | Categorical | Represents the marital status of the person<br>(Married-civ-spouse, Divorced, Never-married, Separated, <br>Widowed, Married-spouse-absent, Married-AF-spouse) |
# | Occupation | Categorical | Represents the type of profession\job of the person<br>(Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, <br>Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, <br>Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, <br>Armed-Forces) |
# | Relationship | Categorical | Represents the relationship status of the person<br>(Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried) |
# | Race | Categorical | Represents the race of the person<br>(White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black) |
# | Sex | Categorical | Represents the gender of the person<br>(Female, Male) |
# | Capital Gain | Continuous | The total capital gain for the person |
# | Capital Loss | Continuous | The total capital loss for the person |
# | Hours per week | Continuous | Total hours spent working per week |
# | Country | Categorical | The country where the person is residing |
# | Income Label (labels) | Categorical (class label) | The class label column is the one we want to predict<br>(False: Income <= \$50K & True: Income > \$50K) | |
#
# * We have a total of 12 features and our **objective** is to predict `if the income of a person will be more than $50K$ (True) or less than $50K$ (False).` Hence we will be building and interpreting a classification model
# + id="IeIgF4QMsECa" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 310} outputId="b7992eaa-3cb2-4dc3-9814-a3f3b987f25e" executionInfo={"status": "ok", "timestamp": 1589186821334, "user_tz": -120, "elapsed": 22617, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Preview the first rows of the feature dataframe (human-readable values)
data.head()
# + id="L-L-3R5BsEC0" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 347} outputId="6e26a388-ffed-4728-f3e1-5cd6bdc177e9" executionInfo={"status": "ok", "timestamp": 1589186821336, "user_tz": -120, "elapsed": 22603, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Column dtypes and non-null counts — shows which columns are pandas 'category'
data.info()
# + id="S1AoGYjrsEDm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="448ec07e-4a1d-492e-c374-73a5d099fc8b" executionInfo={"status": "ok", "timestamp": 1589186821339, "user_tz": -120, "elapsed": 22592, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Encoded class labels: 0 (income <= $50K) vs 1 (income > $50K)
labels
# + [markdown] id="MFNBvnFDsEEQ" colab_type="text"
# # Basic Feature Engineering
#
# * Here we convert the categorical columns with string values to numeric representations.
#
#
# * Typically the XGBoost model can handle categorical data natively being a tree-based model so we don't one-hot encode the features
# + id="1gOvJ2rNsEEb" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 223} outputId="25678142-b6da-4a67-fdcf-87a6d1e9c443" executionInfo={"status": "ok", "timestamp": 1589186821342, "user_tz": -120, "elapsed": 22580, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Replace every pandas 'category' column with its integer category codes so the
# tree-based model receives purely numeric input.
for column in data.select_dtypes(['category']).columns:
    data[column] = data[column].cat.codes
# Peek at the encoded features.
data.head()
# + [markdown] id="qpqKna3nsEEu" colab_type="text"
# ## Viewing distribution of people with <= \$50K (False) and > \$50K (True) income
# + id="fmJtIFbSsEEz" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="54de4146-af3e-4c87-a2b8-3f48562e0c26" executionInfo={"status": "ok", "timestamp": 1589186821345, "user_tz": -120, "elapsed": 22569, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Class distribution — check how imbalanced the <=$50K vs >$50K classes are
Counter(labels)
# + [markdown] id="k4mqMzbEsEFV" colab_type="text"
# # Building Train and Test Datasets
#
# * For any machine learning model, we always need train and test datasets. We will be building the model on the train dataset and test the performance on the test dataset.
#
# * We maintain two datasets `(one with the encoded categorical values and one with the original values)` so we can train with the encoded dataset but use the original dataset as needed later on for model interpretation.
# + id="20CFdH2csEFd" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="ba4069ca-5667-499e-98d6-b7c5b8e0b412" executionInfo={"status": "ok", "timestamp": 1589186821345, "user_tz": -120, "elapsed": 22556, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# encoded data set
from sklearn.model_selection import train_test_split
# 70/30 split on the label-encoded features; fixed seed for reproducibility
X_train, X_test, y_train, y_test = train_test_split(data, labels, test_size=0.3, random_state=42)
X_train.shape, X_test.shape
# + id="XPbGIwAssEF3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 161} outputId="50ec1322-a293-4e3b-e726-2a8be59c8a78" executionInfo={"status": "ok", "timestamp": 1589186821347, "user_tz": -120, "elapsed": 22544, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Sanity-check a few encoded training rows
X_train.head(3)
# + id="MQVSshN6sEGZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3bba95b6-ea41-4656-e3df-e17c030074a6" executionInfo={"status": "ok", "timestamp": 1589186821350, "user_tz": -120, "elapsed": 22531, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Original data set
# Re-load the dataset with human-readable categorical values and split it with
# the same seed/size so its rows line up one-to-one with the encoded split above.
data_disp, labels_disp = shap.datasets.adult(display=True)
X_train_disp, X_test_disp, y_train_disp, y_test_disp = train_test_split(data_disp, labels_disp, test_size=0.3, random_state=42)
X_train_disp.shape, X_test_disp.shape
# + id="2bsyBN72sEG9" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 230} outputId="00d7af8f-e7f2-4093-bf3d-5b5cec5fa1e9" executionInfo={"status": "ok", "timestamp": 1589186821352, "user_tz": -120, "elapsed": 22519, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Same rows as the encoded preview, but with original readable values
X_train_disp.head(3)
# + [markdown] id="tbwkF7Y-sEHa" colab_type="text"
# # Training the classification model
#
# We will now train and build a basic boosting classification model on our training data using the popular [XGBoost](https://xgboost.readthedocs.io/en/latest/)
# framework, an optimized distributed gradient boosting library designed to be highly efficient, flexible and portable
# + id="QgcG1Hm_sEHc" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 52} outputId="af0a5643-af5b-4dc8-98bd-701df84b05f0" executionInfo={"status": "ok", "timestamp": 1589186830252, "user_tz": -120, "elapsed": 31405, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# %%time
import xgboost as xgb

# Gradient-boosted tree ensemble: 500 trees of depth 5 with a logistic
# objective — this is the "black box" model interpreted in the rest of the lab.
xgc = xgb.XGBClassifier(
    n_estimators=500,
    max_depth=5,
    base_score=0.5,
    objective='binary:logistic',
    random_state=42,
)
xgc.fit(X_train, y_train)
# + [markdown] id="zF7y_043sEH2" colab_type="text"
# # Making predictions on the test data
#
# Here we do the usual, use the trained model to make predictions on the test dataset
# + id="DPvOgAfgsEH5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="067c2c5c-1d00-4b27-f032-bee1ee48c9b5" executionInfo={"status": "ok", "timestamp": 1589186830259, "user_tz": -120, "elapsed": 31400, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Hard class predictions (0 = <=$50K, 1 = >$50K) on the held-out test set
predictions = xgc.predict(X_test)
predictions[:10]
# + [markdown] id="D8p_yfEcsEIO" colab_type="text"
# # Model Performance Evaluation
#
# Time to put the model to the test! Let's evaluate how our model has performed with its predictions on the test data.
# We use my nifty `model_evaluation_utils` module for this which leverages `scikit-learn` internally to give us
# standard classification model evaluation metrics
# + id="novKGHuesEIQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 330} outputId="a486b3eb-dbfa-4157-eec5-6a22708c1559" executionInfo={"status": "ok", "timestamp": 1589186830263, "user_tz": -120, "elapsed": 31393, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Evaluate predictions against the ground truth using the local helper module
class_labels = list(set(labels))
meu.display_model_performance_metrics(true_labels=y_test, predicted_labels=predictions, classes=class_labels)
# + [markdown] id="eQjbpiTCsEIm" colab_type="text"
# # Default Model Interpretation Methods
#
# By default it is difficult to gauge on specific model interpretation methods for machine learning models out of the box.
# `Parametric models` like logistic regression are easier to interpret given that the total number of parameters of the model
# are fixed regardless of the volume of data and one can make some interpretation of the model's prediction decisions leveraging
# the parameter coefficients.
#
# `Non-parametric models` are harder to interpret given that the total number of parameters remain unbounded and increase
# with the increase in the data volume. Some non-parametric models like tree-based models do have some out of the box model
# interpretation methods like feature importance which helps us in understanding which features might be influential in
# the model making its prediction decisions
#
# ## Classic feature importances from XGBoost
#
# Here we try out the global feature importance calculations that come with XGBoost. The model enables us to view feature importances based on the following.
#
# - __Feature Weights:__ This is based on the number of times a feature appears in a tree across the ensemble of trees
# - __Gain:__ This is based on the average gain of splits which use the feature
# - __Coverage:__ This is based on the average coverage (number of samples affected) of splits which use the feature
#
# Note that they all contradict each other, which motivates the use of model interpretation frameworks like SHAP which uses something known as SHAP values, which claim to come with consistency guarantees (meaning they will typically order the features correctly).
# + id="JJMRJK1SsEIq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 783} outputId="2391d825-c03b-4c61-99a2-c8e139ca809f" executionInfo={"status": "ok", "timestamp": 1589186831490, "user_tz": -120, "elapsed": 32605, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Plot XGBoost's three built-in importance metrics side by side; note that the
# three rankings disagree, which motivates SHAP-style attributions.
fig = plt.figure(figsize=(16, 12))
title = fig.suptitle("Default Feature Importances from XGBoost", fontsize=14)
importance_views = [
    ('weight', "Feature Importance - Feature Weight"),
    ('gain', "Feature Importance - Split Mean Gain"),
    ('cover', "Feature Importance - Sample Coverage"),
]
for position, (importance_type, subplot_title) in enumerate(importance_views, start=1):
    ax = fig.add_subplot(2, 2, position)
    xgb.plot_importance(xgc, importance_type=importance_type, ax=ax)
    t = ax.set_title(subplot_title)
# + [markdown] id="hxCguUZ1sEI5" colab_type="text"
# # Model Interpretation with ELI5
#
# [__ELI5__](https://github.com/TeamHG-Memex/eli5) is a Python package which helps to debug machine learning classifiers and explain their predictions in an easy to understand an intuitive way. It is perhaps the easiest of the three machine learning frameworks to get started with since it involves minimal reading of documentation! However it doesn't support true model-agnostic interpretations and support for models are mostly limited to tree-based and other parametric\linear models. Let's look at some intuitive ways of model interpretation with ELI5 on our classification model.
#
# ### Installation Instructions
#
# We recommend installing this framework using __`pip install eli5`__ since the `conda` version appears to be a bit out-dated. Also feel free to check out [__the documentation__](https://eli5.readthedocs.io/en/latest/overview.html) as needed.
# + [markdown] id="wLBlwzJjsEI9" colab_type="text"
# ## Feature Importances with ELI5
#
# Typically for tree-based models ELI5 does nothing special but uses the out-of-the-box feature importance computation methods which we discussed in the previous section. By default, __'gain'__ is used, that is the average gain of the feature when it is used in trees.
# + id="MBDkUMxRsEJD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 243} outputId="2f8d101b-fc1c-48be-f237-ea4edf511069" executionInfo={"status": "ok", "timestamp": 1589186831498, "user_tz": -120, "elapsed": 32596, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# ELI5 global feature weights for the trained booster (gain-based by default)
eli5.show_weights(xgc.get_booster())
# + [markdown] id="kK4OknxXsEJY" colab_type="text"
# ## Explaining Model Prediction Decisions with ELI5
#
# One of the best way to explain model prediction decisions to either a technical or a more business-oriented individual, is to `examine individual data-point predictions.` Typically, ELI5 does this by showing weights for each feature depicting how influential it might have been in contributing to the final prediction decision across all trees. The idea for weight calculation is described in http://blog.datadive.net/interpreting-random-forests/; ELI5 provides an independent implementation of this algorithm for XGBoost and most scikit-learn tree ensembles which is definitely on the path towards model-agnostic interpretation but not purely model-agnostic like LIME.
#
# Typically, the prediction can be defined as the sum of the feature contributions + the “bias” (i.e. the mean given by the topmost region that covers the entire training set)
# + [markdown] id="hJvt19gfsEJc" colab_type="text"
# ### Predicting when a person's income <= \$50K
#
# Here we can see the most influential features being the __`Age`, `Hours per week`, `Marital Status`, `Occupation` & `Relationship`__
# + id="O-VXYC6MsEJl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="c8bff0fa-62f9-4b03-a2f1-f307f6995fee" executionInfo={"status": "ok", "timestamp": 1589186831508, "user_tz": -120, "elapsed": 32591, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Explain a single prediction (test row 0, income <= $50K): ELI5 shows each
# feature's contribution across all trees plus the bias term.
doc_num = 0
print('Actual Label:', y_test[doc_num])
print('Predicted Label:', predictions[doc_num])
eli5.show_prediction(xgc.get_booster(),
                     X_test.iloc[doc_num],
                     feature_names=list(data.columns),
                     show_feature_values=True)
# + [markdown] id="ZDQBI8fwsEJ7" colab_type="text"
# ### Predicting when a person's income > \$50K
#
# Here we can see the most influential features being the __`Education`, `Relationship`, `Occupation`, `Hours per week` & `Marital Status`__
# + id="DkmTXwXEsEJ8" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 350} outputId="a01cdc93-ce7c-4667-f6cc-e0af628b78a9" executionInfo={"status": "ok", "timestamp": 1589186832160, "user_tz": -120, "elapsed": 33230, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Same per-prediction explanation, this time for a person earning > $50K
doc_num = 2
print('Actual Label:', y_test[doc_num])
print('Predicted Label:', predictions[doc_num])
eli5.show_prediction(xgc.get_booster(), X_test.iloc[doc_num], feature_names=list(data.columns) ,show_feature_values=True)
# + [markdown] id="rww9bR6msEKW" colab_type="text"
# It is definitely interesting to see how similar features play an influential role in explaining model prediction decisions for both classes!
# + [markdown] id="jB73IE9PsEKd" colab_type="text"
# # Global Interpretations with Skater
#
# A predictive model is a mapping from an input space to an output space. Interpretation algorithms are divided into those that offer statistics and metrics on regions of the domain, such as the marginal distribution of a feature, or the joint distribution of the entire training set. In an ideal world there would exist some representation that would allow a human to interpret a decision function in any number of dimensions. Given that we generally can only intuit visualizations of a few dimensions at a time, global interpretation algorithms either aggregate or subset the feature space.
#
# Currently, model-agnostic global interpretation algorithms supported by skater include partial dependence and feature importance with a very new release of tree-surrogates also. We will be covering feature importance and partial dependence plots here
#
# ## Creating an interpretation object
# The general workflow within the skater package is to create an interpretation, create a model, and run interpretation algorithms. Typically, an `Interpretation` consumes a dataset, and optionally some metadata like feature names and row ids. Internally, the `Interpretation` will generate a `DataManager` to handle data requests and sampling.
#
# - __Local Models(`InMemoryModel`):__ To create a skater model based on a local function or method, pass in the predict function to an `InMemoryModel`. A user can optionally pass data samples to the examples keyword argument. This is only used to infer output types and formats. Out of the box, skater allows models return `numpy` arrays and `pandas` dataframes.
#
# - __Operationalized Model(`DeployedModel`):__ If your model is accessible through an API, use a `DeployedModel`, which wraps the requests library. `DeployedModels` require two functions, an input formatter and an output formatter, which speak to the requests library for posting and parsing. The input formatter takes a `pandas` DataFrame or a `numpy` ndarray, and returns an object (such as a dict) that can be converted to JSON to be posted. The output formatter takes a requests.response as an input and returns a `numpy` ndarray or `pandas` DataFrame.
#
# We will use the following workflow:
#
# - Build an interpretation object
# - Build an in-memory model
# - Perform interpretations
# + [markdown] id="0axrmSP7sEKl" colab_type="text"
# # Local Interpretations with Skater
#
# Local Interpretation could be possibly be achieved in two ways. Firstly, one could possibly approximate the behavior of a complex predictive model in the vicinity of a single input using a simple interpretable auxiliary or surrogate model (e.g. Linear Regressor). Secondly, one could use the base estimator to understand the behavior of a single prediction using intuitive approximate functions based on inputs and outputs.
#
# ## Local Interpretable Model-Agnostic Explanations(LIME)
# * **LIME** is a novel algorithm designed by <NAME>, <NAME>, <NAME> to `assess the behavior of any base estimator (model) using interpretable surrogate models (e.g. linear classifier/regressor)`. Such form of comprehensive evaluation helps in generating explanations which are locally faithful but may not align with the global behavior. Basically, `LIME explanations are based on local surrogate models.` These surrogate models are interpretable models (like a linear model or decision tree) that are learned on the predictions of the original black box model. But instead of trying to fit a global surrogate model, LIME focuses on fitting local surrogate models to explain why single predictions were made.
#
# The idea is very intuitive. To start with, just try and unlearn what you have done so far! Forget about the training data, forget about how your model works! Think that your model is a black box model with some magic happening inside, where you can input data points and get the model's predicted outcomes. You can probe this magic black box as often as you want with inputs and get output predictions.
#
# Now, your main objective is to understand why the machine learning model which you are treating as a magic black box, gave the outcome it produced. `LIME tries to do this for you! It tests out what happens to your black box model's predictions when you feed variations or perturbations of your dataset into the black box model.` Typically, LIME generates a new dataset consisting of perturbed samples and the associated black box model's predictions. On this dataset LIME then trains an interpretable model weighted by the proximity of the sampled instances to the instance of interest.
#
# * Following is a standard high-level workflow for this.
# - Choose your instance of interest for which you want to have an explanation of the predictions of your black box model.
# - Perturb your dataset and get the black box predictions for these new points.
# - Weight the new samples by their proximity to the instance of interest.
# - Fit a weighted, interpretable (surrogate) model on the dataset with the variations.
# - Explain prediction by interpreting the local model.
#
# We recommend you to read the [LIME chapter](https://christophm.github.io/interpretable-ml-book/lime.html) in <NAME>'s excellent book on Model Interpretation which talks about this in detail.
# + [markdown] id="EO_NkquWsEKs" colab_type="text"
# ## Explaining Model Predictions with Skater using LIME
#
# Skater can leverage LIME to explain model predictions. Typically, its __`LimeTabularExplainer`__ class helps in explaining predictions on tabular (i.e. matrix) data. For numerical features, it perturbs them by sampling from a Normal(0,1) and doing the inverse operation of mean-centering and scaling, according to the means and stds in the training data. For categorical features, it perturbs by sampling according to the training distribution, and making a binary feature that is 1 when the value is the same as the instance being explained. The __`explain_instance()`__ function generates explanations for a prediction. First, we generate neighborhood data by randomly perturbing features from the instance. We then learn locally weighted linear (surrogate) models on this neighborhood data to explain each of the classes in an interpretable way.
#
# Since XGBoost has some issues with feature name ordering when building models with dataframes, we will build our same model with numpy arrays to make LIME work without additional hassles of feature re-ordering. Remember the model being built is the same ensemble model which we treat as our black box machine learning model
# + id="GrWB5ySTsEKw" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 139} outputId="d74fa2db-39c4-4050-e0af-fd83955a2f7d" executionInfo={"status": "ok", "timestamp": 1589186839911, "user_tz": -120, "elapsed": 40968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
# Re-fit the identical booster on raw numpy arrays (no column names), so the
# numpy inputs LIME perturbs match the feature ordering the model expects.
xgc_np = xgb.XGBClassifier(
    n_estimators=500,
    max_depth=5,
    base_score=0.5,
    objective='binary:logistic',
    random_state=42,
)
xgc_np.fit(X_train.values, y_train)  # X_train as a numpy array
# + id="mSp42pz1stPL" colab_type="code" colab={}
# #!pip install skater
# + id="3wIKHviJsELK" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 375} outputId="d593e95c-7002-4e8c-93ca-1bc371d7cba3" executionInfo={"status": "error", "timestamp": 1589186840851, "user_tz": -120, "elapsed": 41888, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14Gg9UiBaJaGnafDynGv5Ght5Z18MwCGvDUW8ttd2=s64", "userId": "05621694697917066197"}}
from skater.core.local_interpretation.lime.lime_tabular import LimeTabularExplainer

# LIME explainer over the numeric test matrix; continuous features are
# discretized into bins so the local explanations read more naturally.
exp = LimeTabularExplainer(
    X_test.values,
    feature_names=list(data.columns),
    discretize_continuous=True,
    class_names=['$50K or less', 'More than $50K'],
)
# + [markdown] id="Sk12BtK2sELe" colab_type="text"
# ### Predicting when a person's income <= \$50K
#
# Skater gives a nice reasoning below showing which features were the most influential in the model taking the correct decision of predicting the person's income as below \$50K
# + id="xBNEDCgesELg" colab_type="code" colab={}
# LIME local explanation for test row 0 (income <= $50K), probing the
# numpy-trained booster through its predict_proba function
doc_num = 0
print('Actual Label:', y_test[doc_num])
print('Predicted Label:', predictions[doc_num])
exp.explain_instance(X_test.iloc[doc_num].values, xgc_np.predict_proba).show_in_notebook()
# + [markdown] id="h41w6hMQsEMJ" colab_type="text"
# ### Predicting when a person's income > \$50K
#
# Skater gives a nice reasoning below showing which features were the most influential in the model taking the correct decision of predicting the person's income as above \$50K
# + id="JAIXR6HisEMU" colab_type="code" colab={}
# LIME local explanation for test row 2 (income > $50K)
doc_num = 2
print('Actual Label:', y_test[doc_num])
print('Predicted Label:', predictions[doc_num])
exp.explain_instance(X_test.iloc[doc_num].values, xgc_np.predict_proba).show_in_notebook()
# + [markdown] id="_4Sbl82vsEXh" colab_type="text"
# ## Path to more interpretable models with Tree Surrogates using Skater
#
# We have see various ways to interpret machine learning models with features, dependence plots and even LIME. But can we build an approximation or a surrogate model which is more interpretable from a really complex black box model like our XGBoost model having hundreds of decision trees?
#
# Here in, we introduce the novel idea of using __`TreeSurrogates`__ as means for explaining a model's learned decision policies (for inductive learning tasks), which is inspired by the work of <NAME> described as the TREPAN algorithm.
#
# We recommend checking out the following excellent papers on the TREPAN algorithm to build surrogate trees.
# - [_<NAME>(1996) EXTRACTING COMPREHENSIBLE MODELS FROM TRAINED NEURAL NETWORKS_](http://ftp.cs.wisc.edu/machine-learning/shavlik-group/craven.thesis.pdf)
# - [_<NAME> and <NAME>(NIPS, 96). Extracting Tree-Structured Representations of Trained Networks_](https://papers.nips.cc/paper/1152-extracting-tree-structured-representations-of-trained-networks.pdf)
#
# Briefly, Trepan constructs a decision tree in a best-first manner. It maintains a queue of leaves which are expanded into subtrees as they are removed from the queue. With each node in the queue, Trepan stores,
#
# - a subset of the training examples,
# - another set of instances (query instances),
# - a set of constraints.
#
# The stored subset of training examples consists simply of those examples that reach the node. The query instances are used, along with the training examples, to select the splitting test if the node is an internal node or to determine the class
# label if it is a leaf. The constraint set describes the conditions that instances must satisfy in order to reach the node; this information is used when drawing a set of query instances for a newly created node. The process of expanding a node in Trepan is much like it is in conventional decision tree algorithms: a splitting test is selected for the node, and a child is created for each outcome of the test. Each child is either made a leaf of the tree or put into the queue for future expansion.
#
#
# For Skater's implementation, for building explainable surrogate models, the base estimator(Oracle) could be any form of a supervised learning predictive model - our black box model. The explanations are approximated using Decision Trees(both for Classification/Regression) by learning decision boundaries similar to that learned by the Oracle (predictions from the base model are used for learning the Decision Tree representation). The implementation also generates a fidelity score to quantify tree based surrogate model’s approximation to the Oracle. Ideally, the score should be 0 for truthful explanation both globally and locally. Let's check this out in action!
#
# __NOTE:__ :: Experimental :: The implementation is currently experimental and might change in future.
# + [markdown] id="f5mkG_T_sEXk" colab_type="text"
# ### Using the interpreter instance invoke call to the TreeSurrogate
# + id="X_OgeHY-sEXo" colab_type="code" colab={}
from skater.core.explanations import Interpretation
from skater.model import InMemoryModel

# Wrap the test data and the black-box predict_proba so Skater can query them,
# then request a decision-tree surrogate of the XGBoost oracle.
interpreter = Interpretation(training_data=X_test,
                             training_labels=y_test,
                             feature_names=list(data.columns))
im_model = InMemoryModel(xgc.predict_proba,
                         examples=X_train,
                         target_names=['$50K or less', 'More than $50K'])
surrogate_explainer = interpreter.tree_surrogate(oracle=im_model, seed=42)
# + [markdown] id="IVzg8_NisEX6" colab_type="text"
# ### Using the surrogate model to learn the decision boundaries learned by the base estimator
# - Reports the fidelity value when compared to the base estimator (closer to 0 is better)
# - Learner uses F1 score as the default metric of choice for classification.
# + id="VzKIMss7sEX9" colab_type="code" colab={}
# Fit the surrogate tree against the oracle's predictions with pre-pruning;
# per the notes above, this reports an F1-based fidelity value (closer to 0 is better)
surrogate_explainer.fit(X_train, y_train, use_oracle=True, prune='pre', scorer_type='f1')
# + [markdown] id="hipIZMaXsEYN" colab_type="text"
# ### Taking a look at the position for each feature
# - We do this since the feature names in the surrogate tree are not displayed (but are present in the model)
# + id="SDOP1wedsEYQ" colab_type="code" colab={}
# Map the positional feature ids (X0, X1, ...) shown in the surrogate-tree plot
# back to the actual column names.
pd.DataFrame([(f'X{position}', column) for position, column in enumerate(data.columns)]).T
# + [markdown] id="Z8NQEr-tsEYg" colab_type="text"
# ### Visualizing the Surrogate Tree
# + id="_KxukhPWsEYl" colab_type="code" colab={}
from skater.util.dataops import show_in_notebook
from graphviz import Source
from IPython.display import SVG
# Render the surrogate tree: Skater emits a DOT description, graphviz pipes it
# to SVG, and the SVG is both saved to disk and displayed inline.
graph = Source(surrogate_explainer.plot_global_decisions(colors=['coral', 'darkturquoise'],
                           file_name='test_tree_pre.png').to_string())
svg_data = graph.pipe(format='svg')
# Persist a copy so the tree can be inspected outside the notebook.
with open('dtree_structure.svg','wb') as f:
    f.write(svg_data)
SVG(svg_data)
# + [markdown] id="J24eN5iosEY0" colab_type="text"
# ### Interesting rules from the surrogate tree
#
# Here are some interesting rules you can observe from the above tree
# - If `Relationship` < 0.5 (means 0) and `Education-num` <= 9.5 and `Capital Gain` <= 4225 __→__ 70% chance of person making <= \$50K
# - If `Relationship` < 0.5 (means 0) and `Education-num` <= 9.5 and `Capital Gain` >= 4225 __→__ 94.5% chance of person making > \$50K
# - If `Relationship` < 0.5 (means 0) and `Education-num` >= 9.5 and `Education-num` is also >= 12.5 __→__ 94.7% chance of person making > \$50K
#
# Feel free to derive more interesting rules from this and also your own models! Let's look at how our surrogate model performs on the test dataset now
# + [markdown] id="yipp4hdUsEY2" colab_type="text"
# ### Surrogate Model Performance Evaluation
#
# Just as expected, the model performance drops a fair bit but still we get an overall F1 score of 83% as compared to our boosted model's score of 87% which is quite good!
# + id="yb9avHVDsEY7" colab_type="code" colab={}
# Evaluate the surrogate tree on the held-out test set.
surrogate_predictions = surrogate_explainer.predict(X_test)
# sorted() gives a deterministic class order; iterating a raw set can vary
# between runs (string hash randomization), which would shuffle the rows of
# the performance report from run to run.
class_labels = sorted(set(labels))
meu.display_model_performance_metrics(true_labels=y_test, predicted_labels=surrogate_predictions, classes=class_labels)
# + [markdown] id="T6SBbN-YsEZG" colab_type="text"
# # Model Interpretation with SHAP
#
# **SHAP (SHapley Additive exPlanations)** is a unified approach to explain the output of any machine learning model. SHAP connects game theory with local explanations, uniting several previous methods and representing the only possible consistent and locally accurate additive feature attribution method based on what they claim! (do check out the [SHAP NIPS paper](http://papers.nips.cc/paper/7062-a-unified-approach-to-interpreting-model-predictions) for details).
#
#
#
# ### Install
#
# SHAP can be installed from [PyPI](https://pypi.org/project/shap)
#
# ```
# pip install shap
# ```
#
# or [conda-forge](https://anaconda.org/conda-forge/shap)
#
# ```
# conda install -c conda-forge shap
# ```
#
# The really awesome aspect about this framework is while SHAP values can explain the output of any machine learning model, for really complex ensemble models it can be slow. But they have developed a high-speed exact algorithm for tree ensemble methods ([Tree SHAP arXiv paper](https://arxiv.org/abs/1802.03888)). Fast C++ implementations are supported for *XGBoost*, *LightGBM*, *CatBoost*, and *scikit-learn* tree models!
#
# * SHAP (SHapley Additive exPlanations) `assigns each feature an importance value for a particular prediction.` Its novel components include: the identification of a new class of additive feature importance measures, and theoretical results showing there is a unique solution in this class with a set of desirable properties.
#
# * Typically, `SHAP values try to explain the output of a model (function) as a **sum of the effects of each feature** being introduced into a conditional expectation.` Importantly, for non-linear functions the order in which features are introduced matters. The SHAP values result from averaging over all possible orderings. Proofs from game theory show this is the only possible consistent approach.
#
# * An intuitive way to understand the Shapley value is the following: `The feature values enter a room in random order.` All feature values in the room participate in the `game (= contribute to the prediction).` The Shapley value __$ϕ_{ij}$__ is the average marginal contribution of feature value __$x_{ij}$__ by joining whatever features already entered the room before, i.e.
#
# $$\phi_{ij}=\sum_{\text{All.orderings}}val(\{\text{features.before.j}\}\cup{}x_{ij})-val(\{\text{features.before.j}\})$$
#
# The following figure from the KDD 18 paper, [_Consistent Individualized Feature Attribution for Tree Ensembles_](https://arxiv.org/pdf/1802.03888.pdf) summarizes this in a nice way!
#
# 
#
# Let's now dive into SHAP and leverage it for interpreting our model!
# + [markdown] id="rSx_Rq6PsEZK" colab_type="text"
# ## Explain predictions with SHAP
#
# Here we use the Tree SHAP implementation integrated into XGBoost to explain the test dataset! Remember that there are a variety of explainer methods based on the type of models you are building. We estimate the SHAP values for a set of samples (test data)
# + id="mRGA062UsEZN" colab_type="code" colab={}
# Tree SHAP: exact, fast SHAP value computation for tree ensembles (XGBoost here).
explainer = shap.TreeExplainer(xgc)
# One row of SHAP values per test sample, one column per feature.
shap_values = explainer.shap_values(X_test)
# + id="Xz_Ui4JGsEZg" colab_type="code" colab={}
pd.DataFrame(shap_values).head()
# + [markdown] id="-7yXhNrtsEZu" colab_type="text"
# This returns a matrix of SHAP values (`# samples x # features`). Each row sums to the difference between the model output for that sample and the expected value of the model output (which is stored as `expected_value` attribute of the explainer). Typically this difference helps us in explaining why the model is inclined on predicting a specific class outcome.
# + id="mX8ey_T5sEZx" colab_type="code" colab={}
# The base value: the average model output over the background (training) data.
print('Expected Value:', explainer.expected_value)
# + [markdown] id="rB9cpcjksEZ-" colab_type="text"
# ### Predicting when a person's income <= \$50K
#
# * SHAP gives a nice reasoning below showing which features were the most influential in the model taking the
# correct decision of predicting the person's income as below \$50K. `The below explanation shows features each contributing to push the model output from the base value (the average model output over the training dataset we passed) to the actual model output. Features pushing the prediction higher are shown` in **red**, `those pushing the prediction lower are in` **blue**.
# + id="Ft-6h308sEaB" colab_type="code" colab={}
# Single-sample force plot: starting from the base (expected) value, features
# pushing the prediction higher are shown in red, lower in blue.
shap.force_plot(explainer.expected_value, shap_values[0,:], X_test_disp.iloc[0,:])
# + [markdown] id="HdIaL_6jsEaO" colab_type="text"
# ### Predicting when a person's income > \$50K
#
# Similarly, SHAP gives a nice reasoning below showing which features were the most influential in the model taking the correct decision of predicting the person's income as greater than \$50K.
# + id="I-cZEIX1sEaS" colab_type="code" colab={}
# Same view for a sample predicted to earn more than $50K (test row 2).
shap.force_plot(explainer.expected_value, shap_values[2,:], X_test_disp.iloc[2,:])
# + [markdown] id="xEEPpXi-sEaf" colab_type="text"
# ## Visualizing and explaining multiple predictions
#
# One of the key advantages of SHAP is it can build beautiful interactive plots which can visualize and explain multiple predictions at once. Here we visualize model prediction decisions for the first 1000 test data samples.
# + id="mXuQWM3tsEag" colab_type="code" colab={}
# Stacked, interactive force plot over the first 1000 test samples.
shap.force_plot(explainer.expected_value, shap_values[:1000,:], X_test_disp.iloc[:1000,:])
# + [markdown] id="Ku12QWS-sEaw" colab_type="text"
# The above visualization can be interacted with in multiple ways. The default visualization shows some interesting model prediction pattern decisions.
#
# - The first 100 test samples all probably __earn more than \$50K__ and they __are married__ or\and have a __good capital gain__ or\and have a __higher education level__!
# - The next 170+ test samples all probably __earn less than or equal to \$50K__ and they __are mostly un-married__ and\or are __very young in age or divorced__!
# - The next 310+ test samples have an inclination towards mostly __earning more than \$50K__ and they are of diverse profiles including married folks, people with different age and education levels and occupation. Most dominant features pushing the model towards making a prediction for higher income is the person being married i.e. __relationship: husband or wife__!
# - The remaining 400+ test samples have an inclination towards mostly __earning less than \$50K__ and they are of diverse profiles however dominant patterns include __relationship: either unmarried or divorced__ and __very young in age__!
#
# Definitely interesting how we can find out patterns which lead to the model making specific decisions and being able to provide explanations for them.
# + [markdown] id="2Kry66g3sEa2" colab_type="text"
# ## Feature Importances with SHAP
#
# This basically takes the average of the SHAP value magnitudes across the dataset and plots it as a simple bar chart.
# + id="s0BbaZq8sEa4" colab_type="code" colab={}
# Mean |SHAP value| per feature: a global feature-importance bar chart.
shap.summary_plot(shap_values, X_test, plot_type="bar")
# + [markdown] id="EOf94QSTsEbK" colab_type="text"
# ## SHAP Summary Plot
#
# Besides a typical feature importance bar chart, SHAP also enables us to use a `density scatter plot` of SHAP values for each feature `to identify how much impact each feature has on the model output for individuals in the validation dataset.` Features are sorted by the sum of the SHAP value magnitudes across all samples. `It is interesting to note that the age and marital status feature has more total model impact than the capital gain feature, but for those samples where capital gain matters it has more impact than age or marital status.` In other words, **capital gain** effects a few predictions by a **large amount**, while `age or marital status effects all predictions by a smaller amount.`
#
# Note that when the scatter points don't fit on a line they pile up to show density, and the color of each point represents the feature value of that individual.
# + id="2_i_uSEfsEbO" colab_type="code" colab={}
# Beeswarm summary: one dot per (sample, feature); x = SHAP value, color = feature value.
shap.summary_plot(shap_values, X_test)
# + [markdown] id="G9YJJZoIsEba" colab_type="text"
# ## SHAP Dependence Plots
#
# * **SHAP dependence plots** `show the effect of a single (or two) feature across the whole dataset.` They plot a `feature's value vs. the SHAP value` of that feature across many samples. SHAP dependence plots are similar to partial dependence plots, but account for the interaction effects present in the features, and are only defined in regions of the input space supported by data. The vertical dispersion of SHAP values at a single feature value is driven by interaction effects, and another feature can be chosen for coloring to highlight possible interactions.
#
#
# + [markdown] id="Elz_i4QzsEbd" colab_type="text"
# ### PDP of 'Age' affecting model prediction
#
# Just like we observed before, the middle-aged people have a slightly higher shap value, pushing the model's prediction decisions to say that these individuals make more money as compared to younger or older people
# + id="GeEPDmcHsEbh" colab_type="code" colab={}
# Single-feature dependence plot for 'Age' (colored by itself): feature value
# on x vs. its SHAP contribution on y, across all test samples.
shap.dependence_plot(ind='Age', interaction_index='Age',
                     shap_values=shap_values,
                     features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="W22-3yfQsEbs" colab_type="text"
# ### PDP of 'Education-Num' affecting model prediction
#
# Higher education levels have higher shap values, pushing the model's prediction decisions to say that these individuals make more money as compared to people with lower education levels
# + id="SLl3EZSLsEbu" colab_type="code" colab={}
# Same view for 'Education-Num'.
shap.dependence_plot(ind='Education-Num', interaction_index='Education-Num',
                     shap_values=shap_values,
                     features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="WgHHwp5ysEb6" colab_type="text"
# ### PDP of 'Relationship' affecting model prediction
#
# Just like we observed during the model prediction explanations, married people (husband or wife) have a slightly higher shap value, pushing the model's prediction decisions to say that these individuals make more money as compared to other folks!
# + id="kbgjtqgIsEb9" colab_type="code" colab={}
# Same view for 'Relationship'.
shap.dependence_plot(ind='Relationship', interaction_index='Relationship',
                     shap_values=shap_values,
                     features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="MR5BupcXsEcK" colab_type="text"
# ### PDP of 'Capital Gain' affecting model prediction
#
# * Here typically a Capital Gain of more than \$5K - \$8K leads to a huge spike in the SHAP values making the model push towards prediction decisions to say that these individuals make more money as compared to others!
# + id="cYK_ow7msEcM" colab_type="code" colab={}
# Single-feature dependence plot for 'Capital Gain' (colored by itself).
shap.dependence_plot(ind='Capital Gain', interaction_index='Capital Gain',
                     shap_values=shap_values,
                     features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="eqxzNFEGsEcc" colab_type="text"
# ### Two-way PDP showing interactions between features 'Age' and 'Capital Gain' and their effect on making more than \$50K
#
# The vertical dispersion of SHAP values at a single feature value is driven by interaction effects, and another feature is chosen for coloring to highlight possible interactions. Here we are trying to see interactions between `Age` and `Capital Gain` and also their effect on the SHAP values which lead to the model predicting if the person will make more money or not, with the help of a two-way partial dependence plot.
#
# Interesting to see the higher capital gain and the middle-aged folks (30-50) having the highest chance of making more money!
# + id="nJ_ObX2XsEcd" colab_type="code" colab={}
# Interaction view: x-axis = Age vs. its SHAP value, color = Capital Gain value.
shap.dependence_plot(ind='Age', interaction_index='Capital Gain',
                     shap_values=shap_values, features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="fuO5_CszsEco" colab_type="text"
# ### Two-way PDP showing interactions between features 'Education-Num' and 'Relationship' and their effect on making more than \$50K
#
# Here we are trying to see interactions between `Education-Num` and `Relationship` and also their effect on the SHAP values which lead to the model predicting if the person will make more money or not, with the help of a two-way partial dependence plot.
#
# Interesting to see the higher education level and the husband or wife (married) folks having the highest chance of making more money!
# + id="dxG1CXvLsEcq" colab_type="code" colab={}
# Interaction view: x-axis = Education-Num, color = Relationship.
shap.dependence_plot(ind='Education-Num', interaction_index='Relationship',
                     shap_values=shap_values, features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="itRWDXodsEc5" colab_type="text"
# ### Two-way PDP showing interactions between features 'Marital Status' and 'Relationship' and their effect on making more than \$50K
#
# Here we are trying to see interactions between `Marital Status` and `Relationship` and also their effect on the SHAP values which lead to the model predicting if the person will make more money or not, with the help of a two-way partial dependence plot.
#
# This is interesting because both the features are similar in some context, we can see typically married people with relationship status of either husband or wife having the highest chance of making more money!
# + id="Oj3RcjOGsEc9" colab_type="code" colab={}
# Interaction view: x-axis = Marital Status, color = Relationship.
shap.dependence_plot(ind='Marital Status', interaction_index='Relationship',
                     shap_values=shap_values, features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="TKt69TZ6sEdE" colab_type="text"
# ### Two-way PDP showing interactions between features 'Age' and 'Hours per week' and their effect on making more than \$50K
#
# Here we are trying to see interactions between `Age` and `Hours per week` and also their effect on the SHAP values which lead to the model predicting if the person will make more money or not, with the help of a two-way partial dependence plot.
#
# Nothing extra-ordinary here, middle-aged people working the most make the most money!
# + id="8Mvpy_SnsEdE" colab_type="code" colab={}
# Interaction view: x-axis = Age, color = Hours per week.
shap.dependence_plot(ind='Age', interaction_index='Hours per week',
                     shap_values=shap_values, features=X_test,
                     display_features=X_test_disp)
# + [markdown] id="18r5msxHsEdU" colab_type="text"
# # Final Words
#
# If you are reading this, I would like to really commend your efforts on going through this comprehensive tutorial on machine learning model interpretation and encourage you to try out some of these frameworks with your own models and datasets and explore the world of model interpretation!
# | lab13_original.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Happy Valentine's Day
# <img src = 'Valentine.jpg' width="400">
# <font color=red>**Let us celebrate Valentine's Day by doing something cool, e.g.,**</font>
# # Lithofacies classification and prediction using support vector machines
# In this exercise, we will train a support vector machine classifier to predict facies using well log measurements. The dataset consists of well logging data from eight wells that have been labeled with a facies type based on observation of core, and comes from The University of Kansas on [Neural Networks and Fuzzy Systems](http://www.people.ku.edu/~gbohling/EECS833/), as part of a consortium project to use machine learning techniques to create a reservoir model of the largest gas fields in North America, the Hugoton and Panoma Fields. To learn more about the data set, see [Bohling and Dubois (2003)](http://www.kgs.ku.edu/PRS/publication/2003/ofr2003-50.pdf) and [Dubois et al. (2007)](http://dx.doi.org/10.1016/j.cageo.2006.08.011).
#
# After finishing this exercise, you will understand: <br>
# - How to preprocess your data and get them ready for machine learning; <br>
# - How to split the whole data sets into two parts: training set and validation/test set; <br>
# - How to implement support vector machine classification using Scikit-Learn; <br>
# - the typical steps involved in performing machine learning <br>
# <br>
#
# Author: <NAME> at University of Houston, 02/14/2019
# # 1. Support vector machines (SVM)
# <font color = red>**Task 1:**</font> Write a paragraph that summarizes what you know about SVM. <font color = red>**(15 points)**</font>
#
# **HINT:**
# * What is SVM?
# * How is it different from logistic regression?
# * What is hard margin classification?
# * What is soft margin classification?
# * How does SVM handle nonlinear decision boundaries? <br>
#
# Again, please refer to the lecture slides if you need to refresh your memory.
# (answer to Task 1:)
#
#
#
# First of all, as always, we need to import some of the libraries that we are going to use later.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import matplotlib as mpl
import matplotlib.colors as colors
from mpl_toolkits.axes_grid1 import make_axes_locatable
from pandas import set_option
import faciesplot
from faciesplot import make_facies_log_plot
# Truncate large DataFrame displays to 10 rows in notebook output.
set_option("display.max_rows", 10)
# Silence pandas' SettingWithCopy warning for the slice assignments used later.
pd.options.mode.chained_assignment = None
# # 2. Import data
# Our data is stored in .csv file. This type of tabular data can be easily read by the pandas.read_csv method. You can actually take a look at the data in this file by opening it using Microsoft Excel. For this lab exercise, we will use the pandas.read_csv method to load the data into a dataframe, which provides a convenient data structure to work with well log data.
# Load the labeled well-log dataset (one row per half-foot depth interval).
data = pd.read_csv('training_data.csv')
# # 3. Get to know your data
# Let us take a look at the first five rows using the DataFrame's head() method.
data.head()
# Another useful way to have a quick overview of the statistical distribution of the data is to call the data.describe().
data.describe()
# Or, you can simply do the following to see what is actually in this set of data.
data
# The data set consists of seven features (five wireline log measurements and two indicator variables) and a facies label at half-foot depth intervals. In machine learning terminology, the set of measurements at each depth interval comprises a **feature vector**, and each **feature vector** is associated with a **label** (the facies type). <br>
#
# From the above table, we observe that we have 3232 feature vectors in the data sets.
#
# The seven predictor variables (or, input variables) are: <br>
# * Five wire line log curves include <br>
# 1\. [gamma ray](http://petrowiki.org/Gamma_ray_logs) (GR), <br>
# 2\. [resistivity logging](http://petrowiki.org/Resistivity_and_spontaneous_%28SP%29_logging) (ILD_log10), <br>
# 3\. [photoelectric effect](http://www.glossary.oilfield.slb.com/en/Terms/p/photoelectric_effect.aspx) (PE), <br>
# 4\. [neutron-density porosity difference](http://petrowiki.org/Neutron_porosity_logs) (DeltaPHI ). <br>
# 5\. [average neutron-density porosity](http://petrowiki.org/Neutron_porosity_logs) (PHIND). <br>
# <br>
# * Two geologic constraining variables: <br>
# 6\. nonmarine-marine indicator (NM_M). <br>
# 7\. relative position (RELPOS). <br>
#
# Note, some wells do not have PE. <br>
#
# The nine discrete facies (classes of rocks) are:
# <img src = "1_facies.PNG">
#
# These facies aren't discrete, and gradually blend into one another. Some have neighboring facies that are rather close. Mislabeling within these neighboring facies can be expected to occur. The following table lists the facies, their abbreviated labels and their approximate neighbors.
# To find out the unique well names, you can use the numpy.unique().
# List the wells present in the dataset.
data['Well Name'].unique()
# To look at the distribution of different facies, please run the code in the following cell.
# +
#count the number of unique entries for each facies, sort them by facies number (instead of by number of entries)
facies_counts = data['Facies'].value_counts().sort_index()
#use facies labels to index each count
# 1=sandstone 2=c_siltstone 3=f_siltstone
# 4=marine_silt_shale 5=mudstone 6=wackestone 7=dolomite
# 8=packstone 9=bafflestone
facies_colors = ['#F4D03F', '#F5B041','#DC7633','#6E2C00','#1B4F72','#2E86C1', '#AED6F1', '#A569BD', '#196F3D']
facies_labels_unique = ['SS', 'CSiS', 'FSiS', 'SiSh', 'MS','WS', 'D','PS', 'BS']
# Re-index the counts by facies abbreviation (list order matches facies codes 1..9).
facies_counts.index = facies_labels_unique
facies_counts.plot(kind='bar',color=facies_colors,
                   title='Distribution of Training Data by Facies')
plt.show()
facies_counts
# -
# This shows the distribution of examples by facies for the 3232 training examples in the training set. Dolomite (facies 7) has the fewest with 98 examples. There are also only 161 bafflestone examples. Depending on the performance of the classifier we are going to train, we may consider getting more examples of these facies.
# Let us take a look at the well log measurements and the lithofacies from the well 'SHRIMPLIN'.
make_facies_log_plot(data[data['Well Name'] == 'SHRIMPLIN'], facies_colors)
plt.show()
# <font color = red>**Task 2:**</font> Create a similar plot that shows the well log measurements and the facies from well 'NEWBY'. <font color = red>**(5 points)**</font>
#
#
# # 4 Split data into training and test set
# We will remove one well from the data that we just explored above, for the purpose of testing the performance of our classifier. The next line of code removes the data from well '<font color = red>**SHANKLE**</font>' and put it aside for testing purpose. That is, after we finish training our classifier and fine-tuning all the hyperparameters, we will use this set of data to test our support vector machine classifier.
# Hold out one complete well ('SHANKLE') as the blind test set.
test_data = data[data['Well Name'] == 'SHANKLE']
# We will use all the other remaining data as our training and cross-validation data.
training_data = data[data['Well Name']!='SHANKLE']
# Remember that our training data set consists of two parts: feature vectors and labels. We now need to separate them.
# The seven predictor columns: five wireline logs plus two geologic indicator variables.
features = ['GR','ILD_log10','DeltaPHI','PHIND','PE','NM_M','RELPOS']
input_feature_vectors = training_data[features] # feature vectors in the training data set
output_facies_labels = training_data['Facies'] # labels in the training data set
# Crossplots are a familiar tool in the geosciences to visualize how two properties vary with rock type. This dataset contains 5 log variables, and scatter matrix can help to quickly visualize the variation between the all the variables in the dataset. We can employ the very useful [Seaborn library](https://stanford.edu/~mwaskom/software/seaborn/) to quickly create a nice looking scatter matrix. Each panel in the plot shows the relationship between two of the variables on the x and y axis, with a stacked bar plot showing the distribution of each type of measurement along the diagonal.
sns.pairplot(input_feature_vectors[['GR','ILD_log10','DeltaPHI','PHIND','PE']])
plt.show()
# It is not clear from these crossplots what relationships exist between the measurements and facies labels. This is where machine learning will prove useful.
# # 5. Preprocessing data
# Many machine-learning algorithms assume the feature data are normally distributed (i.e., Gaussian with zero mean and unit
# variance). The above figure shows us that this is not the case with our training data. We will condition, or standardize, the training data so that it has this property. The same factors used to standardize the training set must be applied to any subsequent data set that will be classified. Scikit-learn includes a handy StandardScalar class that can be applied to the training set and later used to standardize any input data. <br>
#
# Recall that you already implemented the scaling, or standardizing, procedure in the **lab exercise** on <font color=red>**logistic regression**</font>.
# <font color = red>**Task 3:**</font> Import <font color=blue>**StandardScaler**</font> from **Scikit-learn**. <font color = red>**(10 points)**</font>
#
# **HINT**: If you forget how to do it, please refer back to your lab exercise on logistic regression.
# <font color = red>**Task 4:**</font> Perform scaling using the imported <font color=blue>**StandardScaler**</font>, and assign the final scaled data to a new variable <font color=blue>**scaled_features**</font>. <font color = red>**(15 points)**</font>
#
# **HINT**: If you forget how to do it, please refer back to your lab exercise on logistic regression. Remember that we are only scaling the input feature vectors (i.e., input_feature_vectors), NOT the labels.
# Next, let us create a new variable **X_train** for the input variables (i.e., feature vectors) from the training data. Similarly, we will also create a new varaible **y_train** for the output variables (i.e., our labels).
# NOTE(review): `scaled_features` is produced by the student in Task 4 (the
# StandardScaler output); this cell raises a NameError until that task is done.
X_train = scaled_features
y_train = output_facies_labels
# # 6. Train a SVM classifier
# Now, we are ready to train a SVM classifier.
# <font color = red>**Task 5:**</font> Import support vector machine classifier from Scikit-learn. <font color = red>**(10 points)**</font>
#
# **HINT**: If you forget how to do it, please refer back to my lecture slides (the last page), or the example notebook in this folder: SVM_Example2_Moon.ipynb
# <font color = red>**Task 6**</font>: Assign the SVC method to a new variable <font color=blue>*svm_clf*</font>. <font color = red>**(10 points)**</font>
#
# **HINT**: Please refer back to SVM_Example2_Moon.ipynb. Note that, in SVM_Example2_Moon.ipynb, I set kernel = 'poly', degree = 3, coef0 = 1, C = 5. However, for this task, please use the following hyperparameters: **kernel = 'rbf', C = 10, gamma = 1** instead.
# <font color = red>**Task 7:**</font> Train a SVM classifier using our training data, i.e., <font color=blue>**X_train**</font> and <font color=blue>**y_train**</font>. <font color = red> **(10 points)**</font>
#
# **HINT:** Again, if you do not know how to do it, please take a look at SVM_Example2_Moon.ipynb. Only one line of code is necessary for this task.
# Congratulations! You just trained your first SVM classifier! <br>
# # 7. Make predictions on test data
# Now, let us test this classifer on the test data set. First, we need to get the test data ready.
# True facies labels (ground truth) for the held-out test well.
y_test = test_data['Facies']
# Select exactly the columns used for training (same names, same order) via the
# `features` list, rather than dropping everything else -- this guarantees the
# feature layout matches what the scaler and classifier saw during fitting,
# even if the CSV column order ever changes.
test_well_features = test_data[features]
# NOTE(review): `scaler` must be the StandardScaler fitted on the training data
# in Tasks 3/4 -- test data is transformed with the training-set statistics.
X_test = scaler.transform(test_well_features) # This is the data on which we are going to make predictions using the SVM classifier that you just trained.
# <font color = red>**Task 8:**</font> Make predictions on test data, and assign the predictions to a new variable <font color=blue>*y_pred*</font>. <font color = red> **(10 points)**</font>
#
# **HINT:** To make predictions, all you need to do is something like <font color=blue>*name_of_classifier.predict(test_data)*</font>.
# # 8. Evaluation
# Because we know the true facies labels in the test data set, we can use these true labels to evaluate how good our predictions are.
from sklearn.metrics import classification_report
# Per-facies precision / recall / F1 of the predictions against the test well's
# true labels; facies_labels_unique maps facies codes 1..9 to their abbreviations.
print(classification_report(y_test, y_pred, target_names=facies_labels_unique))
# Your results should look very similar to the following:
# <img src = 'ClassificationReport.PNG'>
# Remember that, precision and recall are metrics that tell us how the classifier is performing for individual facies. Precision is the probability that, given a classification result for a sample, the sample actually belongs to that class. Recall is the probability that a sample will be correctly classified for a given class. For example, if the classifier predicts that an interval is fine siltstone (FSiS), there is a 64% probability that the interval is actually fine siltstone (precision). If an interval is actually fine siltstone, there is a 55% probability that it will be correctly classified (recall). The F1 score combines both precision and recall to give a single measure of relevancy of the classifier results.
# <font color = red>**Task 9:**</font> Interpret the precision and recall scores for CSiS. <font color = red> **(10 points)**</font>
#
#
# Now, let us plot up the predictions in the log-plot form and compare them with the ground truth
from faciesplot import compare_facies_plot # import the plotting codes
# Work on an explicit copy: `test_data` is a slice of `data`, and adding a
# column to a slice relies on pandas' chained-assignment behavior (the write
# may or may not propagate to `data`). A copy makes the intent unambiguous.
test_data = test_data.copy()
test_data['Prediction'] = y_pred
# Side-by-side log plot of true facies vs. the classifier's predictions.
compare_facies_plot(test_data, 'Prediction',facies_colors)
plt.show()
# Excellent!
# # 9. Applications to SVM to geoscience
# <font color = red>**Task 10:**</font> Do a literature search and look for at least one example where SVM is used to solve some geoscience-related problems. Then, report the source of the information (e.g., URL, DOI, etc.), and summarize the example using a few sentences. <font color = red> **(5 points)**</font>
# (answer to Task 10:)
# # Bonus
# In the above example, our prediction precision is 0.49, recall is 0.46, and F1 score is 0.44. Try to improve the prediction. <font color = red> **(10 points)**</font> <br>
# <br>
# For example, in the above example, I asked you to use the following parameters: kernel = 'rbf', C = 10, gamma = 1, when setting up the support vector machine classifier. Please feel free to try other parameter settings.
# (answer to Bonus)
#
# ## References
#
# 1\. <NAME>., 2015. Seismic Petrophysics: Part 1, *The Leading Edge*, 34 (4). [doi:10.1190/tle34040440.1](http://dx.doi.org/10.1190/tle34040440.1)
#
# 2\. <NAME>., and <NAME>, 2003. An Integrated Application of Neural Network and Markov Chain Techniques to Prediction of Lithofacies from Well Logs, *KGS Open-File Report* 2003-50, 6 pp. [pdf](http://www.kgs.ku.edu/PRS/publication/2003/ofr2003-50.pdf)
#
# 3\.<NAME>., <NAME>, and <NAME>, 2007, Comparison of four approaches to a rock facies classification problem, *Computers & Geosciences*, 33 (5), 599-617 pp. [doi:10.1016/j.cageo.2006.08.011](http://dx.doi.org/10.1016/j.cageo.2006.08.011)
#
# 4\. <NAME>., 2016, Facies Classification Using Machine Learning, The Leading Edge, 35(10). [doi:10.1190/tle35100906.1](http://dx.doi.org/10.1190/tle35100906.1)
# | Lab4/Lab4_SVM_LabExercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# **EL ENUNCIADO DE ESTE EJERCICIO PIDE RESOLVERLO CON PANDAS**
#
# **SE AGREGA LA RESOLUCIÓN DE SPARKSQL DE TODAS FORMAS**
#
# > https://piazza.com/class_profile/get_resource/jkr2voxi1yw4wt/jkr2vqu7n114zx
#
# El GCPD (Gotham City Police Dept) recolecta la información de casos policiales que acontecen en Ciudad Gótica. Esta información se encuentra guardada en un dataframe con el siguiente formato: (fecha, id_caso, descripcion, estado_caso, categoria, latitud, longitud).
#
# Los posibles estados que puede tener un caso son 1: caso abierto, 2: caso resuelto, 3: cerrado sin resolución.
#
# Las fechas se encuentran en el formato YYYY-MM-DD. Por otro lado el comisionado Gordon guarda un registro detallado sobre en cuáles casos fue activada la batiseñal para
# pedir ayuda del vigilante, Batman. Esta información se encuentra en un Dataframe con el siguiente formato (id_caso, respuesta), siendo campo respuesta si la señal tuvo una respu
# esta positiva (1) o negativa (0) de parte de él.
#
# El sector encargado de las estadísticas oficiales del GCPD quiere con esta información analizar las siguientes situaciones:
#
# - Tasa de resolución de casos de la fuerza policial por categoría de caso (considerando aquellos casos en los que no participó Batman).
#
# ---
# +
# Set-up and a quick look at the two mock databases.
import pyspark
# Local SparkSession; getOrCreate() reuses an existing session if one is live.
spark = pyspark.sql.SparkSession.builder.appName("Batman").getOrCreate()
# NOTE(review): without inferSchema=True every CSV column is read as a string;
# the comparisons below (estado_caso=2, respuesta=0) rely on implicit casts.
df_gcpd = spark.read.csv('../data/2018C1_GCPD.csv', header=True)
df_gcpd.createOrReplaceTempView('GCPD')
df_gordon = spark.read.csv('../data/2018C1_gordon.csv', header=True)
df_gordon.createOrReplaceTempView('GORDON')
# +
# Resolution rate per category, excluding cases where Batman helped: the LEFT
# JOIN keeps every GCPD case, and the WHERE clause retains cases with either a
# negative bat-signal response (0) or no signal record at all (NULL).
# estado_caso = 2 means "case resolved".
query = "SELECT GCPD.categoria, (SUM(case when GCPD.estado_caso=2 then 1 else 0 end) / COUNT(GCPD.estado_caso)) as tasa_resolucion \
from GCPD left join GORDON on GCPD.id_caso = GORDON.id_caso \
where GORDON.respuesta=0 or GORDON.respuesta is null \
group by GCPD.categoria"
spark.sql(query).show()
| sparksql/2018C1_1_GCPD.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (cgvae)
# language: python
# name: cgvae
# ---
# ## Neural Networks
#
# The main purpose of this notebook is to help you understand how the process of backpropagation helps us to train a neural network by tuning the weights to maximise predictive accuracy. Readers should be familiar with the general concept of neural networks before attempting to fill in the notebook.
#
# For a more formal explanation of backpropagation, Bishop's [*Pattern Recognition and Machine Learning*](http://users.isr.ist.utl.pt/~wurmd/Livros/school/Bishop%20-%20Pattern%20Recognition%20And%20Machine%20Learning%20-%20Springer%20%202006.pdf) covers it in detail in section 5.3. I found it to be useful to sit down with a pen and paper to draw the network diagrams and map how the inputs and error move forwards and backwards through the network respectively!
# ## Import Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
# ## Create Dataset
#
# Use the generative process of a linear model - i.e. a weighted sum of the features plus Gaussian noise
# +
n = 1000  # number of observations in the training set
p = 5  # number of parameters, including the intercept
# Draw the "true" coefficients the model will have to recover.
beta = np.random.uniform(-10, 10, p)
for i, b in enumerate(beta):
    print(f'\u03B2{i}: {round(b, 3)}')
# -
# Sample features X1-X4 uniformly on [0, 10), then prepend a constant
# intercept column X0 = 1.
X = np.random.uniform(0, 10, (n, p - 1))
X0 = np.ones((n, 1))
X = np.hstack([X0, X])
# Target: linear combination of the features plus a Gaussian error term.
Y = X @ beta + np.random.normal(0, 10, n)
# +
#Concatenate to create dataframe
# Features (X0..X4) and the target Y are joined column-wise into one DataFrame.
dataFeatures = pd.DataFrame(X)
dataFeatures.columns = [f'X{i}' for i in range(p)] #Name feature columns
dataTarget = pd.DataFrame(Y)
dataTarget.columns = ['Y'] #Name target
data = pd.concat([dataFeatures, dataTarget], axis = 1)
# -
# ## Quickly visualise the dataset
print(f'Number of Rows: {data.shape[0]}')
print(f'Number of Columns: {data.shape[1]}')
data.head()
# ## Create a neural network
#
# We'll use a single hidden layer and tanh activation function
class NeuralNetwork:
    """Single-hidden-layer regression network trained by backpropagation.

    NOTE: this is a teaching exercise -- the bodies of feedforward,
    backpropagation, trainNetwork and predict are intentionally incomplete
    (the `#...` placeholders), so this class does NOT run as-is until the
    blanks are filled in by the reader.
    """
    def __init__(self, data, target, features, hiddenSize, trainTestRatio = 0.9):
        # Target column name and list of feature column names.
        self.target = target
        self.features = features
        #Split up data into a training and testing set
        self.train, self.test = train_test_split(data, test_size=1-trainTestRatio)
        self.input = np.array(self.train[self.features])
        self.hiddenSize = hiddenSize
        # Random-normal weight init; the hidden->output vector carries one
        # extra weight for the hidden-layer bias unit.
        self.weightsInputToHidden = np.random.normal(size = (self.input.shape[1],hiddenSize))
        self.weightsHiddenToOutput = np.random.normal(size = (hiddenSize + 1 ,)) #+1 is for the bias term
        self.y = np.array(self.train[self.target])
        self.output = np.zeros(self.y.shape)
        #Standardise training set
        self.scaler = StandardScaler()
        self.scaler.fit(self.input)
        self.input = self.scaler.transform(self.input)
        #Pre-allocate weight derivatives (ones, so trainNetwork's gradient-norm
        #stopping condition is not triggered before the first update)
        self.dWeightsInputToHidden = np.ones(self.weightsInputToHidden.shape)
        self.dWeightsHiddenToOutput = np.ones(self.weightsHiddenToOutput.shape)
    def feedforward(self):
        """Propagate self.input through the network (TO BE COMPLETED)."""
        #Compute hidden activations, a, and transform them with tanh activation
        self.a = #...
        self.z = #...
        #Add bias term onto z for the next layer of the network
        #Code goes here...
        self.z = self.zWithBias
        #Compute Output
    def backpropagation(self):
        """Compute loss derivatives w.r.t. both weight matrices (TO BE COMPLETED)."""
        normFactor = 1/self.input.shape[0] #Normalising factor for the derivatives
        #Compute Deltas
        #self.deltaOutput and self.deltaHidden
        #Compute Weight derivatives:
        self.dWeightsInputToHidden = #...Make sure dimensions match up with weight matrix
        self.dWeightsHiddenToOutput =#...
    def trainNetwork(self, lr = 0.001, numEpochs = 100):
        """Gradient-descent training loop (TO BE COMPLETED).

        Runs at most numEpochs epochs; stops early once the combined norm of
        the two weight-derivative arrays drops below 0.5.
        """
        #Train by feeding the data through the network and then backpropagating error a set number (numEpochs) of times
        #Apply gradient descent to update the weights
        #Stop training early if the gradients vanish
        ep = 0
        while ep < numEpochs and (np.linalg.norm(self.dWeightsInputToHidden) + np.linalg.norm(self.dWeightsHiddenToOutput)) > 0.5:
            #feedforward and backpropagate
            #Update weights
            #update ep
        print('Training completed')
    def predict(self, x):
        """Return predictions for feature matrix x (TO BE COMPLETED; mirrors feedforward)."""
        #Works in the same way as feedforward:
        pass
# Train on all five features (X0 is the intercept column) with 3 hidden units.
dataInput = np.array(data[['X0', 'X1', 'X2', 'X3', 'X4']])
dataOutput = np.array(data['Y'])
myNN = NeuralNetwork(data, 'Y', ['X0', 'X1', 'X2', 'X3', 'X4'], 3)
# NOTE(review): these calls only work once the exercise blanks in
# NeuralNetwork.feedforward / trainNetwork have been filled in.
myNN.feedforward()
myNN.trainNetwork(lr= 0.001, numEpochs=200000)
# ## Let's see how our model performs
#
# Lets predict the labels of the held out test set and plot them against the true values
# Predict on the held-out test set and plot predicted vs. true labels.
predTest = myNN.predict(myNN.test[myNN.features])
# If the points gather around the line y = x then our model is performing as desired
plt.scatter(myNN.test[myNN.target], predTest)
plt.plot(np.arange(-100,100), np.arange(-100,100))
plt.xlabel('True Label')
plt.ylabel('Predicted Label (Neural Network)')
plt.show()
# # Summary
#
# In reality, the dataset we used to fit our neural network was not a particularly challenging one due to the relationships between variables being linear - you can check to see that using a linear regression on this dataset would give comparable predictive accuracy.
#
# The value in neural networks, though, lies in their ability to approximate more complex relationships by adding more hidden layers to the model architecture. We will see in the notebooks on 'deeper' neural networks (to be published soon, hopefully!) that such models outperform simpler models in terms of predictive accuracy - now that we've got the hang of backpropagation in the limited case presented in this notebook, it's not a big mental step to extend it to a neural network with any number of hidden layers.
| Implementations/neuralNetwork/.ipynb_checkpoints/Neural Network - Regression - Redacted-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import scipy.io as sio
from scipy.misc import imread  # NOTE(review): removed in SciPy >= 1.2 and unused below -- drop or switch to imageio.imread
import tensorflow as tf
import numpy as np
import pandas as pd
import sys
import os
import scipy.io
from find_largest_image import find_largest
import tqdm
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from xgboost import XGBClassifier
import random
from numpy.random import choice
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score, roc_curve
import sklearn.pipeline as pipeline
import sklearn.preprocessing as preprocessing
from sklearn.model_selection import train_test_split  # NOTE(review): duplicate import (already imported above)
# Machine-specific absolute path to the precomputed SigNet feature CSVs.
data_folder = 'C:\\Users\\Mert\\Documents\\GitHub\\sigver_bmg\\data\\downloaded_pp_features\\gpds_signet_all'
user_kernel='linear'
# data_features: per-signature metadata (user_id, sig_id, fakeness);
# visual_features: the corresponding feature vectors, aligned by row index.
data_f = pd.read_csv(os.path.join(data_folder,'data_features.csv'))
visual_f = pd.read_csv(os.path.join(data_folder,'visual_features.csv'))
# # MODEL SELECTION & TRAINING
# Collect SVM decision scores for skilled forgeries and genuine signatures of
# each validation user, over 10 random half/half splits of sig_ids 1..24.
fakes_preds = []
gens_preds = []
sorted_id_list = np.sort(data_f['user_id'].unique())
# Users from index 300 on form development + validation; the first 50 of those
# are the validation users, the rest are development users.
dev_val_user_ids = sorted_id_list[300:]
validation_user_ids = dev_val_user_ids[0:50]
dev_user_ids = dev_val_user_ids[50:]
for iteration in np.arange(0,10):
    # Random 12/12 split of the 24 genuine signature ids per user.
    train_idx, test_idx = train_test_split(np.arange(1,25), train_size=0.5, test_size=0.5)
    # NOTE(review): the four frames below are loop-invariant and could be
    # hoisted out of the iteration loop.
    dev_df = data_f.loc[data_f['user_id'].isin(dev_user_ids)]
    dev_vf = visual_f.loc[dev_df.index]
    val_df = data_f.loc[data_f['user_id'].isin(validation_user_ids)]
    val_vf = visual_f.loc[val_df.index]
    # fakeness: 0 = genuine signature, 1 = skilled forgery.
    dev_df_gen = dev_df.loc[dev_df['fakeness']==0]
    dev_df_fake = dev_df.loc[dev_df['fakeness']==1]
    dev_df_gen_12 = dev_df_gen.loc[dev_df_gen['sig_id'].isin(train_idx)]
    dev_df_valid_12 = dev_df_gen.loc[dev_df_gen['sig_id'].isin(test_idx)]
    val_df_gen = val_df.loc[val_df['fakeness']==0]
    val_df_fake = val_df.loc[val_df['fakeness']==1]
    val_df_gen_12 = val_df_gen.loc[val_df_gen['sig_id'].isin(train_idx)]
    val_df_valid_gen_12 = val_df_gen.loc[val_df_gen['sig_id'].isin(test_idx)]
    # One binary SVM per validation user: positives are the user's own genuine
    # training signatures, negatives are other (dev) users' genuine signatures.
    for user_id in tqdm.tqdm(validation_user_ids, ascii=True):
        clf = SVC(C=1,gamma='scale',class_weight='balanced', probability=False, kernel=user_kernel)
        y_train = (pd.concat([val_df_gen_12.loc[val_df_gen_12['user_id']==user_id],dev_df_gen_12.loc[dev_df_gen_12['user_id']!=user_id]]))['user_id']==user_id
        X_train = visual_f.loc[y_train.index]  # feature rows aligned to labels via the shared index
        clf.fit(X_train, y_train)
        # Score the user's skilled forgeries...
        y_valid_fakes = val_df_fake.loc[(val_df_fake['user_id']==user_id)]
        X_valid_f = visual_f.loc[y_valid_fakes.index]
        fakes_preds.append(clf.decision_function(X_valid_f))
        # ...and the held-out genuine signatures.
        y_valid_gens = val_df_valid_gen_12.loc[val_df_valid_gen_12['user_id']==user_id]
        X_valid_g = visual_f.loc[y_valid_gens.index]
        gens_preds.append(clf.decision_function(X_valid_g))
# # GLOBAL THRESHOLD SELECTION
# +
# Flatten the per-user score lists into single column vectors.
flat_fakes_preds = np.expand_dims(np.array([item for sublist in fakes_preds for item in sublist]),axis=1)
flat_gens_preds = np.expand_dims(np.array([item for sublist in gens_preds for item in sublist]),axis=1)
all_preds = np.vstack((flat_fakes_preds,flat_gens_preds))
# Labels: 0 = skilled forgery, 1 = genuine signature.
all_labels = np.vstack((np.zeros((flat_fakes_preds.shape[0],1)),np.ones((flat_gens_preds.shape[0],1))))
fpr,tpr,threshold = roc_curve(all_labels,all_preds)
fnr = 1 - tpr
# Equal Error Rate: the ROC operating point where FNR ~= FPR; keep the
# corresponding threshold as the global decision threshold.
EER = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
eer_th = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
print('EER_glob : ', EER*100,'\nEER_Threshold_glob : ', eer_th)
glob_th = eer_th
# +
# Per-user EER: compute an EER on each user's own scores and average.
assert len(fakes_preds)==len(gens_preds)
EER_accum=0
for idx,val in enumerate(fakes_preds):
    user_fakes_preds = np.expand_dims(np.array(fakes_preds[idx]),axis=1)
    user_gens_preds = np.expand_dims(np.array(gens_preds[idx]),axis=1)
    all_user_preds = np.vstack((user_fakes_preds,user_gens_preds))
    all_user_labels = np.vstack((np.zeros((user_fakes_preds.shape[0],1)),np.ones((user_gens_preds.shape[0],1))))
    fpr,tpr,threshold = roc_curve(all_user_labels,all_user_preds)
    fnr = 1 - tpr
    EER = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
    EER_accum += EER
print('EER_user : ', (EER_accum*100)/len(fakes_preds))
# -
print(glob_th)
# # TRAIN AND TEST ON THE EXPLOITATION SET
# +
# Final evaluation on the exploitation users (first 300), reusing the global
# threshold chosen on the validation set.
test_gens_preds = []
test_fakes_preds = []
exp_user_ids = sorted_id_list[:300]
train_idx, test_idx = train_test_split(np.arange(1,25), train_size=0.5)
exp_df = data_f.loc[data_f['user_id'].isin(exp_user_ids)]
exp_vf = visual_f.loc[exp_df.index]
exp_df_gen = exp_df.loc[exp_df['fakeness']==0]
exp_df_fake = exp_df.loc[exp_df['fakeness']==1]
# 10 skilled forgeries per user, sig_ids sampled without replacement from 1..30.
exp_df_fake_10 = exp_df_fake.loc[exp_df_fake['sig_id'].isin(choice(np.arange(1,31),10,replace=False))]
exp_df_gen_12 = exp_df_gen.loc[exp_df_gen['sig_id'].isin(train_idx)]
exp_df_valid_gen_12 = exp_df_gen.loc[exp_df_gen['sig_id'].isin(test_idx)]
# Training negatives come from the dev+val users' genuine signatures (14 per user).
dev_val_df = data_f.loc[data_f['user_id'].isin(dev_val_user_ids)]
dev_val_vf = visual_f.loc[dev_val_df.index]
dev_val_df_gen = dev_val_df.loc[dev_val_df['fakeness']==0]
dev_val_df_fake = dev_val_df.loc[dev_val_df['fakeness']==1]
dev_val_df_valid_gen_14 = dev_val_df_gen.loc[dev_val_df_gen['sig_id'].isin(choice(np.arange(1,25),14,replace=False))]
# -
# Same per-user SVM protocol as on the validation set.
for user_id in tqdm.tqdm(exp_user_ids, ascii=True):
    clf = SVC(C=1,gamma='scale',class_weight='balanced', probability=False, kernel=user_kernel)
    y_train = (pd.concat([exp_df_gen_12.loc[exp_df_gen_12['user_id']==user_id],dev_val_df_valid_gen_14.loc[dev_val_df_valid_gen_14['user_id']!=user_id]]))['user_id']==user_id
    X_train = visual_f.loc[y_train.index]
    clf.fit(X_train, y_train)
    y_valid_fakes = exp_df_fake_10.loc[(exp_df_fake_10['user_id']==user_id)]
    X_valid_f = visual_f.loc[y_valid_fakes.index]
    test_fakes_preds.append(clf.decision_function(X_valid_f))
    y_valid_gens = exp_df_valid_gen_12.loc[exp_df_valid_gen_12['user_id']==user_id]
    X_valid_g = visual_f.loc[y_valid_gens.index]
    test_gens_preds.append(clf.decision_function(X_valid_g))
flat_test_fakes_preds = np.expand_dims(np.array([item for sublist in test_fakes_preds for item in sublist]),axis=1)
flat_test_gens_preds = np.expand_dims(np.array([item for sublist in test_gens_preds for item in sublist]),axis=1)
# FRR / skilled-forgery FAR at the fixed validation threshold glob_th
# (scores >= threshold are accepted as genuine).
print("____At the EER threshold decided on the Validation set____")
print("FRR : ",(1-len(flat_test_gens_preds[flat_test_gens_preds>=glob_th])/len(flat_test_gens_preds))*100)
print("FARskilled : ",(1-len(flat_test_fakes_preds[flat_test_fakes_preds<glob_th])/len(flat_test_fakes_preds))*100)
# +
# Global EER computed directly on the test scores (threshold-free metric).
all_test_preds = np.vstack((flat_test_fakes_preds,flat_test_gens_preds))
all_test_labels = np.vstack((np.zeros((flat_test_fakes_preds.shape[0],1)),np.ones((flat_test_gens_preds.shape[0],1))))
fpr,tpr,threshold = roc_curve(all_test_labels,all_test_preds)
fnr = 1 - tpr
EER = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
eer_th = threshold[np.nanargmin(np.absolute((fnr - fpr)))]
print('EER_glob for test set: ', EER*100,'\nEER_Threshold_glob for test set: ', eer_th)
# +
# Per-user EER averaged over the exploitation users.
assert len(test_fakes_preds)==len(test_gens_preds)
EER_accum=0
for idx,val in enumerate(test_fakes_preds):
    user_test_fakes_preds = np.expand_dims(np.array(test_fakes_preds[idx]),axis=1)
    user_test_gens_preds = np.expand_dims(np.array(test_gens_preds[idx]),axis=1)
    all_user_test_preds = np.vstack((user_test_fakes_preds,user_test_gens_preds))
    all_user_test_labels = np.vstack((np.zeros((user_test_fakes_preds.shape[0],1)),np.ones((user_test_gens_preds.shape[0],1))))
    fpr,tpr,threshold = roc_curve(all_user_test_labels,all_user_test_preds)
    fnr = 1 - tpr
    EER = fpr[np.nanargmin(np.absolute((fnr - fpr)))]
    EER_accum += EER
print('EER_user for test set : ', (EER_accum*100)/len(test_fakes_preds))
# -
| pp_10fold_lin_signet_orig_split.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="7_gtFoV8BuRx"
# This is a simple example of SimSwap on processing video with multiple faces. You can change the codes for inference based on our other scripts for image or single face swapping.
#
# Code path: https://github.com/neuralchen/SimSwap
#
# Paper path: https://arxiv.org/pdf/2106.06340v1.pdf or https://dl.acm.org/doi/10.1145/3394171.3413630
# + colab={"base_uri": "https://localhost:8080/"} id="0Y1RfpzsCAl9" outputId="a39470a0-9689-409d-a0a4-e2afd5d3b5dd"
## make sure you are using a runtime with GPU
## you can check at Runtime/Change runtime type in the top bar.
# !nvidia-smi
# + [markdown] id="0Qzzx2UpDkqw"
# ## Installation
#
# All file changes made by this notebook are temporary.
# You can try to mount your own google drive to store files if you want.
#
# + colab={"base_uri": "https://localhost:8080/"} id="VA_4CeWZCHLP" outputId="4b0f176f-87e7-4772-8b47-c2098d8f3bf6"
# !git clone https://github.com/neuralchen/SimSwap
# !cd SimSwap && git pull
# + id="Y5K4au_UCkKn" colab={"base_uri": "https://localhost:8080/"} outputId="9691a7a4-192e-4ec2-c3c1-1f2c933d7b6a"
# !pip install insightface==0.2.1 onnxruntime moviepy
# !pip install googledrivedownloader
# !pip install imageio==2.4.1
# + colab={"base_uri": "https://localhost:8080/"} id="gQ7ZoIbLFCye" outputId="bb35e7e2-14b7-4f36-d62a-499ba041cf64"
import os
os.chdir("SimSwap")
# !ls
# + colab={"base_uri": "https://localhost:8080/"} id="gLti1J0pEFjJ" outputId="e93c3f98-01df-458e-b791-c32f7343e705"
from google_drive_downloader import GoogleDriveDownloader
### it seems that google drive link may not be permanent, you can find this ID from our open url.
# Fetch the pretrained ArcFace checkpoint and the SimSwap checkpoints archive.
GoogleDriveDownloader.download_file_from_google_drive(file_id='1TLNdIufzwesDbyr_nVTR7Zrx9oRHLM_N',
                                    dest_path='./arcface_model/arcface_checkpoint.tar')
GoogleDriveDownloader.download_file_from_google_drive(file_id='1PXkRiBUYbu1xWpQyDEJvGKeqqUFthJcI',
                                    dest_path='./checkpoints.zip')
# !unzip ./checkpoints.zip -d ./checkpoints
# + colab={"base_uri": "https://localhost:8080/"} id="aSRnK5V4HI-k" outputId="e688746c-c33a-485c-808c-54a7370f0c53"
## You can upload filed manually
# from google.colab import drive
# drive.mount('/content/gdrive')
### Now onedrive file can be downloaded in Colab directly!
### If the link blow is not permanent, you can just download it from the
### open url(can be found at [our repo]/doc/guidance/preparation.md) and copy the assigned download link here.
### many thanks to woctezuma for this very useful help
# !wget --no-check-certificate "https://sh23tw.dm.files.1drv.<KEY>" -O antelope.zip
# !unzip ./antelope.zip -d ./insightface_func/models/
# + [markdown] id="BsGmIMxLVxyO"
# ## Inference
# + colab={"base_uri": "https://localhost:8080/"} id="PfSsND36EMvn" outputId="f28c98fd-4c6d-40fa-e3c7-99b606c7492a"
import cv2
import torch
import fractions
import numpy as np
from PIL import Image
import torch.nn.functional as F
from torchvision import transforms
from models.models import create_model
from options.test_options import TestOptions
from insightface_func.face_detect_crop_mutil import Face_detect_crop
from util.videoswap import video_swap
from util.add_watermark import watermark_image
# + id="rxSbZ2EDNDlf"
# Plain tensor conversion (normalisation deliberately commented out).
transformer = transforms.Compose([
        transforms.ToTensor(),
        #transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
# ToTensor + normalisation with the standard ImageNet mean/std -- used for the
# ArcFace identity input below.
transformer_Arcface = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ])
# Inverse of the normalisation above, applied in two steps: un-scale by std,
# then un-shift by the mean.
detransformer = transforms.Compose([
        transforms.Normalize([0, 0, 0], [1/0.229, 1/0.224, 1/0.225]),
        transforms.Normalize([-0.485, -0.456, -0.406], [1, 1, 1])
    ])
# + colab={"base_uri": "https://localhost:8080/"} id="wwJOwR9LNKRz" outputId="bdc82f7b-21c4-403f-94d1-b92911698b4a"
# Build test-time options (argparse-based) and point them at the demo inputs.
opt = TestOptions()
opt.initialize()
opt.parser.add_argument('-f') ## dummy arg to avoid bug
opt = opt.parse()
opt.pic_a_path = './demo_file/Iron_man.jpg' ## or replace it with image from your own google drive
opt.video_path = './demo_file/mutil_people_1080p.mp4' ## or replace it with video from your own google drive
opt.output_path = './output/demo.mp4'
opt.temp_path = './tmp'
opt.Arc_path = './arcface_model/arcface_checkpoint.tar'
opt.isTrain = False
crop_size = 224
torch.nn.Module.dump_patches = True
model = create_model(opt)
model.eval()
# Face detector/cropper using the InsightFace "antelope" models.
app = Face_detect_crop(name='antelope', root='./insightface_func/models')
app.prepare(ctx_id= 0, det_thresh=0.6, det_size=(640,640))
pic_a = opt.pic_a_path
# img_a = Image.open(pic_a).convert('RGB')
# Detect + crop the source face, convert BGR -> RGB, normalise for ArcFace.
img_a_whole = cv2.imread(pic_a)
img_a_align_crop, _ = app.get(img_a_whole,crop_size)
img_a_align_crop_pil = Image.fromarray(cv2.cvtColor(img_a_align_crop[0],cv2.COLOR_BGR2RGB))
img_a = transformer_Arcface(img_a_align_crop_pil)
img_id = img_a.view(-1, img_a.shape[0], img_a.shape[1], img_a.shape[2])
# convert numpy to tensor
img_id = img_id.cuda()
#create latent id
img_id_downsample = F.interpolate(img_id, scale_factor=0.5)
latend_id = model.netArc(img_id_downsample)
latend_id = latend_id.detach().to('cpu')
# L2-normalise the identity embedding before feeding it to the swapper.
latend_id = latend_id/np.linalg.norm(latend_id,axis=1,keepdims=True)
latend_id = latend_id.to('cuda')
# Swap the source identity onto every matching face in the target video.
video_swap(opt.video_path, latend_id, model, app, opt.output_path,temp_results_dir=opt.temp_path)
# + id="Rty2GsyZZrI6"
| SimSwap colab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: aipr_dz05
# language: python
# name: aipr_dz05
# ---
# # Analiza i projektiranje računalom - 5. domaća zadaća
# U okviru ove zadaće potrebno je ostvariti metode numeričke integracije po postupku _**Runge-Kutta**_ 4. reda (*u skripti: str. 7-35*), _**trapeznom**_ postupku, _**Eulerovom**_ postupku, _**obrnutom Eulerovom**_ postupku, i _**prediktorsko-korektorskom**_ postupku. Sustav je općenitog oblika $\vec{x} = \vec{A}\vec{x} + \vec{B}\vec{r(t)}$. Program treba (iz datoteka) učitavati matrice linearnog sustava diferencijalnih jednadžbi ($\vec{A}$ i $\vec{B}$) te početno stanje $\vec{x}(t = 0)$.
# Za uporabu trapeznog i obrnutog Eulerovog postupka potrebno je zadani linearni sustav prethodno transformirati u eksplicitni oblik (*skripta 7-24, 25*).
# Potrebno je bez prevođenja programa omogućiti zadavanje **željenog koraka integracije** (T) i **vremenskog intervala** za koji se provodi postupak $\left[ 0, t_{MAX} \right]$. Za prediktorsko-korektorski postupak potrebno je omogućiti da se bez prevođenja programa bilo koji dostupni eksplicitni postupak može koristiti kao prediktor, bilo koji implicitni postupak kao korektor te da je moguće definirati koliko puta će se korektor primijeniti. Program treba riješavati sustave svim sljedećim postupcima:
# - Eulerovim
# - obrnutim Eulerovim
# - trapeznim
# - Runge-Kutta 4. rede
# - $\text{PE(CE)}^2$ (prediktor Euler, korektor obrnuti Euler)
# - $\text{PECE}$ (prediktor Euler, korektor trapezni postupak)
# Prilikom rada potrebno je ispisivati varijable stanja na ekran, no ne u svakoj iteraciji nego svakih nekoliko iteracija (omogućiti da taj broj zadaje korisnik). Osim na ekran, ispis je uputno preusmjeriti i u datoteku. Nakon završetka postupka potrebno je **grafički prikazati kretanje varijabli stanja** za oba postupka izračunavanja (vodoravna os je vrijeme, na uspravnoj su vrijednosti varijabli stanja). Crtanje se može izvesti bilo kojim pomoćnim alatom, npr. čitanjem izračunatih vrijednosti iz datoteke.
# ## Priprema za izvođenje
# +
import os
# Env flag: set once the working directory has been moved to the repo root.
CD_KEY = "--HW05_IN_ROOT"
# +
# If the flag is unset/empty/false, move one directory up (the `# %cd ..` line
# is a jupytext-encoded IPython magic, so this cell only runs under IPython);
# otherwise just report where we are. Finally mark the move as done.
if (
    CD_KEY not in os.environ
    or os.environ[CD_KEY] is None
    or len(os.environ[CD_KEY]) == 0
    or os.environ[CD_KEY] == "false"
):
    # %cd ..
else:
    print(os.getcwd())
os.environ[CD_KEY] = "true"
# -
# ## Učitavanje paketa
# +
import warnings
from matplotlib import pyplot as plt
import numpy as np
# Project-local integrator implementations and plotting/printing helpers.
from src.constants import SUB
from src.integrators.explicit import EulerIntegrator, RungeKutta4Integrator
from src.integrators.implicit import InverseEulerIntegrator, TrapezoidalIntegrator
from src.integrators.predictor_corrector import PredictorCorrectorIntegrator
from src.utils import get_state_over_time_graphs, print_diffs
# -
# Compact float printing, fixed RNG seed for reproducibility, quiet warnings.
np.set_printoptions(precision=3, suppress=True)
np.random.seed(21051208)
warnings.filterwarnings('ignore')
# ## Zadatci
# ### Zadatak 1
# Izračunajte ponašanje sljedećeg sustava:
# $$
# \begin{aligned}
# & \vec{x_{k+1}} = \begin{bmatrix}
# 0 & 1 \\
# -1 & 0 \\
# \end{bmatrix} \vec{x_{k}} & &
# \vec{x_0} = \begin{bmatrix}
# 1 \\
# 1 \\
# \end{bmatrix} &
# \end{aligned}
# $$
# s periodom integracije $T = 0.01$ i $t_{MAX} = 10$ za sve zadane postupke. Sustav predstavlja matematičko njihalo, gdje je $x_{0_{0}}$ početni odmak od ravnotežnog položaja, a $x_{0_{1}}$ je početna brzina.
# **Odgovor**
# +
# Task 1: undamped mathematical pendulum x' = A x (no input), T = 0.01, t in [0, 10].
t1_step = 0.01
t1_interval =(0, 10)
t1_a = np.array([
    [0, 1],
    [-1, 0],
])
# B is zero: the system is unforced.
t1_b = np.array([
    [0, 0],
    [0, 0],
])
t1_initial_state = np.array([
    [1],
    [1],
])
# Print the state every 100 integration steps.
t1_steps_to_print = 100
# +
# All required integrators; the last two are predictor-corrector combinations.
t1_integrators = (
    EulerIntegrator(),
    InverseEulerIntegrator(),
    TrapezoidalIntegrator(),
    RungeKutta4Integrator(),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=InverseEulerIntegrator(),
    ),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=TrapezoidalIntegrator(),
    ),
)
t1_results = dict()
# -
# Run the four plain integrators.
for integrator in t1_integrators[:-2]:
    t1_results[integrator.name] = integrator(
        step=t1_step,
        interval=t1_interval,
        initial_state=t1_initial_state,
        time_function=None,
        steps_to_print=t1_steps_to_print,
        a=t1_a,
        b=t1_b
    )
# Run PE(CE)^2 (2 corrector passes) and PECE (1 corrector pass).
for integrator, n_corrector_repeats in zip(t1_integrators[-2:], (2, 1)):
    t1_results[integrator.name] = integrator(
        step=t1_step,
        interval=t1_interval,
        initial_state=t1_initial_state,
        time_function=None,
        steps_to_print=t1_steps_to_print,
        a=t1_a,
        b=t1_b,
        n_corrector_repeats=n_corrector_repeats
    )
t1_fig, t1_ax = get_state_over_time_graphs(t1_results)
# Analitičko rješenje sustava je
# $$
# \begin{aligned}
# & \vec{x} = \begin{bmatrix}
# x_{0_{0}}\cos{t} + x_{0_{1}}\sin{t} \\
# x_{0_{1}}\cos{t} - x_{0_{0}}\sin{t} \\
# \end{bmatrix} &
# \end{aligned}
# $$
# Za svaki postupak izračunajte kumulativnu pogrešku koju svaki od postupaka napravi tijekom izvođenja, na način da zbrojite apsolutnu razliku dobivenog i stvarnog rješenja u svakoj točki integracije, a zbroj prikažete na kraju izvođenja programa.
# **Odgovor**
# +
# Closed-form solution of task 1 (a rotation), evaluated at every grid point.
t1_true_solutions = list()
for t in np.arange(t1_interval[0], t1_interval[1] + t1_step, t1_step):
    t1_true_solutions.append([
        [t1_initial_state[0][0] * np.cos(t) + t1_initial_state[1][0] * np.sin(t)],
        [t1_initial_state[1][0] * np.cos(t) - t1_initial_state[0][0] * np.sin(t)]
    ])
t1_true_solutions = np.array(t1_true_solutions)
# +
# Cumulative absolute error per integrator: sum of |numeric - analytic| over
# all grid points, per state variable.
t1_diffs = dict()
for key, value in t1_results.items():
    t1_diffs[key] = np.zeros(shape=t1_true_solutions[0].shape)
    for state, true_solution in zip(value["states"], t1_true_solutions):
        t1_diffs[key] += abs(state["x"] - true_solution)
# -
print_diffs(t1_diffs, n_decimals=6)
# Želimo li npr. dobiti sustav s prigušenjem, $\vec{A}_{2, 2}$ treba postaviti na negativnu vrijednost.
# ### Zadatak 2
# Izračunajte ponašanje sljedećeg sustava:
# $$
# \begin{aligned}
# & \vec{x_{k+1}} = \begin{bmatrix}
# 0 & 1 \\
# -200 & -102 \\
# \end{bmatrix} \vec{x_{k}} & &
# \vec{x_0} = \begin{bmatrix}
# 1 \\
# -2 \\
# \end{bmatrix} &
# \end{aligned}
# $$
# Sustav predstavlja fizikalno njihalo s prigušenjem (zadatak s predavanja). Isprobajte rješavanje s periodom integracije $T = 0.1$ i $t_{MAX} = 1$ za sve zadane postupke i obratite pažnju na numeričku stabilnost (uz zadane početne uvjete)!
# **Odgovor**
# +
# Task 2: damped pendulum with a stiff A matrix; the coarse step T = 0.1 exposes
# numerical (in)stability differences between the methods (see notes below).
t2_step = 0.1
t2_interval =(0, 1)
t2_a = np.array([
    [0, 1],
    [-200, -102],
])
t2_b = np.array([
    [0, 0],
    [0, 0],
])
t2_initial_state = np.array([
    [1],
    [-2],
])
# Print every step (only ~10 steps in total).
t2_steps_to_print = 1
# +
t2_integrators = (
    EulerIntegrator(),
    InverseEulerIntegrator(),
    TrapezoidalIntegrator(),
    RungeKutta4Integrator(),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=InverseEulerIntegrator(),
    ),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=TrapezoidalIntegrator(),
    ),
)
t2_results = dict()
# -
# Plain integrators first, then PE(CE)^2 (2 corrector passes) and PECE (1 pass).
for integrator in t2_integrators[:-2]:
    t2_results[integrator.name] = integrator(
        step=t2_step,
        interval=t2_interval,
        initial_state=t2_initial_state,
        time_function=None,
        steps_to_print=t2_steps_to_print,
        a=t2_a,
        b=t2_b
    )
for integrator, n_corrector_repeats in zip(t2_integrators[-2:], (2, 1)):
    t2_results[integrator.name] = integrator(
        step=t2_step,
        interval=t2_interval,
        initial_state=t2_initial_state,
        time_function=None,
        steps_to_print=t2_steps_to_print,
        a=t2_a,
        b=t2_b,
        n_corrector_repeats=n_corrector_repeats
    )
t2_fig, t2_ax = get_state_over_time_graphs(t2_results)
# **Komentar**
# Vidimo da su ovdje stabilni bili jedino Eulerov, obrnuti Eulerov i trapezni postupak.
# ---
# *Usporedbom rezultata odredite prikladni korak integracije za Runge-Kutta postupak.*
# **Odgovor**
# Čini se da je potreban nešto finiji korak integracije.
# +
# Halve the step three times: candidates 0.05, 0.025, 0.0125.
t2_step_candidates = [(0.1 / (2 ** x)) for x in range(1, 4)]
t2_rk_integrator = [x for x in t2_integrators if x.name == "RungeKutta4Integrator"][0]
t2_rk_results = dict()
# -
# Re-run RK4 on the task-2 system for each candidate step.
for step in t2_step_candidates:
    t2_rk_results[step] = t2_rk_integrator(
        step=step,
        interval=t2_interval,
        initial_state=t2_initial_state,
        time_function=None,
        steps_to_print=t2_steps_to_print,
        a=t2_a,
        b=t2_b
    )
# +
# One subplot per candidate integration step, stacked vertically.
# BUG FIX: the original computed len(t2_step_candidates * 8) -- the length of
# the list repeated 8 times -- instead of len(t2_step_candidates) * 8. The two
# coincide here only because 3 * 8 == 24; the intent is 8 units of figure
# height per subplot, which now holds for any number of candidates.
t2_fig_rk, t2_ax_rk = plt.subplots(
    len(t2_step_candidates),
    1,
    figsize=(16, len(t2_step_candidates) * 8)
)
for i, (step, results) in enumerate(t2_rk_results.items()):
    # Time axis and state matrix (one row per state variable after transpose).
    x_axis = [x["t"] for x in results["states"]]
    y_axis = np.array([x["x"] for x in results["states"]])
    y_axis = y_axis.reshape(len(y_axis), -1).T
    t2_ax_rk[i].set_title(f"Postupak Runge-Kutta 4. reda (T = {step})")
    t2_ax_rk[i].set_xlabel("t")
    t2_ax_rk[i].set_ylabel("x")
    # Plot each state variable; SUB presumably maps digits to Unicode
    # subscripts for the legend labels -- confirm in src.constants.
    for index, y in enumerate(y_axis):
        t2_ax_rk[i].plot(x_axis, y, label=f"x{str(index).translate(SUB)}")
    t2_ax_rk[i].legend()
# -
# **Komentar**
# Vidimo da smo postigli stabilnost već za $T = 0.025$.
# **Odgovor**
# ### Zadatak 3
# Izračunajte ponašanje sljedećeg sustava:
# $$
# \begin{aligned}
# & \vec{x_{k+1}} = \begin{bmatrix}
# 0 & -2 \\
# 1 & -3 \\
# \end{bmatrix} \vec{x_{k}} + \begin{bmatrix}
# 2 & 0 \\
# 0 & 3 \\
# \end{bmatrix} \begin{bmatrix}
# 1 \\
# 1 \\
# \end{bmatrix} & &
# \vec{x_0} = \begin{bmatrix}
# 1 \\
# 3 \\
# \end{bmatrix} &
# \end{aligned}
# $$
# Isprobajte rješavanje s periodom integracije $T = 0.01$ i $t_{MAX} = 10$ za sve zadane postupke.
# **Odgovor**
# +
# Task 3: driven system x' = A x + B r(t) with constant input r(t) = [1, 1]^T,
# T = 0.01, t in [0, 10].
t3_step = 0.01
t3_interval =(0, 10)
t3_a = np.array([
    [0, -2],
    [1, -3],
])
t3_b = np.array([
    [2, 0],
    [0, 3],
])
# Constant forcing term, independent of t.
t3_time_function = lambda t: np.array([
    [1],
    [1],
])
t3_initial_state = np.array([
    [1],
    [3],
])
t3_steps_to_print = 100
# +
t3_integrators = (
    EulerIntegrator(),
    InverseEulerIntegrator(),
    TrapezoidalIntegrator(),
    RungeKutta4Integrator(),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=InverseEulerIntegrator(),
    ),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=TrapezoidalIntegrator(),
    ),
)
t3_results = dict()
# -
# Plain integrators first, then PE(CE)^2 (2 corrector passes) and PECE (1 pass).
for integrator in t3_integrators[:-2]:
    t3_results[integrator.name] = integrator(
        step=t3_step,
        interval=t3_interval,
        initial_state=t3_initial_state,
        time_function=t3_time_function,
        steps_to_print=t3_steps_to_print,
        a=t3_a,
        b=t3_b
    )
for integrator, n_corrector_repeats in zip(t3_integrators[-2:], (2, 1)):
    t3_results[integrator.name] = integrator(
        step=t3_step,
        interval=t3_interval,
        initial_state=t3_initial_state,
        time_function=t3_time_function,
        steps_to_print=t3_steps_to_print,
        a=t3_a,
        b=t3_b,
        n_corrector_repeats=n_corrector_repeats
    )
t3_fig, t3_ax = get_state_over_time_graphs(t3_results)
# ### Zadatak 4
# Izračunajte ponašanje sljedećeg sustava:
# $$
# \begin{aligned}
# & \vec{x_{k+1}} = \begin{bmatrix}
# 1 & -5 \\
# 1 & -7 \\
# \end{bmatrix} \vec{x_{k}} + \begin{bmatrix}
# 5 & 0 \\
# 0 & 3 \\
# \end{bmatrix} \begin{bmatrix}
# t \\
# t \\
# \end{bmatrix} & &
# \vec{x_0} = \begin{bmatrix}
# -1 \\
# 3 \\
# \end{bmatrix} &
# \end{aligned}
# $$
# Isprobajte rješavanje s periodom integracije $T = 0.01$ i $t_{MAX} = 10$ za sve zadane postupke.
# **Odgovor**
# +
# Task 4: driven system with a ramp input r(t) = [t, t]^T.
# NOTE(review): the task text above states t_MAX = 10, but the interval here is
# (0, 1) -- confirm which is intended.
t4_step = 0.01
t4_interval =(0, 1)
t4_a = np.array([
    [1, -5],
    [1, -7],
])
t4_b = np.array([
    [5, 0],
    [0, 3],
])
# Time-proportional forcing term.
t4_time_function = lambda t: np.array([
    [t],
    [t],
])
t4_initial_state = np.array([
    [-1],
    [3],
])
t4_steps_to_print = 100
# +
t4_integrators = (
    EulerIntegrator(),
    InverseEulerIntegrator(),
    TrapezoidalIntegrator(),
    RungeKutta4Integrator(),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=InverseEulerIntegrator(),
    ),
    PredictorCorrectorIntegrator(
        predictor=EulerIntegrator(),
        corrector=TrapezoidalIntegrator(),
    ),
)
t4_results = dict()
# -
# Plain integrators first, then PE(CE)^2 (2 corrector passes) and PECE (1 pass).
for integrator in t4_integrators[:-2]:
    t4_results[integrator.name] = integrator(
        step=t4_step,
        interval=t4_interval,
        initial_state=t4_initial_state,
        time_function=t4_time_function,
        steps_to_print=t4_steps_to_print,
        a=t4_a,
        b=t4_b
    )
for integrator, n_corrector_repeats in zip(t4_integrators[-2:], (2, 1)):
    t4_results[integrator.name] = integrator(
        step=t4_step,
        interval=t4_interval,
        initial_state=t4_initial_state,
        time_function=t4_time_function,
        steps_to_print=t4_steps_to_print,
        a=t4_a,
        b=t4_b,
        n_corrector_repeats=n_corrector_repeats
    )
t4_fig, t4_ax = get_state_over_time_graphs(t4_results)
| dz/dz-05/demo/demo-01_hw-05.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="MJyTf0SFP222" colab_type="text"
# Part of https://github.com/htdt/cartpole-solved
# + id="6Dl1Vqz_uZ1Y" colab_type="code" colab={}
# !pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html
# !pip install -q gym ipdb
# + id="fZg7tVEP6sGj" colab_type="code" colab={}
from collections import deque
import gym, random, math, ipdb
from tqdm import trange
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
gym.logger.set_level(40)
# + id="04xWUEGXFoS1" colab_type="code" colab={}
import matplotlib.pyplot as plt
# %matplotlib inline
# + id="hugvEbVZ16wR" colab_type="code" colab={}
class DQN(nn.Module):
    """Dueling Q-network: shared MLP trunk with separate value and advantage heads."""
    input_dim = 4   # CartPole observation size
    output_dim = 2  # number of discrete actions
    hidden = 64

    def __init__(self):
        super(DQN, self).__init__()
        trunk = [
            nn.Linear(self.input_dim, self.hidden),
            nn.ReLU(),
            nn.Linear(self.hidden, self.hidden),
            nn.ReLU(),
        ]
        self.features = nn.Sequential(*trunk)
        self.adv = nn.Linear(self.hidden, self.output_dim)
        self.val = nn.Linear(self.hidden, 1)

    def forward(self, x):
        """Combine the state value with mean-centred advantages into Q-values."""
        hidden = self.features(x)
        advantage = self.adv(hidden)
        value = self.val(hidden)
        return value + (advantage - advantage.mean(1, keepdim=True))
# + id="6EKVMKR__LpT" colab_type="code" colab={}
class Agent:
    # Hyperparameters (class-level constants).
    output_dim = 2   # number of discrete actions
    lr = 1e-3        # Adam learning rate
    batch_size = 32  # replay-buffer sample size per training step
    gamma = 0.99     # discount factor

    def __init__(self):
        """Create the online and target networks and the optimiser."""
        self.online_net = DQN()
        self.online_net.train()
        self.target_net = DQN()
        self.update_target_net()
        # The target network is only ever updated by copying weights,
        # never by backprop, so freeze its parameters.
        for param in self.target_net.parameters(): param.requires_grad = False
        self.optimiser = optim.Adam(self.online_net.parameters(), lr=self.lr)

    def act(self, state):
        """Greedy action from the online network for a single (unbatched) state."""
        state = torch.FloatTensor(state).unsqueeze(0)
        with torch.no_grad():
            return self.online_net(state).argmax(1).item()

    def act_e_greedy(self, state, epsilon=0.01):
        """Epsilon-greedy action selection: random with prob. epsilon, else greedy."""
        if random.random() < epsilon:
            return random.randrange(self.output_dim)
        else:
            return self.act(state)

    def _sample_batch(self, buffer):
        """Sample a random minibatch of transitions and convert columns to tensors."""
        s, a, r, ns, t = zip(*random.sample(buffer, self.batch_size))
        f, l = torch.FloatTensor, torch.LongTensor
        return f(s), l(a), f(r), f(ns), f(t)

    def train_iter(self, buffer):
        """One Double-DQN gradient step on a sampled minibatch; returns the scalar loss."""
        state, action, reward, next_state, terminal = self._sample_batch(buffer)
        q_value = self.online_net(state)[range(self.batch_size), action]
        with torch.no_grad():
            next_state_action = self.online_net(next_state).max(1)[1]  # online_net to get action for next_state
            next_qv = self.target_net(next_state)[range(self.batch_size), next_state_action]  # target_net to get Q
        # Terminal transitions contribute only the immediate reward.
        target_qv = reward + self.gamma * (1 - terminal) * next_qv
        loss = (q_value - target_qv).pow(2).mean()
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()
        return loss.item()

    def update_target_net(self):
        """Copy the online network's weights into the target network."""
        self.target_net.load_state_dict(self.online_net.state_dict())
# + id="2Urg4wcU-pof" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 354} outputId="c0b3dc92-bf89-490d-8790-ee21d9658c91"
# Replay buffer holds the 5000 most recent transitions.
buffer = deque(maxlen=5000)
env = gym.make("CartPole-v0")
agent = Agent()
rewards, loss = [], []
episode_reward = 0
state = env.reset()
for i in trange(10000):
    # Exponentially decaying exploration rate.
    eps = .01 + .99 * math.exp(-i / 500)  # close to .01 at 3500
    action = agent.act_e_greedy(state, eps)
    next_state, reward, done, _ = env.step(action)
    buffer.append((state, action, reward, next_state, done))
    state = next_state
    episode_reward += reward
    if done:
        state = env.reset()
        rewards.append(episode_reward)
        episode_reward = 0
    if i > 100:
        # Start training only after the buffer has accumulated some transitions.
        loss.append(agent.train_iter(buffer))
    if i > 100 and i % 100 == 0:
        # Periodically sync the target network with the online network.
        agent.update_target_net()
# Plot per-episode reward (left) and training loss (right).
plt.figure(figsize=(20,5))
plt.subplot(131)
plt.plot(rewards)
plt.subplot(132)
plt.plot(loss)
# + id="-FXQYSSh67Iw" colab_type="code" colab={}
| dqn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/DR1CH/OOP-58002/blob/main/Prelim_Exam_in_OOP.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="AoZHfTTYS3a5"
# Prelim Exam in OOP
# + [markdown] id="P3QG6pzbS-sg"
# * Write a Python program to display your full name, student number, age, and course
# * Create a class named Student with attributes: Name, Student_No, Age, School, and Course
# * Create an object name Myself and assign an instance for each attribute.
# * Create a method Self () using an instantiation of a class.
# * Insert your GitHub link "Prelim Exam" from your repository named "OOP 58002"
#
#
# + colab={"base_uri": "https://localhost:8080/"} id="F6kt2uouTgTd" outputId="475d3b84-6dfd-4177-e4ba-da2c2e60886b"
class Person:
    """Holds a student's details and prints a short self-introduction."""

    def __init__(self, student, number, age, school, course):
        # Store each constructor argument under the attribute of the same name.
        for attr, value in zip(
            ("student", "number", "age", "school", "course"),
            (student, number, age, school, course),
        ):
            setattr(self, attr, value)

    def myFunction(self):
        """Print the stored details, one labelled line each."""
        for label, value in (
            ("Hi my name is", self.student),
            ("years of age:", self.age),
            ("my student number is", self.number),
            ("freshmen student in", self.school),
            ("and currently taking ", self.course),
        ):
            print(label, value)
# Instantiate with the student's details and print the introduction.
p1= Person("<NAME>.", 202117685, 19, "Adamson University", "Bachelor of Science in Computer Engineering")
p1.myFunction()
| Prelim_Exam_in_OOP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="8nKdL3Crw81q"
# **Remember to change runtime type to GPU.**
#
# Runtime > Change runtime type > GPU
# + [markdown] id="IYCkiK58apNm"
# **Clone git repo**
# + colab={"base_uri": "https://localhost:8080/"} id="RGRkm-Hkn6Tl" executionInfo={"status": "ok", "timestamp": 1620635105101, "user_tz": -480, "elapsed": 8946, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="f1b017be-57c8-47ad-beb2-b670a991581c"
# !git clone https://github.com/wein98/webapp-first-order-motion.git
# + colab={"base_uri": "https://localhost:8080/"} id="bSB2YeZEoCiJ" executionInfo={"status": "ok", "timestamp": 1620635107572, "user_tz": -480, "elapsed": 802, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="7f4d3467-e500-43fc-acf3-f2dd0a2283d1"
# cd webapp-first-order-motion/first-order-motion
# + [markdown] id="R6l5IVMrxbDt"
# **Image and video input here.**
#
# Upload your input image and video under /content/webapp-first-order-motion/first-order-motion.
# + [markdown] id="WXNmh9efHHTx"
# **Download first order motion pretrained model.**
# + colab={"base_uri": "https://localhost:8080/"} id="EX7GytT7HDpG" executionInfo={"status": "ok", "timestamp": 1620635134538, "user_tz": -480, "elapsed": 13105, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="1f2ba425-a9a7-4a14-b37d-6e8774587850"
# !gdown https://drive.google.com/uc?id=19eg-JkeauMAOlIBJPdIrAzgocAjRWp7T
# + [markdown] id="llamfcYueQow"
# Function to display output in Google Colab
# + id="R2k6gBtMbByc" executionInfo={"status": "ok", "timestamp": 1620636611937, "user_tz": -480, "elapsed": 809, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}}
import imageio
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from skimage.transform import resize
from IPython.display import HTML
import warnings
warnings.filterwarnings("ignore")
def _read_frames(reader):
    """Drain an imageio reader into a list of 256x256 RGB frames, then close it.

    Some truncated/corrupt streams raise RuntimeError mid-iteration; the frames
    decoded up to that point are kept.
    """
    frames = []
    try:
        for frame in reader:
            frames.append(frame)
    except RuntimeError:
        pass
    reader.close()
    return [resize(frame, (256, 256))[..., :3] for frame in frames]


def display(image_path, video_path, generated_vid_path):
    """Build a matplotlib animation showing the source image, the driving
    video and the generated video side by side.

    Parameters are file paths; returns a matplotlib ArtistAnimation.
    """
    source_image = imageio.imread(image_path)
    source = resize(source_image, (256, 256))[..., :3]
    # The two videos were previously read by duplicated inline loops;
    # both now go through the same helper. (The unused fps reads were dropped.)
    driving = _read_frames(imageio.get_reader(video_path))
    generated = _read_frames(imageio.get_reader(generated_vid_path))

    fig = plt.figure(figsize=(8 + 4 * (generated is not None), 6))
    ims = []
    # One animation frame per driving frame: source | driving | generated.
    for i in range(len(driving)):
        cols = [source, driving[i]]
        if generated is not None:
            cols.append(generated[i])
        im = plt.imshow(np.concatenate(cols, axis=1), animated=True)
        plt.axis('off')
        ims.append([im])
    ani = animation.ArtistAnimation(fig, ims, interval=50, repeat_delay=1000)
    plt.close()
    return ani
# + [markdown] id="e-xoIIeKaBIF"
# ## Run the demo.py with our provided inputs
#
# change the `src_img` and `src_vid` arguments respectively to your uploaded filename to run your own input.
# + colab={"base_uri": "https://localhost:8080/"} id="dRGwEkoKo8Ti" executionInfo={"status": "ok", "timestamp": 1620634837817, "user_tz": -480, "elapsed": 97937, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="7f22082f-d048-4695-a01c-92c31574ed9f"
# !python demo.py
# + colab={"base_uri": "https://localhost:8080/", "height": 452} id="IyRUdGWKdu8K" executionInfo={"status": "ok", "timestamp": 1620636647561, "user_tz": -480, "elapsed": 33191, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="69cce4fb-dda5-44eb-f28e-a40628f544a0"
HTML(display('stage1_image.jpg', "stage1_video.mp4", "generated.mp4").to_html5_video())
# + id="udsezZ51pCrv" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1620635201528, "user_tz": -480, "elapsed": 61055, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "03685829748003472020"}} outputId="908a6254-c136-452c-9412-3c988e762060"
# !python demo.py --src_img obama.jpg --src_vid musk.mp4
| webapp-first-order-motion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Data Imports
import numpy as np
import pandas as pd
from pandas import Series,DataFrame
# Plot imports
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style('whitegrid')
# %matplotlib inline
# +
from sklearn import linear_model
from sklearn.datasets import load_iris
# Import the data
iris = load_iris()
# Grab features (X) and the Target (Y)
X = iris.data
Y = iris.target
# +
# Grab data
iris_data = DataFrame(X,columns=['Sepal Length','Sepal Width','Petal Length','Petal Width'])
# Grab Target
iris_target = DataFrame(Y,columns=['Species'])
# +
def flower(num):
    '''Takes in numerical class, returns flower name'''
    # NOTE(review): 'Veriscolour' is the spelling used throughout this
    # notebook (sic); kept verbatim for consistency.
    names = {0: 'Setosa', 1: 'Veriscolour'}
    return names.get(num, 'Virginica')
# Apply
iris_target['Species'] = iris_target['Species'].apply(flower)
# -
# Create a combined Iris DataSet
iris = pd.concat([iris_data,iris_target],axis=1)
from sklearn.model_selection import train_test_split
# Split the data into Training and Testing sets
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4,random_state=3)
# Import testing metrics from SciKit Learn
from sklearn import metrics
# K neighbour classifier
# +
#Import from SciKit Learn
from sklearn.neighbors import KNeighborsClassifier
# We'll first start with k=6
# Import the kNeighbors Classifiers
knn = KNeighborsClassifier(n_neighbors = 6)
# Fit the data
knn.fit(X_train,Y_train)
# Run a prediction
Y_pred = knn.predict(X_test)
# Check Accuracy against the Testing Set
print(metrics.accuracy_score(Y_test,Y_pred))
# +
# Import the kNeighbors Classifiers
knn = KNeighborsClassifier(n_neighbors = 1)
# Fit the data
knn.fit(X_train,Y_train)
# Run a prediction
Y_pred = knn.predict(X_test)
# Check Accuracy against the Testing Set
print (metrics.accuracy_score(Y_test,Y_pred))
# +
# Test k values 1 through 20
k_range = range(1, 21)
# Accumulates test-set accuracy for each k
accuracy = []
# Refit and score the classifier for every k in the range
for k in k_range:
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, Y_train)
    Y_pred = knn.predict(X_test)
    accuracy.append(metrics.accuracy_score(Y_test, Y_pred))
# -
# Accuracy as a function of k.
plt.plot(k_range, accuracy)
plt.xlabel('K value for kNN')  # FIX: label previously read "for for kNN"
plt.ylabel('Testing Accuracy')
| K - Nearest Neighbour Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import lightgbm as lgb
import numpy as np
import pandas as pd
import csv
from hyperopt import fmin
from hyperopt import hp
from hyperopt import tpe
from hyperopt import Trials
from hyperopt import STATUS_OK
from hyperopt.pyll.stochastic import sample
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelBinarizer
import matplotlib.pyplot as plt
from timeit import default_timer as timer
# -
def generate_features(bands=['u','g','i','r','z'], use_stokes=True, use_averages=False):
    """Build the ordered list of per-band photometric feature names.

    For every band: the base Petrosian/magnitude features, then (optionally)
    the Stokes parameters, then the valid colour indexes formed with every
    other band (each paired with its `petro_` variant). Cross-band averages
    are appended at the end when requested.
    """
    per_band = [
        'dered',
        'petroRad',
        'petroR50',
        'petroR90',
        'petro_R50_R90_ratio',
        'petroMag',
    ]
    stokes = ['stokes_q', 'stokes_u', 'stokes_p']
    averages = [
        'avg_petro_rad',
        'avg_petro_R50',
        'avg_petro_R90',
        'avg_petro_R50_R90_ratio'
    ]
    stokes_averages = [
        'avg_stokes_q',
        'avg_stokes_u',
    ]
    # Only these band pairs form meaningful colour indexes.
    valid_pairs = {
        'u_g_colour_index',
        'g_r_colour_index',
        'r_i_colour_index',
        'i_z_colour_index',
    }

    names = []
    for band in bands:
        names.extend(f'{base}_{band}' for base in per_band)
        if use_stokes:
            names.extend(f'{s}_{band}' for s in stokes)
        for other in bands:
            index = f'{band}_{other}_colour_index'
            if index in valid_pairs:
                names.append(index)
                names.append(f'petro_{index}')
    if use_averages:
        names.extend(averages)
        if use_stokes:
            names.extend(stokes_averages)
    return names
# +
SPIRIAL_GALAXY_TYPE = 0
ELLIPTICAL_GALAXY_TYPE = 1
UNKNOWN_GALAXY_TYPE = 2
features = generate_features()
target_column = 'galaxy_type'
CONFIDENCE_LEVEL = 0.8
# -
input_data = pd.read_csv('data/input.csv')
# +
data = input_data.copy()
# Combined vote fraction for any spiral morphology (clockwise, anticlockwise, edge-on).
combined_spiral = data.spiralclock + data.spiralanticlock + data.edgeon
data['galaxy_type'] = UNKNOWN_GALAXY_TYPE
data['combined_spiral'] = combined_spiral
# Assign a class only when the debiased vote fraction exceeds the confidence threshold.
data.loc[data.debiased_elliptical > CONFIDENCE_LEVEL, 'galaxy_type'] = ELLIPTICAL_GALAXY_TYPE
data.loc[data.debiased_spiral > CONFIDENCE_LEVEL, 'galaxy_type'] = SPIRIAL_GALAXY_TYPE
# NOTE(review): DataFrame.size counts cells (rows * columns), not rows.
# The ratios below are still valid because every count carries the same
# column multiplier, but the raw counts are not row counts — confirm intended.
num_of_elliptical = data[data.galaxy_type == ELLIPTICAL_GALAXY_TYPE].size
num_of_spirial = data[data.galaxy_type == SPIRIAL_GALAXY_TYPE].size
num_of_unknown = data[data.galaxy_type == UNKNOWN_GALAXY_TYPE].size
total_count = data.size
print(num_of_elliptical / total_count)
print(num_of_spirial / total_count)
print(num_of_unknown / total_count)
print(num_of_spirial / (num_of_elliptical + num_of_spirial))
# -
# http://skyserver.sdss.org/dr12/SkyserverWS/ImgCutout/getjpeg?ra=224.5941&dec=-1.09&width=512
from urllib.request import urlopen
from PIL import Image
# +
GZ_IMAGE_SIZE = 424        # native Galaxy Zoo cutout size in pixels
BASE_CUTOUT_SCALE = 0.008  # base arcsec/pixel factor per unit Petrosian radius

def download_image(row, image_size=GZ_IMAGE_SIZE, padding_scale=1.0):
    """Fetch an SDSS JPEG cutout centred on the object described by *row*.

    row: mapping/Series providing 'ra', 'dec' and 'petroRad_r'.
    image_size: requested width/height of the cutout in pixels.
    padding_scale: extra margin around the object (1.0 = none).
    Returns a PIL Image.
    """
    import io  # local import keeps this notebook cell self-contained

    petroRad = row['petroRad_r']
    ra = row['ra']
    dec = row['dec']
    # Scale so the object spans a comparable fraction of the frame
    # regardless of the requested image size or the object's radius.
    scale = BASE_CUTOUT_SCALE * GZ_IMAGE_SIZE/image_size * petroRad * padding_scale
    url = f'http://skyserver.sdss.org/dr15/SkyserverWS/ImgCutout/getjpeg?ra={ra}&dec={dec}&width={image_size}&height={image_size}&scale={scale}'
    # FIX: read the whole response and close the socket. PIL loads images
    # lazily, so Image.open(urlopen(url)) would leave the HTTP connection open.
    with urlopen(url) as response:
        return Image.open(io.BytesIO(response.read()))
# -
img = download_image(data.loc[0])
plt.imshow(img, cmap=plt.get_cmap('gray'))
img = download_image(data.loc[0], image_size=224)
plt.imshow(img, cmap=plt.get_cmap('gray'))
# +
orig_size = 424
small_size = 64
scale = small_size/float(orig_size)
small_img = img.resize((64,64), Image.ANTIALIAS)
plt.imshow(small_img, cmap=plt.get_cmap('gray'))
# -
rand_scale = np.random.uniform(0.9, 1.1)
new_size = int(rand_scale * orig_size)
new_size
resized_img = img.resize((new_size, new_size), Image.ANTIALIAS)
plt.imshow(resized_img, cmap=plt.get_cmap('gray'))
# +
left = (orig_size - 212)/2
top = left
right = (orig_size + 212)/2
bottom = right
cropped_image = img.crop((left, top, right, bottom))
plt.imshow(cropped_image, cmap=plt.get_cmap('gray'))
# +
def crop_dimensions(curr_size, new_size, top_offset=0, left_offset=0):
    """Return a (left, top, right, bottom) box for a centred crop.

    The crop of side `new_size` is centred inside a square of side
    `curr_size`, then shifted by the optional pixel offsets.
    """
    margin = curr_size - new_size
    top = int(margin / 2 + top_offset)
    left = int(margin / 2 + left_offset)
    return (left, top, left + new_size, top + new_size)
def centre_crop(img):
    """Centre-crop a numpy image array to the module-level `crop_size`.

    NOTE(review): crop_dimensions returns (left, top, right, bottom), yet the
    array is sliced as [left:right, top:bottom] — horizontal coordinates on the
    row axis. Harmless for this symmetric centre crop (left == top), but
    confirm before reusing with non-zero offsets.
    """
    # return img.crop(crop_dimensions(image_size, crop_size))
    (left, top, right, bottom) = crop_dimensions(image_size, crop_size)
    return img[left:right,top:bottom,:]
def create_crops(img, size=224):
    """Return 5 crops of a PIL image: one centred plus one jittered crop per quadrant."""
    (width, height) = img.size
    imgs = []
    imgs.append(img.crop(crop_dimensions(width, size)))
    # do the middle third range in the quadrant
    max_offset = (width - size)/3
    min_offset = max_offset / 2
    for idx in range(0,4):
        offset = np.random.uniform(min_offset, max_offset, 2)
        # Flip offset signs so the four jittered crops land in the four quadrants.
        if idx < 2:
            offset[0] = -offset[0]
        if idx % 2 == 0:
            offset[1] = -offset[1]
        cropped_img = img.crop(crop_dimensions(width, size, top_offset=int(offset[0]), left_offset=int(offset[1])))
        cropped_img.show()  # side effect: opens each crop in an external viewer
        imgs.append(cropped_img)
    return imgs
# -
def plot_images(imgs, size=None):
    """Show the first *size* images of *imgs* in a 3-column grid.

    size: number of images to plot; defaults to len(imgs).
    FIX: the default was previously `size=batch_size`, a global that is only
    defined in a later notebook cell, so evaluating this def in order raised
    NameError. `None` -> len(imgs) is equivalent for every existing call.
    """
    if size is None:
        size = len(imgs)
    num_rows = int(np.ceil(size / 3.0))
    print(num_rows)
    figsize_y = 5 * num_rows
    fig = plt.figure(figsize=(20, figsize_y))
    for idx in range(0, size):
        img = imgs[idx]
        # make scale between 0 and 1.0 plotting
        img_min = img.min()
        img_max = img.max()
        img = (img - img_min) / (img_max - img_min)
        fig.add_subplot(num_rows, 3, idx + 1)
        plt.imshow(img, cmap=plt.get_cmap('gray'))
    plt.show()
# +
from keras.preprocessing.image import ImageDataGenerator
from keras import backend as K
K.set_image_dim_ordering('tf')
def augment_images(datagen, X_train, y_train):
    """Optionally run *datagen* over the batch, then centre-crop every image.

    Uses the module-level globals `crop_size` and `centre_crop`, and plots
    the resulting batch via `plot_images` as a side effect.
    """
    imgs = X_train.copy()
    if not datagen is None:
        imgs = apply_augmentation(datagen, X_train, y_train)
    result_imgs = np.empty((imgs.shape[0], crop_size, crop_size, 3))
    for idx, img in enumerate(imgs):
        result_imgs[idx] = centre_crop(img)
    plot_images(result_imgs)
    return result_imgs

def apply_augmentation(datagen, X_train, y_train):
    """Fit the Keras generator on the batch and return the first augmented batch."""
    # Convert to float32 in here
    X_train = X_train.astype('float32')
    datagen.fit(X_train)
    # shuffle=False keeps augmented images aligned with the input order;
    # returning inside the loop yields exactly one batch.
    for X_batch, y_batch in datagen.flow(X_train, y_train, shuffle=False, batch_size=batch_size):
        return X_batch
# -
# ## Load Image Cutouts
# +
batch_size = 9
image_size = 350
crop_size = 224
padding_scale = float(350)/crop_size
X_train = np.empty((batch_size, image_size, image_size, 3), dtype=int)
y_train = []
for idx in range(0, batch_size):
img = download_image(data.loc[idx], image_size=image_size, padding_scale=padding_scale)
X_train[idx] = np.asarray(img)
y_train.append(data.loc[idx, 'galaxy_type'])
# -
# # Data Augmentation
augmented_imgs = np.empty((8, batch_size, crop_size, crop_size, 3))
augmented_imgs[0] = augment_images(None, X_train, y_train)
# ## Normalise Features
channels = np.moveaxis(X_train, 3, 0)
fill = int(np.mean(channels))
fill
datagen = ImageDataGenerator(featurewise_center=True,
featurewise_std_normalization=True
)
augmented_imgs[1] = augment_images(datagen, X_train, y_train)
# ## Random Rotations
datagen = ImageDataGenerator(
rotation_range=180,fill_mode='constant',cval=fill
)
augmented_imgs[2] = augment_images(datagen, X_train, y_train)
# ## Random Shifts
# +
shift = 0.1
datagen = ImageDataGenerator(
width_shift_range=shift,
height_shift_range=shift,
fill_mode='constant',
cval=fill
)
augmented_imgs[3] = augment_images(datagen, X_train, y_train)
# -
# ## Random Flips
datagen = ImageDataGenerator(horizontal_flip=True,
vertical_flip=True
)
augmented_imgs[4] = augment_images(datagen, X_train, y_train)
# ## Random Scaling
# +
datagen = ImageDataGenerator(rescale=0.1, fill_mode='constant')
augmented_imgs[5] = augment_images(datagen, X_train, y_train)
# -
# ## Samplewise normalisation
datagen = ImageDataGenerator(samplewise_center=True,
samplewise_std_normalization=True
)
augmented_imgs[6] = augment_images(datagen, X_train, y_train)
# ## Multiple Augmentations
# +
shift = 0.1
datagen = ImageDataGenerator(featurewise_center=True,
featurewise_std_normalization=True,
# samplewise_center=True,
# samplewise_std_normalization=True,
# width_shift_range=shift,
# height_shift_range=shift,
horizontal_flip=True,
vertical_flip=True,
fill_mode='constant',
rotation_range=180,
rescale=0.1,
brightness_range=(0.9,1.1),
cval=fill
)
augmented_imgs[7] = augment_images(datagen, X_train, y_train)
# -
for augmentations in np.moveaxis(augmented_imgs, 0, 1):
plot_images(augmentations, size=8)
base_img = augmented_imgs[0]/255
mod_img = augmented_imgs[7]
np.min(mod_img)
| notebooks/JPEG Cutout.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Table of Contents<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"><li><span><a href="#Linear-Regression" data-toc-modified-id="Linear-Regression-1"><span class="toc-item-num">1 </span>Linear Regression</a></span><ul class="toc-item"><li><ul class="toc-item"><li><span><a href="#The-Normal-Equation" data-toc-modified-id="The-Normal-Equation-1.0.1"><span class="toc-item-num">1.0.1 </span>The Normal Equation</a></span></li></ul></li><li><span><a href="#Gradient-Descent" data-toc-modified-id="Gradient-Descent-1.1"><span class="toc-item-num">1.1 </span>Gradient Descent</a></span><ul class="toc-item"><li><span><a href="#Batch-Gradient-Descent" data-toc-modified-id="Batch-Gradient-Descent-1.1.1"><span class="toc-item-num">1.1.1 </span>Batch Gradient Descent</a></span></li><li><span><a href="#Stochastic-Gradient-Descent" data-toc-modified-id="Stochastic-Gradient-Descent-1.1.2"><span class="toc-item-num">1.1.2 </span>Stochastic Gradient Descent</a></span></li></ul></li><li><span><a href="#Polynomial-Regression" data-toc-modified-id="Polynomial-Regression-1.2"><span class="toc-item-num">1.2 </span>Polynomial Regression</a></span></li><li><span><a href="#Learning-rate" data-toc-modified-id="Learning-rate-1.3"><span class="toc-item-num">1.3 </span>Learning rate</a></span></li><li><span><a href="#Regularized-Linear-Models" data-toc-modified-id="Regularized-Linear-Models-1.4"><span class="toc-item-num">1.4 </span>Regularized Linear Models</a></span><ul class="toc-item"><li><span><a href="#Ridge-Regression" data-toc-modified-id="Ridge-Regression-1.4.1"><span class="toc-item-num">1.4.1 </span>Ridge Regression</a></span></li><li><span><a href="#Lasso-Regression" data-toc-modified-id="Lasso-Regression-1.4.2"><span class="toc-item-num">1.4.2 </span>Lasso Regression</a></span></li><li><span><a href="#Elastic-Net" data-toc-modified-id="Elastic-Net-1.4.3"><span class="toc-item-num">1.4.3 </span>Elastic Net</a></span></li><li><span><a href="#Early-Stopping" data-toc-modified-id="Early-Stopping-1.4.4"><span 
class="toc-item-num">1.4.4 </span>Early Stopping</a></span></li></ul></li></ul></li><li><span><a href="#Logistic-Regression" data-toc-modified-id="Logistic-Regression-2"><span class="toc-item-num">2 </span>Logistic Regression</a></span></li></ul></div>
# -
# ## Linear Regression
#
# #### The Normal Equation
#
#
# $\hat{\theta} = (X^T X)^{-1} X^T y$
#
# * $\hat{\theta}$ is the value of $\theta$ that minimizes the cost function.
# * $y$ is the vector of target values containing $y^{(1)}$ to $y^{(m)}$.
import numpy as np
import matplotlib.pyplot as plt
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)
plt.scatter(X, y)
plt.xlabel("X1")
plt.ylabel("Y")
plt.show()
X_b = np.c_[np.ones((100, 1)), X] # add x0 = 1 to each instance
theta_best = np.linalg.inv(X_b.T.dot(X_b)).dot(X_b.T).dot(y)
# The actual function that we used to generate the data is $y = 4 + 3x_1 +$ Gaussian noise
theta_best
# predictions using $\hat{\theta}$
X_new = np.array([[0], [2]])
X_new_b = np.c_[np.ones((2, 1)), X_new]
y_predict = X_new_b.dot(theta_best)
y_predict
plt.plot(X_new, y_predict, "r-" , label='predictions')
plt.plot(X, y, "b.")
plt.axis([0, 2, 0, 15])
plt.legend()
plt.show()
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, y)
lin_reg.intercept_, lin_reg.coef_
# The LinearRegression class is based on the *scipy.linalg.lstsq()* function (the
# name stands for “least squares”), which you could call directly
theta_best_svd, residuals, rank, s = np.linalg.lstsq(X_b, y, rcond=1e-6)
theta_best_svd
# This function computes $\hat{\theta} = X^+ y$ where $X^+$ is the pseudoinverse of $X$ (specifically the
# Moore-Penrose inverse).
#
# You can use **np.linalg.pinv()** to compute the pseudoinverse directly
np.linalg.pinv(X_b).dot(y)
# The pseudoinverse itself is computed using a standard matrix factorization technique called **Singular Value Decomposition** (SVD) that can decompose the training set matrix $X$ into the matrix multiplication of three matrices $U \Sigma^+ V^T$ (see numpy.linalg.svd()). The pseudoinverse is computed as $X^+= V\Sigma^+U^T$. To compute the matrix $\Sigma^+$ , the algorithm takes $\Sigma$ and sets to zero all values smaller than a tiny threshold value, then it replaces all the non-zero values with their inverse, and finally it transposes the resulting matrix. This approach is more efficient than computing the **Normal Equation**, plus it handles edge cases nicely: indeed, the Normal Equation may
# not work if the matrix $X^TX$ is not invertible (i.e., singular), such as if m < n or if some features are redundant, but the pseudoinverse is always defined.
# ### Gradient Descent
# *Gradient Descent* is a very generic optimization algorithm capable of finding optimal solutions to a wide range of problems. The general idea of Gradient Descent is to tweak parameters iteratively in order to minimize a cost function.
#
# Suppose you are lost in the mountains in a dense fog; you can only feel the slope of the ground below your feet. A good strategy to get to the bottom of the valley quickly is to go downhill in the direction of the steepest slope. This is exactly what Gradient Descent does: it measures the local gradient of the error function with regards to the parameter vector $θ$, and it goes in the direction of descending gradient. Once the gradient is zero, you have reached a minimum!
#
# To implement Gradient Descent, you need to compute the gradient of the cost function with regards to each model parameter $\theta_{j}$. In other words, you need to calculate how much the cost function will change if you change $\theta_{j}$ just a little bit. This is called a partial derivative. It is like asking “what is the slope of the mountain under my feet if I face east?” and then asking the same question facing north.
#
# Once you have the gradient vector, which points uphill, just go in the opposite direction to go downhill.
# *Gradient vector of the cost function:*
# $\nabla_{\theta}MSE(\theta) = \dfrac{2}{m} X^T(X\theta - y)$
# #### Batch Gradient Descent
# +
eta = 0.1 # learning rate
n_iterations = 1000
m = 100
theta = np.random.randn(2,1) # random initialization
for iteration in range(n_iterations):
gradients = 2/m * X_b.T.dot(X_b.dot(theta) - y)
theta = theta - eta * gradients
theta
# -
# #### Stochastic Gradient Descent
# +
n_epochs = 50
t0, t1 = 5, 50  # learning schedule hyperparameters

def learning_schedule(t):
    """Decaying step size: eta(t) = t0 / (t + t1)."""
    return t0 / (t + t1)

theta = np.random.randn(2,1)  # random initialization

# Stochastic Gradient Descent: one random instance per update,
# with a step size that decays over the total iteration count.
for epoch in range(n_epochs):
    for i in range(m):
        random_index = np.random.randint(m)
        xi = X_b[random_index:random_index+1]
        yi = y[random_index:random_index+1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        eta = learning_schedule(epoch * m + i)
        theta = theta - eta * gradients

# Display the learned parameters (notebook cell output).
theta
# +
n_epochs = 50
t0, t1 = 5, 50  # learning schedule hyperparameters

def learning_rate(t):
    """Decaying step size: eta(t) = t0 / (t + t1)."""
    return t0 / (t + t1)

theta = np.random.randn(2, 1)  # random initialization

# Second SGD run, identical procedure to the previous cell.
for epoch in range(n_epochs):
    for i in range(m):
        random_index = np.random.randint(m)
        xi = X_b[random_index:random_index+1]
        yi = y[random_index:random_index+1]
        gradients = 2 * xi.T.dot(xi.dot(theta) - yi)
        # FIX: this cell defines learning_rate() but previously called
        # learning_schedule() from the earlier cell, leaving its own
        # function unused; call the local definition (same formula).
        eta = learning_rate(epoch * m + i)
        theta = theta - eta * gradients

# Display the learned parameters (notebook cell output).
theta
# +
from sklearn.linear_model import SGDRegressor
sgd_reg = SGDRegressor(max_iter=1000, tol=1e-3, penalty=None, eta0=0.1)
sgd_reg.fit(X, y.ravel())
# -
sgd_reg.intercept_, sgd_reg.coef_
# ### Polynomial Regression
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 0.5 * X ** 2 + X + 2 + np.random.randn(m, 1)
plt.plot(X , y, 'bo')
# Clearly, a straight line will never fit this data properly. So let’s use Scikit-Learn’s PolynomialFeatures class to transform our training data, adding the square (2nd-degree polynomial) of each feature in the training set as new features
from sklearn.preprocessing import PolynomialFeatures
poly_features = PolynomialFeatures(degree=2, include_bias = False)
X_poly = poly_features.fit_transform(X)
X[0]
X_poly[0]
# X_poly now contains the original feature of X plus the square of this feature.
lin_reg = LinearRegression()
lin_reg.fit(X_poly, y)
lin_reg.intercept_, lin_reg.coef_
# ### Learning rate
# **learning curves**: these are plots of the model’s performance on the training set and the validation set as a function of the training set size (or the training iteration).
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
# +
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import train_test_split
def plot_learning_curves(model, X, y):
    """Plot train/validation RMSE as a function of training-set size.

    For each prefix size m, refits *model* on the first m training samples,
    then records RMSE on those m samples and on the fixed validation split.
    """
    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=10)
    train_errors, val_errors = [], []
    for m in range(1, len(X_train)):
        model.fit(X_train[:m], y_train[:m])
        y_train_predict = model.predict(X_train[:m])
        y_val_predict = model.predict(X_val)
        train_errors.append(mean_squared_error(y_train[:m], y_train_predict))
        val_errors.append(mean_squared_error(y_val, y_val_predict))
    # Errors are MSE; take sqrt when plotting to show RMSE.
    plt.plot(np.sqrt(train_errors), "r-+", linewidth=2, label="train")
    plt.plot(np.sqrt(val_errors), "b-", linewidth=3, label="val")
    plt.legend(loc="upper right", fontsize=14) # not shown in the book
    plt.xlabel("Training set size", fontsize=14) # not shown
    plt.ylabel("RMSE", fontsize=14)
# -
lin_reg = LinearRegression()
plot_learning_curves(lin_reg, X, y)
plt.axis([0, 80, 0, 3])
plt.legend()
plt.show()
# +
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline([
("poly_features", PolynomialFeatures(degree=2, include_bias=False)),
("lin_reg", LinearRegression()),
])
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3]) # not shown
plt.show()
# +
from sklearn.pipeline import Pipeline
polynomial_regression = Pipeline([
("poly_features", PolynomialFeatures(degree=10, include_bias=False)),
("lin_reg", LinearRegression()),
])
plot_learning_curves(polynomial_regression, X, y)
plt.axis([0, 80, 0, 3]) # not shown
plt.show()
# -
# * The error on the training data is much lower than with the Linear Regression model.
# * There is a gap between the curves. This means that the model performs significantly better on the training data than on the validation data, which is the hallmark of an overfitting model. However, if you used a much larger training set, the two curves would continue to get closer.
# ### Regularized Linear Models
# #### Ridge Regression
#
# * half the square of the ℓ2 norm of the weight vector
#
# $J(\mathbf{\theta}) = MSE(\mathbf{\theta})+ \alpha \frac{1}{2} \sum_{i=1}^{n} \theta_{i}^2$
#
# * Closed form solution:
#
# $\mathbf{\hat{\theta} = (X^TX + \alpha A)^{-1}\ \ X^T y}$
#
#
# where $A$: is the $(n + 1) × (n + 1)$ identity matrix except with a $0$ in the top-left cell, corresponding to the bias term
# +
# using closed form solution
from sklearn.linear_model import Ridge
ridge_reg = Ridge(alpha=1, solver='cholesky')
ridge_reg.fit(X, y)
# -
ridge_reg.predict([[1.5]])
# +
# using stochastic Gradient descent
sgd_reg = SGDRegressor(penalty='l2')
sgd_reg.fit(X, y.ravel())
sgd_reg.predict([[1.5]])
# -
# #### Lasso Regression
# *Least Absolute Shrinkage and Selection Operator* Regression
#
# * ℓ1 norm of the weight vector
#
# $J(\mathbf{\theta}) = MSE(\mathbf{\theta})+ \alpha \frac{1}{2} \sum_{i=1}^{n} |\theta_{i}|$
#
#
# An important characteristic of Lasso Regression is that it tends to completely eliminate the weights of the least important features (i.e., set them to zero).
# In other words, Lasso Regression automatically performs feature selection and outputs a sparse model (i.e., with few nonzero feature weights)
from sklearn.linear_model import Lasso
lasso_reg = Lasso(alpha=0.1)  # l1 penalty tends to zero out unimportant weights
lasso_reg.fit(X, y)
lasso_reg.predict([[1.5]])
# #### Elastic Net
#
#
# $J(\mathbf{\theta}) = MSE(\mathbf{\theta})+ r\alpha \sum_{i=1} ^{n}|\theta_{i}| + \frac{1 - r}{2} \alpha \sum_{i=1} ^{n}\theta^2$
#
#
# The regularization term is a simple mix of both Ridge and Lasso’s regularization terms, and you can control the mix ratio r. When r = 0, Elastic Net is equivalent to Ridge Regression, and when r = 1, it is equivalent to Lasso Regression
#
# It is almost always preferable to have at least a little bit of regularization, so generally you should avoid plain Linear Regression. Ridge is a good default, but if you suspect that only a few features are actually useful, you should prefer Lasso or Elastic Net since they tend to reduce the useless features’ weights down to zero as we have discussed. In general, Elastic Net is preferred over Lasso since Lasso may behave erratically when the number of features is greater than the number of training instances or when several features are strongly correlated.
from sklearn.linear_model import ElasticNet
elastic_net = ElasticNet(alpha=0.1, l1_ratio=0.5)  # l1_ratio is the mix ratio r
elastic_net.fit(X, y)
elastic_net.predict([[1.5]])
# #### Early Stopping
from sklearn.base import clone
from sklearn.preprocessing import StandardScaler
from copy import deepcopy
# +
# Reproducible noisy quadratic dataset; only the first 50 points are split
# into train/validation halves.
np.random.seed(42)
m = 100
X = 6 * np.random.rand(m, 1) - 3
y = 2 + X + 0.5 * X**2 + np.random.randn(m, 1)
X_train, X_val, y_train, y_val = train_test_split(X[:50], y[:50].ravel(), test_size=0.5, random_state=10)
# -
# Degree-90 polynomial expansion + standardization (no model in the pipeline:
# the SGD regressor below is fit manually, one epoch at a time).
poly_scaler = Pipeline([
    ("poly_features", PolynomialFeatures(degree=90, include_bias=False)),
    ("std_scaler", StandardScaler())
])
X_train_poly_scaled = poly_scaler.fit_transform(X_train)
X_val_poly_scaled = poly_scaler.transform(X_val)
# max_iter=1 + warm_start=True runs exactly one epoch per .fit() call.
# BUG FIX: np.infty was removed in NumPy 2.0; np.inf is the supported spelling.
sgd_reg = SGDRegressor(max_iter=1, tol=-np.inf, warm_start=True,
                       penalty=None, learning_rate="constant", eta0=0.0005)
minimum_val_error = float("inf")
best_epoch = None
best_model = None
for epoch in range(1000):
    sgd_reg.fit(X_train_poly_scaled, y_train)  # continues where it left off
    y_val_predict = sgd_reg.predict(X_val_poly_scaled)
    val_error = mean_squared_error(y_val, y_val_predict)
    if val_error < minimum_val_error:
        minimum_val_error = val_error
        best_epoch = epoch
        # BUG FIX: sklearn.base.clone() returns an UNFITTED copy (it copies
        # only hyperparameters), so the original never saved the trained
        # weights of the best model; deepcopy preserves them.
        best_model = deepcopy(sgd_reg)
print(best_epoch)
print(best_model)
# ## Logistic Regression
#
t = np.linspace(-10, 10, 100)
sig = 1 / (1 + np.exp(-t))  # logistic (sigmoid) function
plt.figure(figsize=(9, 3))
plt.plot([-10, 10], [0, 0], "k-")      # horizontal axis
plt.plot([-10, 10], [0.5, 0.5], "k:")  # midpoint guide
plt.plot([-10, 10], [1, 1], "k:")      # upper asymptote
plt.plot([0, 0], [-1.1, 1.1], "k-")    # vertical axis
plt.plot(t, sig, "b-", linewidth=2, label=r"$\sigma(t) = \frac{1}{1 + e^{-t}}$")
plt.xlabel("t")
plt.legend(loc="upper left", fontsize=20)
plt.axis([-10, 10, -0.1, 1.1])
plt.show()
# +
from sklearn import datasets
from sklearn.linear_model import LogisticRegression
iris = datasets.load_iris()
X = iris["data"][:, 3:]  # petal width
# BUG FIX: np.int was deprecated in NumPy 1.20 and removed in 1.24; the
# builtin int is the intended dtype for the 0/1 labels here.
y = (iris["target"] == 2).astype(int)
# -
log_reg = LogisticRegression(solver="lbfgs", random_state=42)
log_reg.fit(X, y)
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)  # petal widths 0..3 cm as a column vector
y_proba = log_reg.predict_proba(X_new)
y_proba[0]
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris virginica")
# +
X_new = np.linspace(0, 3, 1000).reshape(-1, 1)
y_proba = log_reg.predict_proba(X_new)
# first petal width whose P(virginica) reaches 0.5
decision_boundary = X_new[y_proba[:, 1] >= 0.5][0]
plt.figure(figsize=(8, 3))
plt.plot(X[y==0], y[y==0], "bs")  # negative samples plotted at y=0
plt.plot(X[y==1], y[y==1], "g^")  # positive samples plotted at y=1
plt.plot([decision_boundary, decision_boundary], [-1, 2], "k:", linewidth=2)
plt.plot(X_new, y_proba[:, 1], "g-", linewidth=2, label="Iris virginica")
plt.plot(X_new, y_proba[:, 0], "b--", linewidth=2, label="Not Iris virginica")
plt.text(decision_boundary+0.02, 0.15, "Decision boundary", fontsize=14, color="k", ha="center")
plt.arrow(decision_boundary, 0.08, -0.3, 0, head_width=0.05, head_length=0.1, fc='b', ec='b')
plt.arrow(decision_boundary, 0.92, 0.3, 0, head_width=0.05, head_length=0.1, fc='g', ec='g')
plt.xlabel("Petal width (cm)", fontsize=14)
plt.ylabel("Probability", fontsize=14)
plt.legend(loc="center left", fontsize=14)
plt.axis([0, 3, -0.02, 1.02])
plt.show()
# -
decision_boundary
log_reg.predict([[1.7], [1.5]])  # one point on each side of the boundary
X = iris["data"][:, (2, 3)]  # petal length, petal width
y = iris["target"]
# Softmax (multinomial) regression over the three iris classes;
# larger C means weaker regularization.
softmax_reg = LogisticRegression(multi_class="multinomial",solver="lbfgs", C=10)
softmax_reg.fit(X, y)
softmax_reg.predict([[5, 2]])
softmax_reg.predict_proba([[5, 2]])
| Chapter 4/linear_models.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
from psm.logger import MockLogger
from gbstrategy import SuccessiveHalvingStrategy
from gbstrategy.core import DemoDriver, ExampleLoss1, Interface, StrategyMachineFactory
logger = MockLogger()
interf = Interface()
loss = ExampleLoss1()
demo = DemoDriver(interf, loss)
# Three canned events used to drive the strategy state machine.
testcase_0 = [
    {
        'trigger': 'ReceiveRandomSearchHyperparams',
        'data': {'num_exp':16, 'epoch':2}
    }, {
        'trigger': 'ReceiveHyperparams',
        'data': {
            'learning_rate': [0.001,0.01],
        }
    }, {
        'trigger': 'ReceiveTrainingLoss',
        'data': {'exp_id' : 'blah',
                 'epoch' : 4,
                 'loss_name' : 'blah',
                 'loss_value': 0.09}
    }
]
# Fire each trigger against a fresh strategy/factory/psm so cases stay independent.
for t in testcase_0[0:3]:
    strategy = SuccessiveHalvingStrategy()
    factory = StrategyMachineFactory(strategy, logger, interf)
    psm = factory.generate_psm()
    data = t['data']
    print(t['trigger'])
    strategy.trigger(t['trigger'], **data)
    del strategy, factory, psm, data
# -
from tqdm import *
import numpy as np
# Advance the mock interface 2000 time steps, then merge every trigger's
# logged data into one dict; 'trainingloss' is presumably the aggregated
# loss log — TODO confirm against psm/gbstrategy internals.
for i in tqdm(range(2000)):
    interf.next_time_point()
data = {}
triggers = logger._find_all_triggers()
for t in triggers:
    t.aggregate_data(data)
training_loss = data['trainingloss']
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from plot_utils import RGBALabelTransformer
# %matplotlib inline
# +
from IPython import display
import time
NUM_EXPS = 200
# One color per experiment id, drawn from the 'Set2' palette.
cm = RGBALabelTransformer('Set2')
cm.fit(list(range(NUM_EXPS)))
fig = plt.gcf()
fig.set_size_inches((9,6))
plt.xlabel('Epoch (# of runs over entire dataset)', fontsize=18)
plt.xlim(0,15)
plt.ylabel('Training Loss',fontsize=18)
plt.ylim(0.5,1.6)
# Animated scatter: replay the loss log one record at a time, assigning a
# stable color index to each experiment id in order of first appearance.
for idx in range(100):
    df = pd.DataFrame(training_loss).iloc[:idx]
    df['color_id'] = 0
    id2color = {}
    color_cnt = 0
    for i in range(len(df)):
        exp_id = df.exp_id.iloc[i]
        if exp_id not in id2color:
            id2color[exp_id] = color_cnt
            color_cnt += 1
    df['color_id'] = df.exp_id.apply(lambda exp_id: id2color[exp_id])
    df['logloss'] = np.log10(df.loss_value)
#     df.head()
    plt.scatter(df.epoch, df.loss_value, c=cm.transform(df.color_id))
    display.clear_output(wait=True)
    display.display(plt.gcf())
    time.sleep(0.001)
# g = sns.lmplot(x="epoch", y="logloss", data=df)
# -
logger.printmocklog()
| jupyter/Demo_SuccessiveHalving.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Organization of the source code of occiput
# - occiput
# - Core
# - Core.py
# - NiftyPy_wrap.py
# - Conversion.py
# - Errors.py
# - Print.py
# - transformations.py
# - Reconstruction
# - PET
# - PET_projection.py Defines
# - PET_raytracer.py
# - PET_scanners.py
# - PET_subsets.py
# - PET_listmode.py
# - PET_span.py
# - PET_meshing.py
# - PET_ilang.py
# - PET.py
# - MR
# - SPECT
# - CT
# - DataSources
# - FileSources
# - Synthetic
# - Registration
# - Classification
# - Transformation
# - Visualization
| tomolab/Examples/doc_organization_of_source_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="udoq38uwEis8"
# <NAME>
# Checked working on Windows 10
# Run the notebook to check the working.
# Code takes care of all the errors mentioned and the other cases
# + id="YWC01UIaEqTx"
# Links for reference:
# https://en.wikipedia.org/wiki/Key%E2%80%93value_database
# https://www.w3schools.com/js/js_json_objects.asp
# https://en.wikipedia.org/wiki/Thread_safety
# + id="RhhUC6DXEz05"
import time
import sys
import threading
from threading import *
# + [markdown] id="JZzRIYmWGnTG"
# **SERVER**
# + id="tRTZwHJ1GuC-"
datastore = {}  # global in-memory key-value store: key -> [value, expiry_epoch or -1]
# + id="dtRQ_aeDG1xt"
def create(key,value,timeout=0): # timeout provided in seconds
    """Insert `key` -> `value` into the global datastore.

    Rejects duplicate keys, non-alphabetic keys, keys longer than 32
    characters, values over 16 KB, and stores over ~1 GB.  A non-zero
    `timeout` records an absolute expiry timestamp (time-to-live).
    """
    if key in datastore:
        print("Error :( Key already exists in the datastore.") # error msg
        return
    if not key.isalpha(): # string key
        print("Error :( Key should have alphabet only") # error msg
        return
    if len(key) > 32: # key is max of 32 chars
        # BUG FIX: the original silently dropped keys longer than 32
        # characters without any message; report the violation explicitly.
        print("Error :( Key should be at most 32 characters") # error msg
        return
    # 1073741824 bytes == 1 GB store cap; 16*1024 bytes == 16 KB value cap
    if sys.getsizeof(datastore)<(1073741824) and sys.getsizeof(value)<=(16*1024):
        if timeout==0:
            entry=[value,-1]                # -1 means "never expires"
        else:
            entry=[value,time.time()+timeout] # absolute expiry timestamp
        datastore[key]=entry
    else:
        print("Error :( Memory exceeded") # error msg
# + id="qgVSltJHHAXP"
def delete(key):
    """Delete `key` from the datastore, honouring its time-to-live.

    Keys whose TTL has already expired are NOT deleted (an error is
    printed instead), mirroring the original behaviour.
    """
    if key not in datastore:
        print("Error :( Key is not in Datastore") # error msg
        return
    entry=datastore[key]  # renamed from `list`, which shadowed the builtin
    if entry[1]!=-1: # time to live parameter isnt -1 (provided by user)
        current_time=time.time()
        if current_time<entry[1]: # expiry & current time compared
            del datastore[key]
            print("Success :) key is now been deleted")
        else:
            print("Error :( time to live off has been expired") # TTL expired, so it is not deleted
    else: # time to live is -1 then just delete the key
        del datastore[key]
        print("Success! key is now deleted")
# + id="7K8TELDWHH7B"
def read(key):
    """Return "key : value" for `key`, or None (printing an error) when the
    key is missing or its time-to-live has expired."""
    if key not in datastore:
        print("Error :( Key is not in Datastore") # error msg
        return
    entry=datastore[key]  # renamed from `list`, which shadowed the builtin
    if entry[1]!=-1: # time to live parameter isnt -1 (provided by user)
        current_time=time.time()
        if current_time<entry[1]: # expiry & current time compared
            response=str(key)+" : "+str(entry[0]) # Key - JSon pair returned from DB
            return response
        else:
            print("Error :( time to live off has been expired") # error msg
    else:
        response =str(key)+" : "+str(entry[0])
        return response
# + [markdown] id="eD9aFop3HO8w"
# **CLIENT**
# + [markdown] id="Nw6yp4z3HR7A"
# ***Testcase-1***
# + colab={"base_uri": "https://localhost:8080/"} id="A_VK_ZoGHVLs" outputId="b2ba7555-ec83-4c04-a440-b500c332da84"
json1={ "Name": "Manikantan",
        "Rollno": "EC17B1033",
        "Marks": 95}
create("Student",json1) # create a key with key & json obj given and no time-to-live property
json2=[28,7]
create("Number",json2,200)# create a key with time-to-live of 200 seconds
print(read("Student")) # PRINTS key in Json object format 'key_name:value'
print(read("Number")) # PRINTS key in Json object format 'key_name:value' if the (time to live) is not expired else it throws an ERROR
json3={"Nameid":"lala123"}
create("Student",json3)
# this returns an error since the key_name already present in datastore
delete("Student")
# this deletes the given key & json obj from datastore
# #Using Multi threading
json4=["Merry","Christmas"]
thread1=Thread(target=(create),args=("moker",json4)) # as per the operation
thread1.start()
thread2=Thread(target=(delete),args=("moker",)) # as per the operation
thread2.start()
# NOTE(review): create/delete race here — delete may run before create finishes
print("Final datastore",datastore)
# + [markdown] id="7BezmZ7kHpF_"
# ***Testcase-2***
# + colab={"base_uri": "https://localhost:8080/"} id="s2xpaE53HsB7" outputId="9eaa722f-63e0-47d4-d0c7-13ef9fcc3f37"
delete("lalalala")  # key never existed -> prints an error
json1={ "Name": "Manikantan",
        "Rollno": "EC17B1033",
        "Marks": 95}
create("Student1",json1)
# Error! "Student1" is alphanumeric, but keys must be alphabetic only
json2=[28,7]
create("Numbers",json2,30)
# create a key with time-to-live of 30 seconds
print(read("Numbers"))
# PRINTS key in Json object format 'key_name:value' if the (time to live) is not expired else it throws an ERROR !
json3={"Nameid":"lala123"}
create("Employee",json3)
print("Final datastore",datastore)
# + colab={"base_uri": "https://localhost:8080/"} id="con-X2IrHy9D" outputId="d3a659cc-230f-4e81-eb45-6508a924252d"
print(read("Numbers"))  # fails with a TTL error once the 30-second window has passed
| Freshworks_assignment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/erickaalgr/CpEN-21A-BSCpE-1-1/blob/main/Midterm_Exam.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="TKrAcdEBiceX"
# #Midterm Exam
# + [markdown] id="xv3mL9Q9xoQE"
# ###PROBLEM STATEMENT 1
# + colab={"base_uri": "https://localhost:8080/"} id="df8wnKMliHTV" outputId="e16e44a9-eb37-431f-897c-2502e97100d6"
# Personal-details variables, printed back out below.
Name= "<NAME>"
# BUG FIX: was misspelled "StudendtNumber", so print(StudentNumber) below
# raised NameError.
StudentNumber = 202101777
Age = "18 years old"
Birthday = "November 24, 2003"
Address = "Kanluran, Kayrilaw, Nasugbu, Batangas"
Course = "Bachelor of Science in Computer Engineering"
GWA = 95.800
print(Name)
print(StudentNumber)
print(Age)
print(Birthday)
print(Address)
print(Course)
print(GWA)
# + [markdown] id="AxkkZt5vyA1J"
# ###PROBLEM STATEMENT 2
# + colab={"base_uri": "https://localhost:8080/"} id="oylN31Mylsy5" outputId="ed3974d4-284d-4bea-ea2a-f06148354cf9"
n=4
answ= "Y"
print((2<n) and (n<6))  # True and True -> True
print((2<n) or (n==6))  # True or False -> True
print(not(2<n) or (n==6))  # False or False -> False
print(not(n<6))  # not True -> False
print((answ=="Y") or (answ=="y"))  # True or False -> True
print((answ=="Y") and (answ=="y"))  # True and False -> False
print(not(answ=="y"))  # not False -> True
print(((2<n) and (n==5+1)) or (answ=="No"))  # (True and False) or False -> False
print(((n==2)and(n==7)) or (answ=="Y"))  # False or True -> True
print((n==2) and ((n==7) or(answ=="Y")))  # False and ... -> False
# + [markdown] id="6qier_mLyW2y"
# ###PROBLEM STATEMENT 3
# + id="Ja9gKNitty1C" colab={"base_uri": "https://localhost:8080/"} outputId="23424170-ce4f-447f-c5c9-74ff27e231c1"
# Mixed-sign arithmetic drill.  Note: in Python, % takes the sign of the
# divisor, and // floors toward negative infinity.
w = 7
x = 2
y = -3
z = -10
a = x / y              # -0.666...
b = w / y / x          # -1.1666...
c = z / y % x          # 1.333...  (3.333... % 2)
d = x % -y * w         # 14  (2 % 3 == 2, times 7)
e = x % y              # -1  (sign follows the divisor)
f = z % w - y / x * 5 + 5   # 16.5  (4 - (-7.5) + 5)
g = 9 - x % (2 + y)    # 9   (2 % -1 == 0)
h = z // w             # -2  (floor division rounds down)
i = (2 + y) ** 2       # 1
j = w / x * 2          # 7.0
for result in (a, b, c, d, e, f, g, h, i, j):
    print(result)
| Midterm_Exam.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Intro
# In this workbook I have described my way of converting weights from keras to torch. In the beginning you will find some issues I encountered with and my way of solving them. Then you will find a code of converting.
#
# My motivation or how the story began:
# I have downloaded model with a pretrained model from here https://github.com/uzh-rpg/rpg_public_dronet. But I am torch user and wished to convert model from keras to torch. The original keras model could be find in *'keras_model.py'* and the weights in *'model_weights.h5*
# ## My path
# For some reasons, online available converter (https://github.com/Microsoft/MMdnn) didn't help me. It could convert model successfuly, but results of feeding the same data were different. So I decided to implement code of the model on Torch by bare hands and then convert weights from keras to torch. Torch copy of the original Keras model could be find in *torch_model.py*. My solution for converting weights was based on the mapping of parameters names between keras and torch. So here the task of mapping creating appeared. I couldn't find smart solution for it, so decided to make it by hands as well. I have extracted names of torch model
# > [(weight.name, weight.shape) for weight in keras_model.weights]
#
# and names of keras model like this:
# > [(key, torch_model.state_dict()[key].shape) for key in torch_model.state_dict().keys()]
#
# I have used shape just as a good hint for correct mapping.
# ## Next problems
#
# After having such dictionary I encountered with some problems:
#
# Here: [w=width, h=height, i=input_channel_size, o=output_channel_size]
#
# 1) **Convolution form issue.** Matrix of convolution in Keras has a form (w, h, i, o), while in torch (o, i, w, h)
#
# 2) **Dense form issue.** Matrix of dense in Keras has a form (i,o) while linear in torch (o, i)
#
# 3) **batch_norm issue.** Batch normalization has different default parameters (in particular momentum (0.1 in torch and 0.99 in keras) and eps (1e-5 in torch and 1e-3 in keras)
#
# 4) **padding issue.** In keras there is a parameter of convolution which is called padding which could be 'same', 'valid' and etc. But this 'same' is very tricky it could add one column on the left side of a picture and two on the right. In torch padding in convolution is an integer, so it is impossible to set one column on the left and not one on the right. So for this purposes I have used nn.ZeroPad2d which takes tuple of four element one for left, one for right, one for down and one for up.
#
# 5) **ReLU()** works with inplace=True by default. Which means, that one you run ReLU()(x), your x has been changed once.
#
# 6) **Flatten issue.** Keras convolution inputs and outputs should be percieved so that channel dimension is the last one, i.e. (batch_size, w, h, channel_size), but in torch it is quite different: (batch_size, channel_size, w, h). So it creates different flattened vectors on torch and keras.
#
# You can find in this workbook or in .py files some comments like "# related to ___ issue".
KERAS_MODEL_PATH="./model_weights.h5"   # pretrained keras weights to convert
DICT_PATH="./keras_torch_mapping.csv"   # hand-made keras->torch name mapping
import keras, torch
import numpy as np
# %run ./keras_model.py
# Build the keras model (200x200, 1 channel, 1 output) and load its weights.
keras_model = resnet8(200,200,1,1)
keras_model.load_weights(KERAS_MODEL_PATH)
# +
# %run ./torch_model.py
torch_model = ResNet8()
torch_model.eval();  # inference mode
# -
# Same random image fed to both frameworks; torch expects channels-first.
img=np.random.uniform(size=(200,200,1))[None]
keras_img=img
torch_img = torch.tensor(img.transpose((0, 3, 1, 2)), dtype=torch.float32, device="cpu")
# +
import pandas as pd
def get_dict(path=None):
    """Load the keras -> torch parameter-name mapping.

    Parameters
    ----------
    path: optional CSV path with 'keras' and 'torch' columns; defaults to
        the module-level DICT_PATH (backward compatible with get_dict()).

    Returns
    -------
    dict mapping each keras weight name to its torch state_dict key.
    """
    if path is None:
        path = DICT_PATH
    krs2trch = pd.read_csv(path)
    # zip the two columns directly instead of indexing row by row with iloc
    return dict(zip(krs2trch.keras, krs2trch.torch))
def keras_to_pyt(km, pm):
    """Copy the weights of keras model `km` into torch model `pm` in place.

    Uses the CSV name mapping from get_dict() to pair each keras weight with
    its torch state_dict key, transposing kernels where the two frameworks
    disagree on layout.
    """
    name_mapping = get_dict()
    weight_dict = dict()
    for layer in km.layers:
        weight_names = layer.weights
        weight_values = layer.get_weights()
        for i in range(len(weight_names)):
            torch_name = name_mapping[weight_names[i].name]
            if "conv2d_" in weight_names[i].name and "kernel" in weight_names[i].name: # convolution form issue: keras (w, h, in, out) -> torch (out, in, w, h)
                weight_dict[torch_name] = np.transpose(weight_values[i], (3, 2, 0, 1))
            elif "dense_" in weight_names[i].name and "kernel" in weight_names[i].name: # dense form issue: keras (in, out) -> torch (out, in)
                weight_dict[torch_name] = np.transpose(weight_values[i], (1, 0))
            else:
                weight_dict[torch_name] = weight_values[i]
    pyt_state_dict = pm.state_dict()
    for key in weight_dict:
        pyt_state_dict[key] = torch.from_numpy(weight_dict[key])
    pm.load_state_dict(pyt_state_dict)
keras_to_pyt(keras_model, torch_model)
# -
keras_output=keras_model.predict_on_batch(img)
torch_model.train(False)
torch_output=torch_model(torch_img)
# After conversion the two outputs should agree to ~1e-5.
#np.allclose(torch_output[0].detach().numpy().transpose(0,2,3,1), keras_output[0],atol=1e-5)
np.allclose(torch_output[0].detach().cpu().numpy(), keras_output[0],atol=1e-5)
#np.max(torch_output[0].detach().numpy().transpose(0,2,3,1) - keras_output[0])
np.max(torch_output[1].detach().cpu().numpy() - keras_output[1]), np.max(torch_output[0].detach().cpu().numpy() - keras_output[0])
torch.save(torch_model.state_dict(), "./torch_weights.pth")
| keras2torch_converter/keras2torch_converter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.3 64-bit (conda)
# name: python3
# ---
# Summary
#
# First paragraph: Lede statement that states purpose, and end goal. Put a little story in if you can.
#
# Theme: Successive iteration, each more precise and descriptive
#
# Jot down the iterations...
#
# ###########################
#
# These are your choices of types of article or essay:
#
# Argument: convinces someone of something
#
# Explanation: tells why something happened instead of something else
#
# Definition: states what a word or concept means
#
# Description: identifies properties or qualities of things
#
# ###########################
#
# So, now, here is the full set of types of things I have described (with indicator words in brackets):
#
# ###########################
#
# Argument (premise: 'since', 'because'; conclusion: 'therefore', 'so', 'by logic')
# Deductive
# Categorical ('all', 'only', 'no', 'none', 'some')
# Propositional ('if', 'or', 'and')
#
# Inductive
# Generalization ('sample', 'poll', 'observation')
# Statistical ('most', 'generally', 'usually', 'seventy percent', 'nine out of ten')
# Universal ('always' and 'all')
# Causal ('causes')
#
# Explanation ('why', 'instead of')
# Causal ('caused')
# Statistical ('percent', 'probability')
#
# Definition ('is a', 'is defined as')
# Ostensive ( 'That's what I mean by...' )
# Lexical ('All', 'Only', 'is a type of', 'is necessarily')
# Implicit ('is a', 'for example')
#
# Description
# Specific (Subject, predicate, object)
# Chronology ('yesterday', 'today')
# Sensations ('seems', 'feels', 'appears', etc.,)
# List ('first', 'second', etc.)
# 5 W's ('who', 'what', 'where', 'when', 'why')
| _TODO/How to write.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <h1>Featurization<h1>
from itertools import combinations
from msmbuilder.featurizer import AtomPairsFeaturizer
import os
import numpy as np
import mdtraj as md
def create_pairwise_index(atomindex_file):
    """
    Create pair-wise index for pdb files from input and remove redundant pairs

    Parameters
    ----------
    atomindex_file:
        Text file containing whitespace-separated 1-based atom indices
        (one or more per line) for featurization

    Return
    ----------
    pairwise_index: list
        a list of unique 0-based (i, j) index pairs, i < j
    """
    indices = []
    # `with` guarantees the file handle is closed (the original leaked it),
    # and the unused `counter` variable has been dropped.
    with open(atomindex_file) as fh:
        for line in fh:
            # shift the 1-based PDB atom indices to 0-based
            indices.extend(int(tok) - 1 for tok in line.strip().split())
    output_array = np.unique(indices)  # sorted, duplicates removed
    return list(combinations(output_array, 2))
def feat(atom_pairs_feat,traj_name,topfile):
    """
    Use AtomPairsFeaturizer from MSMbuilder to featurize a trajectory

    Parameters
    ----------
    atom_pairs_feat:
        AtomPairsFeaturizer from MSMbuilder
    traj_name:
        Directory to the MD trajectories
    topfile:
        PDB file for the topology of trajectories

    Return
    ----------
    feat_traj:
        the featurized trajectory returned by partial_transform
        (the original docstring wrongly said "pairwise_index")
    """
    traj = md.load(traj_name, top=topfile)
    feat_traj = atom_pairs_feat.partial_transform(traj)
    return feat_traj
# +
featdir="./Featurization/"
trajDir="./trajs/"
topfile = trajDir+"ala2.pdb"
#Create pairwise index from the atom-index file and save it for reference
atom_set =featdir+"AtomIndices.dat"
atom_pair_list=create_pairwise_index(atom_set)
np.savetxt(featdir+"atom_pair_list.dat",atom_pair_list,fmt='%d')
#Prepare an index for trajectories, sorted by the number embedded in each filename
from glob import glob
import re
trajlist=glob(trajDir+"*.xtc")
trajlist.sort(key=lambda f: int(re.sub('\D', '', f)))
#Featurize trajectories; one .npy feature matrix per trajectory
os.makedirs(featdir+"features",exist_ok=True)
atom_pairs_feat = AtomPairsFeaturizer(atom_pair_list)
for n,i in enumerate(trajlist):
    feat_ = feat(atom_pairs_feat,i,topfile)
    np.save("{}features/{}.npy".format(featdir,n), feat_)
# -
| notebook/Featurization.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Perform ‘Exploratory Data Analysis’ on dataset ‘Indian Premier League’
#download essential Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# Get the Data (two IPL CSVs: one row per match, one row per delivery)
matches = pd.read_csv('C:\\Users\\nidhi\\Desktop\\Business Analytics\\Projects\\Indian Premier League\\matches.csv')
deliveries = pd.read_csv('C:\\Users\\nidhi\\Desktop\\Business Analytics\\Projects\\Indian Premier League\\deliveries.csv')
matches.head()
deliveries.head()
# check for null values in both datasets
m2 = matches.isnull().sum()
# BUG FIX: the label said "lunch" but m2 counts nulls in `matches`
print("null values in matches: ")
print(m2)
print("==============================================================================")
# BUG FIX: this section reports nulls in `deliveries`, not `matches`
print("null values in deliveries: ")
deliveries.isnull().sum()
pd.concat([matches['team1'],matches['team2']]).unique()  # all team names across both fixture columns
# winning team
plt.figure(figsize = (10,5))
sns.countplot(x = 'winner', data = matches)
plt.title('Winning Teams')
plt.xticks(rotation = 90);
matches['player_of_match'].value_counts()[0:5]  # five most frequent match MVPs
print('The Total no of rows and columns in deliveries dataset is ', deliveries.shape)
print('The Total no of rows and columns in matches dataset is ', matches.shape)
# kinds of dismissal
sns.countplot(x = deliveries['dismissal_kind'], data = deliveries)
plt.xticks(rotation = 90)
| EDA - SPORTS #TASK5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
# $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
# $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
# $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
# $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
# <font style="font-size:28px;" align="left"><b> <font color="blue"> Solution for </font>Operations on the Unit Circle </b></font>
# <br>
# _prepared by <NAME>_
# <br><br>
# <a id="task3"></a>
# <h3> Task 3</h3>
#
# Randomly pick an angle $ \theta \in [0,2\pi) $.
#
# Suppose that we have 1000 copies of quantum state $ \ket{v} = \myvector{ \cos \theta \\ \sin \theta } $ and we measure each of them.
#
# What are the expected numbers of observing the states 0 and 1?
#
# Implement the above experiment by designing a quantum circuit and set the quantum state by using ry-gate.
#
# Compare your experimental and analytic results.
#
# Repeat the task a couple of times.
# <h3> Solution </h3>
# **Analytical results**
# +
from random import randrange
from math import sin,cos, pi
# randomly pick an angle (integer degrees in [0, 360))
random_angle = randrange(360)
print("random angle is",random_angle)
# pick angle in radian
rotation_angle = random_angle/360*2*pi
# the quantum state |v> = cos(theta)|0> + sin(theta)|1>
quantum_state = [ cos(rotation_angle) , sin (rotation_angle) ]
# Born rule: P(0) = cos^2(theta), P(1) = sin^2(theta); scaled by 1000 shots.
the_expected_number_of_zeros = 1000*cos(rotation_angle)**2
the_expected_number_of_ones = 1000*sin(rotation_angle)**2
# expected results
print("The expected value of observing '0' is",round(the_expected_number_of_zeros,4))
print("The expected value of observing '1' is",round(the_expected_number_of_ones,4))
# +
# draw the quantum state on the unit circle (helpers come from quantum.py)
# %run quantum.py
draw_qubit()
draw_quantum_state(quantum_state[0],quantum_state[1],"|v>")
show_plt()
# -
# **Experimental results**
# +
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, execute, Aer
from qiskit.visualization import plot_histogram
# we define a quantum circuit with one qubit and one bit
q = QuantumRegister(1) # quantum register with a single qubit
c = ClassicalRegister(1) # classical register with a single bit
qc = QuantumCircuit(q,c) # quantum circuit with quantum and classical registers
# rotate the qubit with rotation_angle (ry takes twice the desired angle)
qc.ry(2*rotation_angle,q[0])
# measure the qubit
qc.measure(q,c)
# draw the circuit
qc.draw(output='mpl')
# +
# execute the program 1000 times
job = execute(qc,Aer.get_backend('qasm_simulator'),shots=1000)
# print the results
counts = job.result().get_counts(qc)
print(counts)
the_observed_number_of_ones = 0
if '1' in counts:
    the_observed_number_of_ones= counts['1']  # '1' can be absent when no ones were observed
# draw the histogram
plot_histogram(counts)
# -
# **Compare the results**
# Absolute deviation between theory and the 1000-shot experiment.
difference = abs(the_expected_number_of_ones - the_observed_number_of_ones)
print("The expected number of ones is",the_expected_number_of_ones)
print("The observed number of ones is",the_observed_number_of_ones)
print("The difference is",difference)
# BUG FIX: with 1000 shots the percentage deviation is
# difference / 1000 * 100 == difference / 10, not difference / 100.
print("The difference in percentage is",difference/10,"%")
| Bronze/quantum-with-qiskit/Q40_Operations_on_the_Unit_Circle_Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import pyfolio as pf
import matplotlib.pyplot as plt
import numpy as np
import os, string
retlist =[]
# Collect every per-run returns CSV ("retcsv_*") from the working directory.
for i in os.listdir():
    if i.startswith('retcsv_'):
        retlist.append(i)
# +
retcsv = pd.read_csv(retlist[0], index_col=0, parse_dates=True)
# Sum the 'asset' series across all runs to get the combined portfolio value.
for i in retlist[1:]:
    ret = pd.read_csv(i, index_col=0, parse_dates=True)
    retcsv['asset'] += ret['asset']
# -
# NOTE(review): this assumes 'DateTime' is a regular column even though the
# CSVs are read with index_col=0 — confirm the files carry a separate
# DateTime column.
retcsv['DateTime'] = retcsv['DateTime'].str.slice(0,10)  # keep the date part only
retcsv['DateTime'] = pd.to_datetime(retcsv['DateTime'], format='%Y-%m-%d')
#convert 15-minute level date to daily level data (keep the last bar of each day)
retcsv= retcsv.drop_duplicates(subset =['DateTime'],keep ='last')
#calculate daily return (log return of the asset value)
retcsv['return'] = np.log(retcsv.loc[:,'asset']) -np.log(retcsv.loc[:,'asset'].shift(1))
retcsv = retcsv.set_index('DateTime')
ret = retcsv['return'].tz_localize('UTC')  # pyfolio expects tz-aware returns
start = '2019-10-08'
pf.create_full_tear_sheet(ret)
| pair_trading_becktesting_multiple.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 100 numpy exercises
#
# This is a collection of exercises that have been collected in the numpy mailing list, on stack overflow and in the numpy documentation. The goal of this collection is to offer a quick reference for both old and new users but also to provide a set of exercises for those who teach.
#
#
# If you find an error or think you've a better way to solve some of them, feel free to open an issue at <https://github.com/rougier/numpy-100>
# #### 1. Import the numpy package under the name `np` (★☆☆)
import numpy as np
# #### 2. Print the numpy version and the configuration (★☆☆)
print(np.__version__)
np.show_config()
# #### 3. Create a null vector of size 10 (★☆☆)
Z = np.zeros(10)
print(Z)
# #### 4. How to find the memory size of any array (★☆☆)
Z = np.zeros((10,10))
print("%d bytes" % (Z.size * Z.itemsize))
# #### 5. How to get the documentation of the numpy add function from the command line? (★☆☆)
# %run `python -c "import numpy; numpy.info(numpy.add)"`
# #### 6. Create a null vector of size 10 but the fifth value which is 1 (★☆☆)
Z = np.zeros(10)
Z[4] = 1
print(Z)
# #### 7. Create a vector with values ranging from 10 to 49 (★☆☆)
Z = np.arange(10,50)
print(Z)
# #### 8. Reverse a vector (first element becomes last) (★☆☆)
Z = np.arange(50)
Z = Z[::-1]
print(Z)
# #### 9. Create a 3x3 matrix with values ranging from 0 to 8 (★☆☆)
Z = np.arange(9).reshape(3,3)
print(Z)
# #### 10. Find indices of non-zero elements from \[1,2,0,0,4,0\] (★☆☆)
nz = np.nonzero([1,2,0,0,4,0])
print(nz)
# #### 11. Create a 3x3 identity matrix (★☆☆)
Z = np.eye(3)
print(Z)
# #### 12. Create a 3x3x3 array with random values (★☆☆)
Z = np.random.random((3,3,3))
print(Z)
# #### 13. Create a 10x10 array with random values and find the minimum and maximum values (★☆☆)
Z = np.random.random((10,10))
Zmin, Zmax = Z.min(), Z.max()
print(Zmin, Zmax)
# #### 14. Create a random vector of size 30 and find the mean value (★☆☆)
Z = np.random.random(30)
m = Z.mean()
print(m)
# #### 15. Create a 2d array with 1 on the border and 0 inside (★☆☆)
Z = np.ones((10,10))
Z[1:-1,1:-1] = 0
print(Z)
# #### 16. How to add a border (filled with 0's) around an existing array? (★☆☆)
Z = np.ones((5,5))
Z = np.pad(Z, pad_width=1, mode='constant', constant_values=0)
print(Z)
# #### 17. What is the result of the following expression? (★☆☆)
print(0 * np.nan)
print(np.nan == np.nan)
print(np.inf > np.nan)
print(np.nan - np.nan)
print(np.nan in set([np.nan]))
print(0.3 == 3 * 0.1)
# #### 18. Create a 5x5 matrix with values 1,2,3,4 just below the diagonal (★☆☆)
Z = np.diag(1+np.arange(4),k=-1)
print(Z)
# #### 19. Create a 8x8 matrix and fill it with a checkerboard pattern (★☆☆)
Z = np.zeros((8,8),dtype=int)
Z[1::2,::2] = 1
Z[::2,1::2] = 1
print(Z)
# #### 20. Consider a (6,7,8) shape array, what is the index (x,y,z) of the 100th element?
print(np.unravel_index(100,(6,7,8)))
# #### 21. Create a checkerboard 8x8 matrix using the tile function (★☆☆)
Z = np.tile( np.array([[0,1],[1,0]]), (4,4))
print(Z)
# #### 22. Normalize a 5x5 random matrix (★☆☆)
Z = np.random.random((5,5))
Z = (Z - np.mean (Z)) / (np.std (Z))
print(Z)
# #### 23. Create a custom dtype that describes a color as four unsigned bytes (RGBA) (★☆☆)
# RGBA colour dtype: four unsigned bytes per pixel.
# The old form ("r", np.ubyte, 1) carried an explicit itemsize of 1, which numpy
# deprecated (1.17) and later rejects; a plain (name, type) pair is equivalent.
color = np.dtype([("r", np.ubyte),
                  ("g", np.ubyte),
                  ("b", np.ubyte),
                  ("a", np.ubyte)])
# #### 24. Multiply a 5x3 matrix by a 3x2 matrix (real matrix product) (★☆☆)
# +
Z = np.dot(np.ones((5,3)), np.ones((3,2)))
print(Z)
# Alternative solution, in Python 3.5 and above
Z = np.ones((5,3)) @ np.ones((3,2))
# -
# #### 25. Given a 1D array, negate all elements which are between 3 and 8, in place. (★☆☆)
# +
# Author: <NAME>
Z = np.arange(11)
Z[(3 < Z) & (Z <= 8)] *= -1
print(Z)
# -
# #### 26. What is the output of the following script? (★☆☆)
# +
# Author: <NAME>
print(sum(range(5),-1))
from numpy import *
print(sum(range(5),-1))
# -
# #### 27. Consider an integer vector Z, which of these expressions are legal? (★☆☆)
Z**Z
2 << Z >> 2
Z <- Z
1j*Z
Z/1/1
Z<Z>Z
# #### 28. What are the result of the following expressions?
print(np.array(0) / np.array(0))
print(np.array(0) // np.array(0))
print(np.array([np.nan]).astype(int).astype(float))
# #### 29. How to round away from zero a float array ? (★☆☆)
# +
# Author: <NAME>
Z = np.random.uniform(-10,+10,10)
print(Z)
print (np.copysign(np.ceil(np.abs(Z)), Z))
# -
# #### 30. How to find common values between two arrays? (★☆☆)
Z1 = np.random.randint(0,10,10)
Z2 = np.random.randint(0,10,10)
print(np.intersect1d(Z1,Z2))
# #### 31. How to ignore all numpy warnings (not recommended)? (★☆☆)
# +
# Suicide mode on: silence every floating-point warning globally.
defaults = np.seterr(all="ignore")
Z = np.ones(1) / 0  # -> [inf], no RuntimeWarning
# Back to sanity: restore the previous error-handling settings.
_ = np.seterr(**defaults)
# An equivalent way, with a context manager (the original left this line as
# bare prose, which is a SyntaxError in a .py export):
with np.errstate(divide='ignore'):
    Z = np.ones(1) / 0
# -
# #### 32. Is the following expressions true? (★☆☆)
np.sqrt(-1) == np.emath.sqrt(-1)
# #### 33. How to get the dates of yesterday, today and tomorrow? (★☆☆)
yesterday = np.datetime64('today', 'D') - np.timedelta64(1, 'D')
today = np.datetime64('today', 'D')
tomorrow = np.datetime64('today', 'D') + np.timedelta64(1, 'D')
# #### 34. How to get all the dates corresponding to the month of July 2016? (★★☆)
Z = np.arange('2016-07', '2016-08', dtype='datetime64[D]')
print(Z)
# #### 35. How to compute ((A+B)\*(-A/2)) in place (without copy)? (★★☆)
# + pycharm={"is_executing": false}
A = np.ones(3)*1
print(A)
B = np.ones(3)*2
C = np.ones(3)*3
np.add(A,B,out=B)
np.divide(A,2,out=A)
np.negative(A,out=A)
np.multiply(A,B,out=A)
# -
# #### 36. Extract the integer part of a random array using 5 different methods (★★☆)
# +
Z = np.random.uniform(0,10,10)
print (Z - Z%1)
print (np.floor(Z))
print (np.ceil(Z)-1)
print (Z.astype(int))
print (np.trunc(Z))
# -
# #### 37. Create a 5x5 matrix with row values ranging from 0 to 4 (★★☆)
Z = np.zeros((5,5))
Z += np.arange(5)
print(Z)
# #### 38. Consider a generator function that generates 10 integers and use it to build an array (★☆☆)
def generate():
    """Yield the integers 0 through 9, one at a time."""
    yield from range(10)
Z = np.fromiter(generate(),dtype=float,count=-1)
print(Z)
# #### 39. Create a vector of size 10 with values ranging from 0 to 1, both excluded (★★☆)
Z = np.linspace(0,1,11,endpoint=False)[1:]
print(Z)
# #### 40. Create a random vector of size 10 and sort it (★★☆)
Z = np.random.random(10)
Z.sort()
print(Z)
# #### 41. How to sum a small array faster than np.sum? (★★☆)
# +
# Author: <NAME>
Z = np.arange(10)
np.add.reduce(Z)
# -
# #### 42. Consider two random array A and B, check if they are equal (★★☆)
# +
A = np.random.randint(0,2,5)
B = np.random.randint(0,2,5)
# Assuming identical shape of the arrays and a tolerance for the comparison of values
equal = np.allclose(A,B)
print(equal)
# Checking both the shape and the element values, no tolerance (values have to be exactly equal)
equal = np.array_equal(A,B)
print(equal)
# -
# #### 43. Make an array immutable (read-only) (★★☆)
Z = np.zeros(10)
Z.flags.writeable = False
Z[0] = 1
# #### 44. Consider a random 10x2 matrix representing cartesian coordinates, convert them to polar coordinates (★★☆)
Z = np.random.random((10,2))
X,Y = Z[:,0], Z[:,1]
R = np.sqrt(X**2+Y**2)
T = np.arctan2(Y,X)
print(R)
print(T)
# #### 45. Create random vector of size 10 and replace the maximum value by 0 (★★☆)
Z = np.random.random(10)
Z[Z.argmax()] = 0
print(Z)
# #### 46. Create a structured array with `x` and `y` coordinates covering the \[0,1\]x\[0,1\] area (★★☆)
Z = np.zeros((5,5), [('x',float),('y',float)])
Z['x'], Z['y'] = np.meshgrid(np.linspace(0,1,5),
np.linspace(0,1,5))
print(Z)
# #### 47. Given two arrays, X and Y, construct the Cauchy matrix C (Cij =1/(xi - yj))
# +
# Author: <NAME>
X = np.arange(8)
Y = X + 0.5
C = 1.0 / np.subtract.outer(X, Y)
print(np.linalg.det(C))
# -
# #### 48. Print the minimum and maximum representable value for each numpy scalar type (★★☆)
for dtype in [np.int8, np.int32, np.int64]:
print(np.iinfo(dtype).min)
print(np.iinfo(dtype).max)
for dtype in [np.float32, np.float64]:
print(np.finfo(dtype).min)
print(np.finfo(dtype).max)
print(np.finfo(dtype).eps)
# #### 49. How to print all the values of an array? (★★☆)
import sys  # for sys.maxsize

# threshold=np.nan raises ValueError in modern numpy (the threshold must be a
# real integer); sys.maxsize is the documented way to disable summarization.
np.set_printoptions(threshold=sys.maxsize)
Z = np.zeros((16,16))
print(Z)
# #### 50. How to find the closest value (to a given scalar) in a vector? (★★☆)
Z = np.arange(100)
v = np.random.uniform(0,100)
index = (np.abs(Z-v)).argmin()
print(Z[index])
# #### 51. Create a structured array representing a position (x,y) and a color (r,g,b) (★★☆)
# Structured dtype: a nested (x,y) position and an (r,g,b) colour per record.
# The deprecated (name, type, 1) itemsize form is replaced by plain (name, type),
# which numpy treats as the same scalar field without the deprecation error.
Z = np.zeros(10, [ ('position', [ ('x', float),
                                  ('y', float)]),
                   ('color',    [ ('r', float),
                                  ('g', float),
                                  ('b', float)])])
print(Z)
# #### 52. Consider a random vector with shape (100,2) representing coordinates, find point by point distances (★★☆)
# +
Z = np.random.random((10,2))
X,Y = np.atleast_2d(Z[:,0], Z[:,1])
D = np.sqrt( (X-X.T)**2 + (Y-Y.T)**2)
print(D)
# Much faster with scipy
import scipy
# Thanks <NAME> (#issue 1)
import scipy.spatial
Z = np.random.random((10,2))
D = scipy.spatial.distance.cdist(Z,Z)
print(D)
# -
# #### 53. How to convert a float (32 bits) array into an integer (32 bits) in place?
Z = np.arange(10, dtype=np.float32)
Z = Z.astype(np.int32, copy=False)
print(Z)
# #### 54. How to read the following file? (★★☆)
# +
from io import StringIO

# Fake file
s = StringIO("""1, 2, 3, 4, 5\n
6, , , 7, 8\n
, , 9,10,11\n""")
# np.int was removed in numpy 1.24; the builtin int is the documented replacement.
# Missing integer fields are filled with genfromtxt's default of -1.
Z = np.genfromtxt(s, delimiter=",", dtype=int)
print(Z)
# -
# #### 55. What is the equivalent of enumerate for numpy arrays? (★★☆)
Z = np.arange(9).reshape(3,3)
for index, value in np.ndenumerate(Z):
print(index, value)
for index in np.ndindex(Z.shape):
print(index, Z[index])
# #### 56. Generate a generic 2D Gaussian-like array (★★☆)
X, Y = np.meshgrid(np.linspace(-1,1,10), np.linspace(-1,1,10))
D = np.sqrt(X*X+Y*Y)
sigma, mu = 1.0, 0.0
G = np.exp(-( (D-mu)**2 / ( 2.0 * sigma**2 ) ) )
print(G)
# #### 57. How to randomly place p elements in a 2D array? (★★☆)
# +
# Author: Divakar
n = 10
p = 3
Z = np.zeros((n,n))
np.put(Z, np.random.choice(range(n*n), p, replace=False),1)
print(Z)
# -
# #### 58. Subtract the mean of each row of a matrix (★★☆)
# +
# Author: <NAME>
X = np.random.rand(5, 10)
# Recent versions of numpy
Y = X - X.mean(axis=1, keepdims=True)
# Older versions of numpy
Y = X - X.mean(axis=1).reshape(-1, 1)
print(Y)
# -
# #### 59. How to sort an array by the nth column? (★★☆)
# +
# Author: <NAME>
Z = np.random.randint(0,10,(3,3))
print(Z)
print(Z[Z[:,1].argsort()])
# -
# #### 60. How to tell if a given 2D array has null columns? (★★☆)
# +
# Author: <NAME>
Z = np.random.randint(0,3,(3,10))
print((~Z.any(axis=0)).any())
# -
# #### 61. Find the nearest value from a given value in an array (★★☆)
Z = np.random.uniform(0,1,10)
z = 0.5
m = Z.flat[np.abs(Z - z).argmin()]
print(m)
# #### 62. Considering two arrays with shape (1,3) and (3,1), how to compute their sum using an iterator? (★★☆)
A = np.arange(3).reshape(3,1)
B = np.arange(3).reshape(1,3)
it = np.nditer([A,B,None])
for x,y,z in it: z[...] = x + y
print(it.operands[2])
# #### 63. Create an array class that has a name attribute (★★☆)
# +
class NamedArray(np.ndarray):
    """ndarray subclass that carries a human-readable ``name`` attribute.

    The name survives views, slices and copies via ``__array_finalize__``.
    """
    def __new__(cls, array, name="no name"):
        obj = np.asarray(array).view(cls)
        obj.name = name
        return obj
    def __array_finalize__(self, obj):
        if obj is None: return
        # Bug fix: the original assigned to ``self.info``, so any view/slice of
        # a NamedArray lost ``.name`` (AttributeError). Propagate ``name`` and
        # keep ``info`` as an alias for backward compatibility.
        self.name = getattr(obj, 'name', "no name")
        self.info = self.name
Z = NamedArray(np.arange(10), "range_10")
print (Z.name)
# -
# #### 64. Consider a given vector, how to add 1 to each element indexed by a second vector (be careful with repeated indices)? (★★★)
# +
# Author: <NAME>
Z = np.ones(10)
I = np.random.randint(0,len(Z),20)
Z += np.bincount(I, minlength=len(Z))
print(Z)
# Another solution
# Author: <NAME>
np.add.at(Z, I, 1)
print(Z)
# -
# #### 65. How to accumulate elements of a vector (X) to an array (F) based on an index list (I)? (★★★)
# +
# Author: <NAME>
X = [1,2,3,4,5,6]
I = [1,3,9,3,4,1]
F = np.bincount(I,X)
print(F)
# -
# #### 66. Considering a (w,h,3) image of (dtype=ubyte), compute the number of unique colors (★★★)
# +
# Author: <NAME>
w,h = 16,16
I = np.random.randint(0,2,(h,w,3)).astype(np.ubyte)
#Note that we should compute 256*256 first.
#Otherwise numpy will only promote F.dtype to 'uint16' and overflow will occur
F = I[...,0]*(256*256) + I[...,1]*256 +I[...,2]
n = len(np.unique(F))
print(n)
# -
# #### 67. Considering a four dimensions array, how to get sum over the last two axis at once? (★★★)
A = np.random.randint(0,10,(3,4,3,4))
# solution by passing a tuple of axes (introduced in numpy 1.7.0)
sum = A.sum(axis=(-2,-1))
print(sum)
# solution by flattening the last two dimensions into one
# (useful for functions that don't accept tuples for axis argument)
sum = A.reshape(A.shape[:-2] + (-1,)).sum(axis=-1)
print(sum)
# #### 68. Considering a one-dimensional vector D, how to compute means of subsets of D using a vector S of same size describing subset indices? (★★★)
# +
# Author: <NAME>
D = np.random.uniform(0,1,100)
S = np.random.randint(0,10,100)
D_sums = np.bincount(S, weights=D)
D_counts = np.bincount(S)
D_means = D_sums / D_counts
print(D_means)
# Pandas solution as a reference due to more intuitive code
import pandas as pd
print(pd.Series(D).groupby(S).mean())
# -
# #### 69. How to get the diagonal of a dot product? (★★★)
# +
# Author: <NAME>
A = np.random.uniform(0,1,(5,5))
B = np.random.uniform(0,1,(5,5))
# Slow version
np.diag(np.dot(A, B))
# Fast version
np.sum(A * B.T, axis=1)
# Faster version
np.einsum("ij,ji->i", A, B)
# -
# #### 70. Consider the vector \[1, 2, 3, 4, 5\], how to build a new vector with 3 consecutive zeros interleaved between each value? (★★★)
# +
# Author: <NAME>
Z = np.array([1,2,3,4,5])
nz = 3
Z0 = np.zeros(len(Z) + (len(Z)-1)*(nz))
Z0[::nz+1] = Z
print(Z0)
# -
# #### 71. Consider an array of dimension (5,5,3), how to mulitply it by an array with dimensions (5,5)? (★★★)
A = np.ones((5,5,3))
B = 2*np.ones((5,5))
print(A * B[:,:,None])
# #### 72. How to swap two rows of an array? (★★★)
# +
# Author: <NAME>
A = np.arange(25).reshape(5,5)
A[[0,1]] = A[[1,0]]
print(A)
# -
# #### 73. Consider a set of 10 triplets describing 10 triangles (with shared vertices), find the set of unique line segments composing all the triangles (★★★)
# +
# Author: <NAME>
faces = np.random.randint(0,100,(10,3))
F = np.roll(faces.repeat(2,axis=1),-1,axis=1)
F = F.reshape(len(F)*3,2)
F = np.sort(F,axis=1)
G = F.view( dtype=[('p0',F.dtype),('p1',F.dtype)] )
G = np.unique(G)
print(G)
# -
# #### 74. Given an array C that is a bincount, how to produce an array A such that np.bincount(A) == C? (★★★)
# +
# Author: <NAME>
C = np.bincount([1,1,2,3,4,4,6])
A = np.repeat(np.arange(len(C)), C)
print(A)
# -
# #### 75. How to compute averages using a sliding window over an array? (★★★)
# +
# Author: <NAME>
def moving_average(a, n=3):
    """Return the simple moving average of *a* over windows of length *n*.

    Uses a cumulative sum so each window mean costs O(1) after one O(len(a)) pass.
    """
    csum = np.cumsum(a, dtype=float)
    csum[n:] -= csum[:-n]          # window sums: csum[i] - csum[i-n]
    return csum[n - 1:] / n
Z = np.arange(20)
print(moving_average(Z, n=3))
# -
# #### 76. Consider a one-dimensional array Z, build a two-dimensional array whose first row is (Z\[0\],Z\[1\],Z\[2\]) and each subsequent row is shifted by 1 (last row should be (Z\[-3\],Z\[-2\],Z\[-1\]) (★★★)
# +
# Author: <NAME> / <NAME>
from numpy.lib import stride_tricks
def rolling(a, window):
    """Return a strided 2-D view over *a* (1-D) of overlapping length-*window* rows.

    Row i is a[i:i+window]; no data is copied. The caller must not resize *a*
    while the view is alive.
    """
    n_rows = a.size - window + 1
    step = a.itemsize
    return stride_tricks.as_strided(a, shape=(n_rows, window), strides=(step, step))
Z = rolling(np.arange(10), 3)
print(Z)
# -
# #### 77. How to negate a boolean, or to change the sign of a float inplace? (★★★)
# +
# Author: <NAME>
Z = np.random.randint(0,2,100)
np.logical_not(Z, out=Z)
Z = np.random.uniform(-1.0,1.0,100)
np.negative(Z, out=Z)
# -
# #### 78. Consider 2 sets of points P0,P1 describing lines (2d) and a point p, how to compute distance from p to each line i (P0\[i\],P1\[i\])? (★★★)
# +
def distance(P0, P1, p):
    """Perpendicular distance from point(s) *p* to each infinite 2-D line P0[i]-P1[i].

    P0, P1 : (n, 2) arrays of points defining n lines; p : broadcastable (.., 2) point(s).
    Returns a length-n array of distances.
    """
    T = P1 - P0                    # direction vector of each line
    L = (T**2).sum(axis=1)         # squared length of each direction vector
    # Projection parameter of p onto each line (0 at P0, 1 at P1).
    U = -((P0[:,0]-p[...,0])*T[:,0] + (P0[:,1]-p[...,1])*T[:,1]) / L
    U = U.reshape(len(U),1)
    D = P0 + U*T - p               # vector from p to its orthogonal projection
    return np.sqrt((D**2).sum(axis=1))
P0 = np.random.uniform(-10,10,(10,2))
P1 = np.random.uniform(-10,10,(10,2))
p = np.random.uniform(-10,10,( 1,2))
print(distance(P0, P1, p))
# -
# #### 79. Consider 2 sets of points P0,P1 describing lines (2d) and a set of points P, how to compute distance from each point j (P\[j\]) to each line i (P0\[i\],P1\[i\])? (★★★)
# +
# Author: <NAME>
# based on distance function from previous question
P0 = np.random.uniform(-10, 10, (10,2))
P1 = np.random.uniform(-10,10,(10,2))
p = np.random.uniform(-10, 10, (10,2))
print(np.array([distance(P0,P1,p_i) for p_i in p]))
# -
# #### 80. Consider an arbitrary array, write a function that extract a subpart with a fixed shape and centered on a given element (pad with a `fill` value when necessary) (★★★)
# +
# Author: <NAME>
Z = np.random.randint(0,10,(10,10))
shape = (5,5)          # size of the extracted window
fill  = 0              # padding value used where the window leaves the array
position = (1,1)       # element of Z the window is centred on

R = np.ones(shape, dtype=Z.dtype)*fill
P  = np.array(list(position)).astype(int)
Rs = np.array(list(R.shape)).astype(int)
Zs = np.array(list(Z.shape)).astype(int)

# Start/stop of the window in R-coordinates and in Z-coordinates.
R_start = np.zeros((len(shape),)).astype(int)
R_stop  = np.array(list(shape)).astype(int)
Z_start = (P-Rs//2)
Z_stop  = (P+Rs//2)+Rs%2

# Clip both coordinate systems where the window sticks out of Z.
R_start = (R_start - np.minimum(Z_start,0)).tolist()
Z_start = (np.maximum(Z_start,0)).tolist()
R_stop = np.maximum(R_start, (R_stop - np.maximum(Z_stop-Zs,0))).tolist()
Z_stop = (np.minimum(Z_stop,Zs)).tolist()

# Bug fix: indexing with a *list* of slices is rejected by modern numpy;
# multidimensional indices must be tuples.
r = tuple(slice(start,stop) for start,stop in zip(R_start,R_stop))
z = tuple(slice(start,stop) for start,stop in zip(Z_start,Z_stop))
R[r] = Z[z]
print(Z)
print(R)
# -
# #### 81. Consider an array Z = \[1,2,3,4,5,6,7,8,9,10,11,12,13,14\], how to generate an array R = \[\[1,2,3,4\], \[2,3,4,5\], \[3,4,5,6\], ..., \[11,12,13,14\]\]? (★★★)
# +
# Author: <NAME>
Z = np.arange(1,15,dtype=np.uint32)
R = stride_tricks.as_strided(Z,(11,4),(4,4))
print(R)
# -
# #### 82. Compute a matrix rank (★★★)
# +
# Author: <NAME>
Z = np.random.uniform(0,1,(10,10))
U, S, V = np.linalg.svd(Z) # Singular Value Decomposition
rank = np.sum(S > 1e-10)
print(rank)
# -
# #### 83. How to find the most frequent value in an array?
Z = np.random.randint(0,10,50)
print(np.bincount(Z).argmax())
# #### 84. Extract all the contiguous 3x3 blocks from a random 10x10 matrix (★★★)
# +
# Author: <NAME>
Z = np.random.randint(0,5,(10,10))
n = 3
i = 1 + (Z.shape[0]-3)
j = 1 + (Z.shape[1]-3)
C = stride_tricks.as_strided(Z, shape=(i, j, n, n), strides=Z.strides + Z.strides)
print(C)
# -
# #### 85. Create a 2D array subclass such that Z\[i,j\] == Z\[j,i\] (★★★)
# +
# Author: <NAME>
# Note: only works for 2d array and value setting using indices
class Symetric(np.ndarray):
    # ndarray subclass that keeps Z[i,j] == Z[j,i] by mirroring every indexed write.
    # Only supports 2-index assignment (S[i,j] = v); other write forms bypass the mirror.
    def __setitem__(self, index, value):
        i,j = index
        super(Symetric, self).__setitem__((i,j), value)  # write the requested cell
        super(Symetric, self).__setitem__((j,i), value)  # mirror across the diagonal
def symetric(Z):
    # Build a symmetric matrix from square Z: Z + Z.T, counting the diagonal once.
    return np.asarray(Z + Z.T - np.diag(Z.diagonal())).view(Symetric)
S = symetric(np.random.randint(0,10,(5,5)))
S[2,3] = 42
print(S)
# -
# #### 86. Consider a set of p matrices wich shape (n,n) and a set of p vectors with shape (n,1). How to compute the sum of of the p matrix products at once? (result has shape (n,1)) (★★★)
# +
# Author: <NAME>
p, n = 10, 20
M = np.ones((p,n,n))
V = np.ones((p,n,1))
S = np.tensordot(M, V, axes=[[0, 2], [0, 1]])
print(S)
# It works, because:
# M is (p,n,n)
# V is (p,n,1)
# Thus, summing over the paired axes 0 and 0 (of M and V independently),
# and 2 and 1, to remain with a (n,1) vector.
# -
# #### 87. Consider a 16x16 array, how to get the block-sum (block size is 4x4)? (★★★)
# +
# Author: <NAME>
Z = np.ones((16,16))
k = 4
S = np.add.reduceat(np.add.reduceat(Z, np.arange(0, Z.shape[0], k), axis=0),
np.arange(0, Z.shape[1], k), axis=1)
print(S)
# -
# #### 88. How to implement the Game of Life using numpy arrays? (★★★)
# +
# Author: <NAME>
def iterate(Z):
    """Advance the Game of Life board *Z* (2-D 0/1 array) one generation, in place.

    The outermost border is treated as permanently dead. Returns *Z*.
    """
    # Count neighbours
    # (sum of the 8 surrounding cells for every interior position)
    N = (Z[0:-2,0:-2] + Z[0:-2,1:-1] + Z[0:-2,2:] +
         Z[1:-1,0:-2] + Z[1:-1,2:] +
         Z[2: ,0:-2] + Z[2: ,1:-1] + Z[2: ,2:])
    # Apply rules
    birth = (N==3) & (Z[1:-1,1:-1]==0)  # dead cell with exactly three live neighbours
    survive = ((N==2) | (N==3)) & (Z[1:-1,1:-1]==1)  # live cell with 2 or 3 neighbours
    Z[...] = 0
    Z[1:-1,1:-1][birth | survive] = 1
    return Z
Z = np.random.randint(0,2,(50,50))
for i in range(100): Z = iterate(Z)
print(Z)
# -
# #### 89. How to get the n largest values of an array (★★★)
# +
Z = np.arange(10000)
np.random.shuffle(Z)
n = 5
# Slow
print (Z[np.argsort(Z)[-n:]])
# Fast
print (Z[np.argpartition(-Z,n)[:n]])
# -
# #### 90. Given an arbitrary number of vectors, build the cartesian product (every combinations of every item) (★★★)
# +
# Author: <NAME>
def cartesian(arrays):
    """Return the cartesian product of 1-D int arrays as an (N, len(arrays)) array.

    Rows enumerate every combination, varying the last input fastest.
    """
    arrays = [np.asarray(a) for a in arrays]
    # Bug fix: np.indices needs a real sequence of lengths; the original passed
    # a generator expression, on which len() raises TypeError.
    shape = [len(a) for a in arrays]

    ix = np.indices(shape, dtype=int)
    ix = ix.reshape(len(arrays), -1).T

    # Replace each column of grid indices with the corresponding input values.
    for n, arr in enumerate(arrays):
        ix[:, n] = arrays[n][ix[:, n]]

    return ix
print (cartesian(([1, 2, 3], [4, 5], [6, 7])))
# -
# #### 91. How to create a record array from a regular array? (★★★)
Z = np.array([("Hello", 2.5, 3),
("World", 3.6, 2)])
R = np.core.records.fromarrays(Z.T,
names='col1, col2, col3',
formats = 'S8, f8, i8')
print(R)
# #### 92. Consider a large vector Z, compute Z to the power of 3 using 3 different methods (★★★)
# +
# Author: <NAME>.

# Bug fix: np.random.rand requires integer dimensions; the float literal 5e7
# raises TypeError in modern numpy, so convert explicitly.
x = np.random.rand(int(5e7))
# %timeit np.power(x,3)
# %timeit x*x*x
# %timeit np.einsum('i,i,i->i',x,x,x)
# -
# #### 93. Consider two arrays A and B of shape (8,3) and (2,2). How to find rows of A that contain elements of each row of B regardless of the order of the elements in B? (★★★)
# +
# Author: <NAME>
A = np.random.randint(0,5,(8,3))
B = np.random.randint(0,5,(2,2))
C = (A[..., np.newaxis, np.newaxis] == B)
rows = np.where(C.any((3,1)).all(1))[0]
print(rows)
# -
# #### 94. Considering a 10x3 matrix, extract rows with unequal values (e.g. \[2,2,3\]) (★★★)
# +
# Author: <NAME>
Z = np.random.randint(0,5,(10,3))
print(Z)
# solution for arrays of all dtypes (including string arrays and record arrays)
E = np.all(Z[:,1:] == Z[:,:-1], axis=1)
U = Z[~E]
print(U)
# solution for numerical arrays only, will work for any number of columns in Z
U = Z[Z.max(axis=1) != Z.min(axis=1),:]
print(U)
# -
# #### 95. Convert a vector of ints into a matrix binary representation (★★★)
# +
# Author: <NAME>
I = np.array([0, 1, 2, 3, 15, 16, 32, 64, 128])
B = ((I.reshape(-1,1) & (2**np.arange(8))) != 0).astype(int)
print(B[:,::-1])
# Author: <NAME>
I = np.array([0, 1, 2, 3, 15, 16, 32, 64, 128], dtype=np.uint8)
print(np.unpackbits(I[:, np.newaxis], axis=1))
# -
# #### 96. Given a two dimensional array, how to extract unique rows? (★★★)
# +
# Author: <NAME>
Z = np.random.randint(0,2,(6,3))
T = np.ascontiguousarray(Z).view(np.dtype((np.void, Z.dtype.itemsize * Z.shape[1])))
_, idx = np.unique(T, return_index=True)
uZ = Z[idx]
print(uZ)
# Author: <NAME>
# NumPy >= 1.13
uZ = np.unique(Z, axis=0)
print(uZ)
# -
# #### 97. Considering 2 vectors A & B, write the einsum equivalent of inner, outer, sum, and mul function (★★★)
# +
# Author: <NAME>
# Make sure to read: http://ajcr.net/Basic-guide-to-einsum/
A = np.random.uniform(0,1,10)
B = np.random.uniform(0,1,10)
np.einsum('i->', A) # np.sum(A)
np.einsum('i,i->i', A, B) # A * B
np.einsum('i,i', A, B) # np.inner(A, B)
np.einsum('i,j->ij', A, B) # np.outer(A, B)
# -
# #### 98. Considering a path described by two vectors (X,Y), how to sample it using equidistant samples (★★★)?
# +
# Author: <NAME>
phi = np.arange(0, 10*np.pi, 0.1)
a = 1
x = a*phi*np.cos(phi)
y = a*phi*np.sin(phi)
dr = (np.diff(x)**2 + np.diff(y)**2)**.5 # segment lengths
r = np.zeros_like(x)
r[1:] = np.cumsum(dr) # integrate path
r_int = np.linspace(0, r.max(), 200) # regular spaced path
x_int = np.interp(r_int, r, x) # integrate path
y_int = np.interp(r_int, r, y)
# -
# #### 99. Given an integer n and a 2D array X, select from X the rows which can be interpreted as draws from a multinomial distribution with n degrees, i.e., the rows which only contain integers and which sum to n. (★★★)
# +
# Author: <NAME>
X = np.asarray([[1.0, 0.0, 3.0, 8.0],
[2.0, 0.0, 1.0, 1.0],
[1.5, 2.5, 1.0, 0.0]])
n = 4
M = np.logical_and.reduce(np.mod(X, 1) == 0, axis=-1)
M &= (X.sum(axis=-1) == n)
print(X[M])
# -
# #### 100. Compute bootstrapped 95% confidence intervals for the mean of a 1D array X (i.e., resample the elements of an array with replacement N times, compute the mean of each sample, and then compute percentiles over the means). (★★★)
# +
# Author: <NAME>
X = np.random.randn(100) # random 1D array
N = 1000 # number of bootstrap samples
idx = np.random.randint(0, X.size, (N, X.size))
means = X[idx].mean(axis=1)
confint = np.percentile(means, [2.5, 97.5])
print(confint)
| 100_Numpy_exercises.ipynb |