code stringlengths 38 801k | repo_path stringlengths 6 263 |
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.autograd import Variable
import matplotlib.pyplot as plt
import torch.nn.functional as F
raw = pd.read_csv('../dat/schools_w_clusters.csv')
raw = raw[['Cluster ID', 'Id', 'Site name', 'Address', 'Zip', 'Phone']]
raw['Zip'] = raw['Zip'].astype(str)
raw['Phone'] = raw['Phone'].astype(str)
raw.head(15)
# +
# NOTE(review): this cell references record_formatter, model and loss, which
# are defined further down the notebook -- it only runs after executing those
# later cells first (out-of-order notebook execution).
inpt1 = record_formatter(raw.iloc[0])
inpt2 = record_formatter(raw.iloc[7])
inpt3 = record_formatter(raw.iloc[11])
# rows 0 and 7 are treated as a matching pair (label 1); the others as
# non-matching (label 0) -- a quick eyeball check of the loss values
otpt1, otpt2 = model.forward(inpt1, inpt2)
print(loss.forward(otpt1,otpt2,1))
otpt1, otpt3 = model.forward(inpt1, inpt3)
print(loss.forward(otpt1,otpt3,0))
otpt2, otpt3 = model.forward(inpt2, inpt3)
print(loss.forward(otpt2,otpt3,0))
# -
print('name max len =', raw['Site name'].str.len().max())
print('address max len =', raw['Address'].str.len().max())
print('Zip max len =', raw['Zip'].str.len().max())
print('phone max len =', raw['Phone'].str.len().max())
# for a total of max length 154
# ## defs
# The following insanity is how we need to convert into a useable Torch tensor of correct size and Variable...ness.
Variable(torch.from_numpy(np.random.rand(10)).float()).view(1,10)
# +
def extend_to_length(string_to_expand, length):
    """Right-pad *string_to_expand* with '~' up to *length* characters.

    A string already at (or beyond) *length* is returned unchanged.
    """
    padding_needed = length - len(string_to_expand)
    return string_to_expand + '~' * padding_needed
def record_formatter(record):
    """Encode one school record as a (1, 154) float tensor of character codes.

    Each field is '~'-padded to a fixed width (95 + 43 + 7 + 9 = 154), the
    fields are concatenated, and every character is mapped to its ordinal.
    """
    widths = (('Site name', 95), ('Address', 43), ('Zip', 7), ('Phone', 9))
    padded = ''.join(extend_to_length(record[col], width) for col, width in widths)
    codes = np.array([ord(ch) for ch in padded])
    return Variable(torch.from_numpy(codes).float()).view(1, len(codes))
# +
class SiameseNetwork(nn.Module):
    """Twin fully-connected encoder: both inputs go through shared weights."""

    def __init__(self):
        super(SiameseNetwork, self).__init__()
        # 154 input character codes -> 100 -> 80-dim embedding in (0, 1)
        self.fc1 = nn.Sequential(
            nn.Linear(154, 100),
            nn.ReLU(inplace=True),
            nn.Linear(100, 80),
            nn.Sigmoid(),
        )

    def forward_once(self, x):
        """Embed a single input through the shared encoder."""
        return self.fc1(x)

    def forward(self, input1, input2):
        """Embed both inputs and return the pair of embeddings."""
        return self.forward_once(input1), self.forward_once(input2)
class ContrastiveLoss(torch.nn.Module):
    """Contrastive loss over a pair of embeddings (Hadsell et al., 2006).

    loss = mean( (1 - label) * d^2 + label * clamp(margin - d, 0)^2 )
    where d is the Euclidean distance between the two embeddings.

    NOTE(review): with this formula label=0 pulls the pair together and
    label=1 pushes it apart, while the training cell labels *matching*
    clusters with 1 -- the labels look inverted; confirm intent before
    relying on the trained model.
    (An older, dead copy of forward() that was kept inside a triple-quoted
    string has been removed.)
    """

    def __init__(self, margin=1.0):
        super(ContrastiveLoss, self).__init__()
        self.margin = margin  # minimum distance enforced for label==1 pairs

    def forward(self, output1, output2, label):
        """Return the mean contrastive loss for the batch of pairs."""
        euclidean_distance = F.pairwise_distance(output1, output2)
        loss_contrastive = torch.mean(
            (1 - label) * torch.pow(euclidean_distance, 2)
            + label * torch.pow(torch.clamp(self.margin - euclidean_distance, min=0.0), 2)
        )
        return loss_contrastive
# +
inpt1 = record_formatter(raw.iloc[0])
inpt2 = record_formatter(raw.iloc[7])
otpt1, otpt2 = model.forward(inpt1,inpt2)
loss.forward(otpt1,otpt2,1)
# -
# ## data characteristics
raw.shape
raw['Cluster ID'].unique().shape
# ## training
model = SiameseNetwork()
loss = ContrastiveLoss(margin=1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.1)
# +
# %%time
diff = 10  # pair each record with the one `diff` rows ahead
loss_holder = []
model.train()
for epoch in range(10):
    for i in range(raw.shape[0] - diff):
        # build data pairs
        inpt1 = record_formatter(raw.iloc[i])
        inpt2 = record_formatter(raw.iloc[i + diff])
        label = 1 if (raw.iloc[i]['Cluster ID'] == raw.iloc[i + diff]['Cluster ID']) else 0
        # forward
        otpt1, otpt2 = model.forward(inpt1, inpt2)
        optimizer.zero_grad()
        loss_calc = loss.forward(otpt1, otpt2, label)
        # FIX: the original re-wrapped the loss here as
        # Variable(loss_calc.data, requires_grad=True), which detaches it
        # from the computation graph -- backward() then computed no gradients
        # for the model and no training actually happened.
        # backprop
        loss_calc.backward()
        optimizer.step()
        # log (.item() replaces the deprecated loss_calc.data[0])
        loss_holder.append(loss_calc.item())
        if i == raw.shape[0] - diff - 1:
            print('loss for epoch', epoch, 'is',
                  sum(loss_holder[-raw.shape[0]:]))
model.eval()
# -
model.state_dict().keys()
inpt1.size()
loss_calc
model.forward(inpt1,inpt2)
plt.plot(loss_holder)
plt.show()
plt.plot(loss_holder[:raw.shape[0]])
plt.show()
model.state_dict()
| nb/first_try.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.1 64-bit
# name: python3
# ---
# # <NAME> & <NAME>: Project 7
# ### Topic modeling and keywords extractions for Holi.io
# ### Jedha Full Stack, dsmf-paris-13
# ### 08-2021
#
# This project is the final project as Jedha Students.
# Idea has been submitted by Holi.io Founder : <NAME>
# The specifications from Holi.io can be found [here](https://github.com/FlorianG-dev/Jedha_certification/blob/master/7_Holi/Project_initialization.pdf). It is the project number 1: Topic modeling
#
# ---
#
# This notebook is the **first** notebook in a series of two
# # **1) Initialization**
# ----
# ## **1.1) We begin with the Import of the different libraries we will use and their configurations**
# ----
# +
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import re
import string
from itertools import product
from collections import defaultdict
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
# #!spacy download en_core_web_sm -q
import en_core_web_sm
# -
# ## **1.2) Data collection**
# ---
# ### We work on the Mind Dataset. You can find more information [here](https://msnews.github.io/)
# First, we gather the data relative to the articles:
# The news.tsv file contains the detailed information of news articles involved in the behaviors.tsv file.
# It has 8 columns, which are divided by the tab symbol:
# - News ID
# - Category
# - Subcategory
# - Title
# - Abstract
# - URL
# - Title Entities (entities contained in the title of this news)
# - Abstract Entities (entities contained in the abstract of this news)
file_news = pd.read_table('Data/news.tsv',
header=None,
names=[
'id', 'category', 'subcategory', 'title', 'abstract', 'url',
'title_entities', 'abstract_entities'
])
file_news['nid']=file_news['url'].str[-12:-5] #The id of the articles are extracted from the URL
# Then, we gather the content of these articles
json_news = pd.read_json('Data/msn-article_content.json')
json_news['text'] = [' '.join(x) for x in json_news['body']]
json_news = json_news.drop(['body'], axis=1)
print("Shape of the articles' content :",json_news.shape)
print("Shape of the articles' information :",file_news.shape)
# We merge those files to create our dataset
raw_data = file_news.merge(json_news, on = 'nid').drop(['title_entities', 'abstract_entities', 'url', 'abstract'], axis=1)
raw_data.head()
print("Shape of our dataset:",raw_data.shape)
# # **2) EDA**
# ---
# ## **2.1) Dataset Overview**
# ---
df = raw_data.copy() # We create our dataframe df
#df = raw_data.sample(1000).copy() #sample for testing
df.head()
df.describe(include = 'all')
# ## **2.2) Deleting first outliers and duplicates**
# ---
df = df[df['text'].str.len()<20000].reset_index() # We remove the texts too long
df = df[df['text'].str.len()>30].reset_index() # We remove the texts too short
df = df.drop_duplicates(['text']) # We remove texts that appear multiple times
df = df.drop(['level_0','index'], axis=1)
# ## **2.3) Looking at punctuations**
# ---
ponctuations = {x : 0 for x in string.punctuation}
for ponc,text in product(string.punctuation, df['text']):
ponctuations[ponc] += text.count(ponc)
x,y=zip(*ponctuations.items())
plt.figure()
plt.ylabel("number of occurences")
plt.title("Occurences for each ponctuation in the articles")
plt.bar(x,y)
plt.show()
# ## **2.4) Handling missing values**
# ---
df.isnull().value_counts()
# ### **We do not have missing values in this dataset**
# ## **2.5) Text length**
# ---
plt.figure()
ax = sns.histplot(df.text.str.len(),color='red')
ax.set (ylabel="Number of articles",
title ="text lenght",
xlabel = "lenght")
plt.show();
# ### Most of the articles are under 10 000 characters and a lot are under 100, we will need to take care of missing values after the tokenization
# We take a look at the short articles :
df[df['text'].str.len()<100]
# # **3) Data cleaning**
# ---
# ## **3.1) Cleaning function**
# ---
# ### We create a function to clean the dataset. This functions has evolved during all the projects as much as we faced issues or wanted to transform more the text
def clean(article):
    """Normalize a raw article down to lower-noise alphabetic text.

    Steps: strip URLs, literal "\\r"/"\\n" markers and possessive "'s",
    drop every non-alphabetic character, collapse whitespace, then strip.
    """
    # FIX: the original started with article.encode("utf-8").decode("utf-8")
    # but discarded the result -- it was a no-op and has been removed.
    # FIX: the original URL patterns ("https:[A-Za-z0-9]+") stopped at the
    # first punctuation character, so real URLs (with //, dots, slashes)
    # were only partially removed; match the whole non-space token instead.
    article = re.sub(r"https?://\S+", "", article)
    article = re.sub(r"www\.\S+", "", article)
    # We delete the literal \r and \n sequences that are everywhere in the articles
    article = re.sub(r"\\r|\\n", "", article)
    # We delete the possessive 's
    article = re.sub(r"'s", "", article)
    # We delete everything that is not alphabetic
    article = re.sub(r"[^a-zA-Z]+", " ", article)
    # Transform multiple spaces into one space
    article = re.sub(r"\s{2,}", " ", article)
    return article.strip()
# And apply it to all our articles
# %time df['text_cleaned'] = df['text'].apply(lambda s : clean(s))
# ## **3.2) Cleaning results**
# ---
# We visualize the length of the articles, first for all the articles, then for the short articles
plt.figure()
ax = sns.histplot(df.text.str.len(),color='red')
ax.set (ylabel="Number of articles",
title ="Text lenght for all the articles",
xlabel = "Lenght")
plt.show();
plt.figure()
ax = sns.histplot(df[df.text.str.len()<500].text.str.len(),color='red')
ax.set (ylabel="Number of articles",
title ="Text lenght for articles with lenght < 500",
xlabel = "Lenght")
plt.show();
# Let's visualize the stop words in our cleaned dataset
# +
all_articles = df.text.str.cat(sep=' ').split() # One very long string with all the articles
dic=defaultdict(int)
for word in all_articles:
if word in STOP_WORDS:
dic[word]+=1
top=sorted(dic.items(), key=lambda x:x[1],reverse=True)[:15]
x,y=zip(*top)
plt.bar(x,y)
plt.xlabel("Stop Words")
plt.ylabel("Occurence in million")
# -
# ## **3.3) Lemmatization**
# ---
df.head()
# ### We use a dic to replace common words with our empirical observations, this dic has evolved during the project
to_replace={
'sen':'senate',
'senator':'senate',
'teacher':'teaching'
}
# ### We remove Stop Words, with empirical observations with LDA model and Bert words extractions, we complete the stop_words list
add_stop_words={'tonight',
'yes',
'no',
'hey',
'okay',
'etc',
'mr',
'mss',
'ms',
'er',
'v',
'monthly',
'tb',
'sec',
'mind'}
STOP_WORDS |= add_stop_words
# ### Now we perform two actions on our articles :
# ### - Remove stop-words
# ### - Remove words which are not Noun, adjective or verb
# ### **Warning : very long, 3 000 seconds for the small dataset**
# +
nlp = en_core_web_sm.load()
excluded_tags = {"ADV", "ADP", "AUX", "NUM"}
# %time df['text_tokenized'] = df['text_cleaned'].apply(lambda x: [token.lemma_ for token in nlp(x) if (token.pos_ not in excluded_tags) & (token.lemma_.lower() not in STOP_WORDS) & (token.text.lower() not in STOP_WORDS) & (len(token.lemma_) >1)])
df['nlp_ready'] = df['text_tokenized'].str.join(' ').str.lower() # We replace the capitals letter
df = df.drop(['text_tokenized'], axis=1)
df = df.dropna() # The cleaning has created empty cells, we delete the rows accordingly
df.to_csv('Data/smallMind_clean_data_without_stop_words.csv', index=False) # We save our dataset to a csv file
# -
df.head()
# ---
#
# # **The next steps are in the notebook Step2_LDA_model_training**
| 7_Holi/step1_import_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Attention Model Pipeline
# +
import collections
import pandas as pd
import numpy as np
import time
import os
import tensorflow as tf
from tensorflow.keras.layers import *
from tensorflow.keras.models import Model
from tensorflow.keras.models import load_model
from tensorflow.keras.utils import plot_model
from TrainValTensorBoard import TrainValTensorBoard
import matplotlib.pyplot as plt
from scipy import signal
pd.set_option('display.max_columns', 500)
# -
os.environ['HDF5_USE_FILE_LOCKING'] = 'FALSE'
tf.__version__
# ### Load Datasets and Clean
# In this configuration the relevant data set should be loaded from the same folder as the notebook
df = pd.read_csv('/nfs/2018/j/jcruz-y-/neurotron_datasets/joined/joined_data_106979_24-Oct-19_17:31_jose_all_1.csv')
# The data consists of timestamps from the two hardware devices and a diff between them. When the two hardware data streams were stitched together an effort was made to minimize this diff, but the driver configuration did not easily permit eliminating it. This information is included to understand the accuracy of the data, but will not be used during the training.
#
# The time data is followed by the 8 channels from the Myo, this data will be used as input features.
#
# This is followed by the 63 positional points from the Leap cameras. These will be used as labels.
df.head()
df = df.drop(labels=["Leap timestamp", "timestamp diff", "emg timestamp"], axis=1)
df.describe()
# + active=""
# df = df[:5000]
# df.shape
# +
def preprocess_features(x_train):
    """Replace +/-inf values with 0 and reshape rows into (N, 7, 13, 8) samples.

    x_train is a DataFrame whose row width must be a multiple of 7*13*8 = 728.
    """
    sample_shape = (7, 13, 8)
    cleaned = x_train.replace(-np.inf, 0).replace(np.inf, 0)
    #cleaned = np.log(cleaned.values)
    return cleaned.values.reshape((-1,) + sample_shape)
#features = preprocess_features(df)
# -
feature_ar = df.loc[:, 'ch1':'ch8'].values
label_ar = df.loc[:, 'Wrist x':].values
label_ar_tips = label_ar[:,[0,1,2,12,13,14,24,25,26,36,37,38,48,49,50,60,61,62]]
label_ar_tips.shape
feature_ar.shape
label_ar.shape
# +
seq_length = 24
def overlap_samples(seq_length, feats, labels):
    """Build overlapping feature windows of *seq_length* consecutive rows.

    Returns (windows, aligned_labels): window i covers feats[i:i+seq_length]
    and is paired with the label of its last row.
    """
    aligned_labels = labels[seq_length - 1:]
    n_windows = feats.shape[0] - seq_length + 1
    windows = np.array([feats[start:start + seq_length] for start in range(n_windows)])
    return windows, aligned_labels
features, labels = overlap_samples(seq_length, feature_ar, label_ar)
features, labels_tips = overlap_samples(seq_length, feature_ar, label_ar_tips)
print(features.shape)
print(labels.shape)
# -
# ## Attention Basics
# [attention mechanism from scratch](https://towardsdatascience.com/learning-attention-mechanism-from-scratch-f08706aaf6b6)
# ### Attention scoring
#
# #### Inputs to the scoring function
#
# Start by looking at the inputs we'll give to the scoring function.
# We will assume we're in the first step in the decoding phase.
# First input to scoring function is hidden state of decoder.
# Assuming a toy RNN with three hidden nodes -- not usable in real life but easier to illustrate
#
# ### decoder hidden state vector
dec_hidden_state = [5, 1, 20] # Decoder hidden state (query vector?)
# Visualize this vector
import seaborn as sns
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(dec_hidden_state)), annot=True, cmap=sns.light_palette("purple", as_cmap=True), linewidths=1)
# ### Encoder hidden state = annotation
#
# Our first scoring function will score a single annotation (encoder hidden state), which looks like this:
annotation = [3, 12, 45] # i.e. Encoder hidden state
# Visualizing single annotation
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(annotation)), annot=True,
cmap=sns.light_palette("orange",as_cmap=True), linewidths=1)
# ### Scoring a single annotation
# dot product of decoder hidden state and encoder hidden state
# +
def single_dot_attention_score(dec_hidden_state, enc_hidden_state):
    """Luong-style dot-product score between one decoder state and one annotation."""
    return np.asarray(dec_hidden_state) @ np.asarray(enc_hidden_state)
single_dot_attention_score(dec_hidden_state, annotation)
# -
# ### Annotations Matrix
# All the scoring of annotations at once. To do that, here's annotation matrix
annotations = np.transpose([[3, 12, 45], [59, 2, 5], [1, 43, 5], [4, 3, 45.3]])
# It can be visualized like this (each column is a hidden state of an encoder time step):
ax = sns.heatmap(annotations, annot=True, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1)
# ### Implement: Scoring all annotations at once
#
# +
def dot_attention_score(dec_hidden, annotations):
    """Score every annotation (column of *annotations*) against *dec_hidden*.

    FIX: the original body referenced the notebook-global `dec_hidden_state`
    instead of the `dec_hidden` parameter, silently ignoring the argument.
    """
    return np.matmul(np.transpose(dec_hidden), annotations)
attention_weights_raw = dot_attention_score(dec_hidden_state, annotations)
attention_weights_raw
# -
# Looking at these scores the 929 will get the most attention from the decoder
#
# ### Softmax
# After getting scores we apply softmax
# +
def softmax(x):
    """Numerically stable softmax along axis 0.

    FIXES: np.float128 is not available on every platform (e.g. Windows /
    some ARM builds), and exponentiating raw scores overflows for large
    inputs. Subtracting the per-column max gives identical output in
    ordinary float64 without either problem.
    """
    x = np.asarray(x, dtype=np.float64)
    e_x = np.exp(x - x.max(axis=0))
    return e_x / e_x.sum(axis=0)
attention_weights = softmax(attention_weights_raw)
attention_weights.shape
# -
# Its good to appreciate again, even after knowing which annotation will get the most focus,
# just how much more drastic the softmax makes the difference.
# The first and last annotation had 927 and 929 after the softmax they get .119 and .880 respectively
# Even a variation of less than 1% ((1 - 929/927)*100) gets incremented to a variation of 800% ((1 - 929/927)*100)!
# ### Applying the scores back on the annotations
# Now that we have our scores, let's multiply each annotation by its score to proceed closer to the attention context vector. This is the multiplication part of this formula (we'll tackle the summation part in the latter cells)
# +
def apply_attention_scores(attention_weights, annotations):
    """Scale each annotation column by its attention weight (element-wise, broadcast)."""
    weighted = np.multiply(attention_weights, annotations)
    return weighted
applied_attention = apply_attention_scores(attention_weights, annotations)
applied_attention
# -
# Now let's see how the context vector looks now
# Visualizing annotations after applying attention
ax = sns.heatmap(applied_attention, cmap=sns.light_palette("orange", as_cmap=True), linewidths=1, annot=True)
# Contrasting this with the original annotations matrix and the second and third column have been reduced
# greatly
# ### Calculating Attention Context Vector
# All that remains to produce our attention context vector now is to sum up the four columns to produce a single attention context vector.<br>
# +
def calculate_attention_vector(applied_attention):
    """Collapse the weighted annotations into a single context vector.

    Sums across axis 1, i.e. across encoder time steps (columns).
    """
    context_vector = np.sum(applied_attention, axis=1)
    return context_vector
attention_vector = calculate_attention_vector(applied_attention)
attention_vector
# -
# Visualizing attention context vector
plt.figure(figsize=(1.5, 4.5))
sns.heatmap(np.transpose(np.matrix(attention_vector)), cmap=sns.light_palette("blue", as_cmap=True), linewidths=1, annot=True)
# ### Attention
# **This model will incorporate a component of attention**
#
# ### Model from <NAME>
# https://github.com/philipperemy/keras-attention-mechanism/blob/master/attention_lstm.py
# Attention vector
#
# Also, sometimes the time series can be N-dimensional. It could be interesting to have one attention vector per dimension.
#
# Attention can just then be a softmax applied to an output of something?
#
# The permute function switches the positions of the axis and the dims argument tells how you want the final positions to be.
#
# For example, if x is 4-dimensional and of the shape (None, 2, 4, 5, 8) - (None is the batch size here) and if you specify dims = (3, 2, 1, 4), then the following four steps will take place:
#
# 1. Third dimension will move to first
# 2. Second dimension will move to second
# 3. First dimension will move to third
# 4. Fourth dimension will move to fourth
#
# Remember, the indexing starts at 1 and not 0. The dimension zero is the batch size. So finally the output
#
# **RepeatVector**
# Repeats the input vector n times
#
#
# Updated version:
# https://github.com/philipperemy/keras-attention-mechanism/issues/14
# +
def attention_3d_block_2(hidden_states):
    """Luong-style multiplicative attention over LSTM hidden states.

    hidden_states: (batch_size, time_steps, hidden_size).
    Returns a (batch_size, 128) tanh-activated attention vector built from
    the context vector concatenated with the last hidden state.
    """
    hidden_size = int(hidden_states.shape[2])
    # Inside dense layer: hidden_states dot W => score_first_part
    # (batch_size, time_steps, hidden_size) dot (hidden_size, hidden_size)
    # => (batch_size, time_steps, hidden_size); W is the trainable weight
    # matrix of attention (Luong's multiplicative style score).
    score_first_part = Dense(hidden_size, use_bias=False, name='attention_score_vec')(hidden_states)
    # h_t = last hidden state, shape (batch_size, hidden_size)
    h_t = Lambda(lambda x: x[:, -1, :], output_shape=(hidden_size,), name='last_hidden_state')(hidden_states)
    # score_first_part dot h_t => (batch_size, time_steps) raw scores
    score = dot([score_first_part, h_t], [2, 1], name='attention_score')
    attention_weights = Activation('softmax', name='attention_weight')(score)
    # weighted sum over time => context vector (batch_size, hidden_size)
    context_vector = dot([hidden_states, attention_weights], [1, 1], name='context_vector')
    pre_activation = concatenate([context_vector, h_t], name='attention_output')
    # project down to a fixed 128-dim attention vector
    attention_vector = Dense(128, use_bias=False, activation='tanh', name='attention_vector')(pre_activation)
    return attention_vector
    #return pre_activation
# +
INPUT_DIM = 8
TIME_STEPS = 24
# if True, the attention vector is shared across the input_dimensions where the attention is applied.
SINGLE_ATTENTION_VECTOR = True
APPLY_ATTENTION_BEFORE_LSTM = False
def attention_3d_block_cnn(inputs, timesteps):
    """Attention via a same-padded Conv1D that emits per-channel softmax weights.

    FIX: the original returned `output_attention_mu` -- an undefined name
    (typo for `output_attention_mul`) that raised NameError on every call.
    """
    input_dim = int(inputs.shape[2])
    time_steps = timesteps  # kept for parity with the other blocks (currently unused)
    a_probs = Conv1D(input_dim, 3, strides=1, padding='same', activation='softmax')(inputs)
    output_attention_mul = Multiply()([inputs, a_probs])  # name='attention_mul'
    return output_attention_mul
def attention_3d_block(inputs):
    """Time-step attention applied to inputs of shape (batch_size, time_steps, input_dim).

    Learns a softmax weighting over the TIME_STEPS axis and combines it with
    the inputs. Depends on the module-level TIME_STEPS and
    SINGLE_ATTENTION_VECTOR settings.
    """
    input_dim = int(inputs.shape[2])
    a = Permute((2, 1))(inputs)  # swap axes: (batch, input_dim, time_steps)
    a = Reshape((input_dim, TIME_STEPS))(a)  # no-op reshape, kept to document which dimension is what
    a = Dense(TIME_STEPS, activation='softmax')(a)  # softmax over time steps, one row per input dimension
    if SINGLE_ATTENTION_VECTOR:
        # average the per-dimension weights into one shared attention vector,
        # then repeat it so every input dimension uses the same weights
        a = Lambda(lambda x: tf.keras.backend.mean(x, axis=1))(a)
        a = RepeatVector(input_dim)(a)
    a_probs = Permute((2, 1))(a)  #, name='attention_vec')(a)
    # NOTE(review): the attention weights are ADDED to the inputs
    # (residual-style) rather than multiplied as in classic attention --
    # confirm this is intentional.
    output_attention_mul = Add()([a_probs, inputs])
    return output_attention_mul
def model_attention_applied_after_lstm():
    """Build the stacked LSTM + attention regression model.

    Input: (TIME_STEPS, INPUT_DIM) EMG windows; output: 63 positional values
    (linear activation -- raw regression targets for the Leap coordinates).
    """
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    lstm_units = 40
    # first LSTM block with regularization, attention applied after it
    model = LSTM(lstm_units, return_sequences=True)(inputs)
    model = Dropout(0.3)(model)
    model = BatchNormalization()(model)
    model = attention_3d_block(model)
    #model = LSTM(lstm_units, return_sequences=True)(model)
    #model = attention_3d_block_cnn()(model)
    # second identical LSTM + attention block
    model = LSTM(lstm_units, return_sequences=True)(model)
    model = Dropout(0.3)(model)
    model = BatchNormalization()(model)
    model = attention_3d_block(model)
    model = Flatten()(model)
    output = Dense(63, activation=None)(model)
    model = Model(inputs, output)
    return model
model = model_attention_applied_after_lstm()
model.summary()
# -
optimizer = tf.keras.optimizers.Adam(lr=0.005, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.00, amsgrad=True)
model.compile(optimizer=optimizer, loss='mse')
model.evaluate(features, labels, verbose=1)
# ### Attention shared
# val loss = 430
history = model.fit(features, labels, epochs=40, verbose=1, validation_split=0.2, callbacks=[TrainValTensorBoard()])
model.save('Attention_jose_all_loss400_vloss500_model.h5')
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.legend(['Train', 'Test'])
plt.title('Attention')
preds = model.predict(features)
error = labels - preds
sq_error = error * error
avg_error = np.mean(sq_error, axis=0)
plt.figure(figsize=(15, 10))
plt.xticks(rotation=90)
plt.ylabel('Prediction Error (mm)')
bar = plt.bar(df.columns[8:], avg_error)
for i in range(0,63,3):
bar[i].set_color('coral')
bar[i+1].set_color('olivedrab')
plt.show()
| ml/attention_model_pipeline.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import os
import pandas as pd
import numpy as np
from convokit import Corpus, User, Utterance
# -
# (a directory containing all files used in the original dataset, found [here](https://gitlab.com/ucdavisnlp/persuasionforgood/tree/master/data). replace with your own directory.)
ROOT_DIR = '<YOUR DIRECTORY>'
# Converting user-level information. We'll assume that each user, across all the conversations they've participated in, will have the same survey information.
user_df = pd.read_csv(os.path.join(ROOT_DIR, 'full_info.csv'))
user_df.columns = [c.replace('.x','') for c in user_df.columns]
survey_cols = [c for c in user_df.columns if not c.startswith('B')]
user_df.columns = ['dialogue_id', 'user', 'role', 'donation', 'n_turns'] + survey_cols
user_df.head()
user_meta_dict = user_df.drop_duplicates('user').set_index('user')[survey_cols].to_dict(orient='index')
corpus_users = {k: User(name=k, meta=v) for k,v in user_meta_dict.items()}
# we'll also keep track of which users are involved in which dialogue.
er_info = user_df[user_df.role == 0].set_index('dialogue_id')[['user','donation']]
ee_info = user_df[user_df.role == 1].set_index('dialogue_id')[['user','donation']]
convo_info = er_info.join(ee_info, lsuffix='_er', rsuffix='_ee')
convo_info.head()
# Converting utterances.
utt_df = pd.read_csv(os.path.join(ROOT_DIR, 'full_dialog.csv'))
utt_df.columns = ['turn_id', 'text', 'user_turn_id', 'role', 'dialogue_id']
utt_df.head()
# Here, we get the user_ids of the author of each utterance.
utt_df = utt_df.join(convo_info[['user_er','user_ee']], on='dialogue_id')
utt_df['user'] = utt_df[['user_er','user_ee']].values[np.arange(len(utt_df)), utt_df.role.values]
# since convokit conversation ids use the id of the root utterance (i.e., the first utterance), we'll keep track of the mapping from dialogue_ids to convokit's conversation ids.
utt_df['id'] = utt_df.index
dialogue_to_convo_id = utt_df.groupby('dialogue_id')['id'].min()
dialogue_to_convo_id.sort_values().head()
# Adds the reply and root info.
utt_df['reply_to'] = utt_df['id'].shift()
utt_df.loc[utt_df.turn_id==0,'reply_to'] = np.nan
utt_df = utt_df.join(dialogue_to_convo_id.rename('root'), on='dialogue_id')
utt_df.head()
utt_df.head()
# keeps track of intended donations. note these are only available for persuadees (`role==1`) for 300 manually-annotated dialogues.
intended_df = pd.read_excel(os.path.join(ROOT_DIR, '300_info.xlsx'))
intended_df.columns = ['dialogue_id','user','role','intended', 'actual', 'n_turns']
intended_df = intended_df[intended_df.role == 1].set_index('dialogue_id')
intended_df.head()
convo_info = convo_info.join(intended_df.intended)
convo_info['is_annotated'] = convo_info.intended.notnull()
convo_info.head()
# Reads the annotated subset. Note that annotations are per sentence. We'll store these as a list of annotations; we'll also keep around the sentence-tokenized text for these annotated utterances to facilitate corresponding the annotations with the texts.
annot_df = pd.read_excel(os.path.join(ROOT_DIR, 'data_AnnotatedData_300_dialog.xlsx'), index_col=0)
annot_df.columns = ['dialogue_id', 'role', 'user_turn_id', 'text', 'er_label_1', 'ee_label_1',
'er_label_2', 'ee_label_2', 'neg','neu','pos']
annot_df['turn_id'] = annot_df.dialogue_id + '__' + annot_df.role.map(str) + '__' + annot_df.user_turn_id.map(str)
agg_annots = annot_df.groupby('turn_id')[['er_label_1','er_label_2','ee_label_1','ee_label_2',
'neg','neu','pos']].agg(list)
agg_sents = annot_df.groupby('turn_id').text.agg(lambda x: ' <s> '.join(x)).rename('text_by_sent')
sents_per_utt = annot_df.turn_id.value_counts().rename('n_sents')
# Join the annotations to the utterances.
utt_df['turn_id'] = utt_df.dialogue_id + '__' + utt_df.role.map(str) + '__' + utt_df.user_turn_id.map(str)
utt_df = utt_df.join(agg_annots, on='turn_id').join(agg_sents, on='turn_id').join(sents_per_utt, on='turn_id')
utt_df['label_1'] = utt_df[['er_label_1','ee_label_1']].values[np.arange(len(utt_df)), utt_df.role.values]
utt_df['label_2'] = utt_df[['er_label_2','ee_label_2']].values[np.arange(len(utt_df)), utt_df.role.values]
utt_df.head()
# We're now ready to construct a new Corpus object given the utterances. Note a gotcha (to be fixed or documented in a future release) -- to facilitate reading the corpus back into memory later, utterance and hence conversation IDs must be stored as strings.
# to deal with some data type problems
def safe_convert_str(x):
    """Convert a possibly-NaN float id to a string ('3.0' -> '3'); NaN -> None."""
    return None if np.isnan(x) else str(int(x))
utterances = []
for utt_id, row in utt_df.iterrows():
    # FIX: the original called safe_convert_int(), which is never defined
    # anywhere in the notebook (NameError at runtime). n_sents is NaN for
    # the un-annotated utterances, so it is routed through safe_convert_str
    # like the other nullable numeric fields.
    utterance = Utterance(id=str(utt_id), user=corpus_users[row.user], root=str(row.root),
                          reply_to=safe_convert_str(row.reply_to), text=row.text,
                          meta={
                              'user_turn_id': row.user_turn_id,
                              'role': row.role,
                              'text_by_sent': row.text_by_sent,
                              'n_sents': safe_convert_str(row.n_sents),
                              'label_1': row.label_1,
                              'label_2': row.label_2,
                              'sentiment': {'neg': row.neg, 'neu': row.neu, 'pos': row.pos},
                          })
    utterances.append(utterance)
utterances[13]
utterances[1993]
corpus = Corpus(utterances=utterances, version=1)
corpus.meta['name'] = "Persuasion For Good Corpus"
corpus.print_summary_stats()
# update convo-level metadata
convo_info = convo_info.join(dialogue_to_convo_id.rename('convo_id'))
convo_info['dialogue_id'] = convo_info.index
convo_info = convo_info.set_index('convo_id')
convo_info_dict = convo_info.to_dict(orient='index')
for convo_id in corpus.get_conversation_ids():
conversation = corpus.get_conversation(convo_id)
conversation.meta = convo_info_dict[int(convo_id)]
# finally, write to disk.
corpus.dump('persuasionforgood_corpus', base_path=os.path.dirname(ROOT_DIR))
| datasets/persuasionforgood-corpus/p4g_corpus_conversion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
# import necessary packages
import os
import pandas
import numpy
import matplotlib.pyplot as plt
#import geopandas as gpd
from pathlib import Path
# import function for querying DOE dataframesaq
from monte_carlo_utils import get_DOE_atb, get_DOE_barges, concat_shp
from dask import delayed
# Input locations on the Salish analysis machine: monthly AIS-track
# shapefiles, the DOE transfer spreadsheet, and the oil-terminal list.
atb_shp_path = Path('/data/MIDOSS/shapefiles/atb_2018_01.shp')
barge_shp_path = Path('/data/MIDOSS/shapefiles/barge_2018_01.shp')
doe_xls_path = Path('/data/MIDOSS/spreadsheets/MuellerTrans4-30-20.xlsx')
fac_xls_path = Path(
    '/home/rmueller/Projects/MIDOSS/marine_transport_data/Oil_Transfer_Facilities.xlsx'
)
shapefile_path = Path('/data/MIDOSS/shapefiles/')
# Washington marine oil-transfer facility names/locations
facWA = pandas.read_excel(
    fac_xls_path,
    sheet_name = 'Washington',
    usecols="B,D,J,K"
)
# British Columbia facility names (first 10 rows of column A only)
facCAD = pandas.read_excel(
    fac_xls_path,
    sheet_name = 'British Columbia',
    usecols="A",
    nrows=10
)
# +
# def concat_shp(ship_type, shapefile_path):
# """
# INPUT:
# - ship_type ["tanker", "barge", "atb", etc]: MIDOSS-name for ship type (see oil_attribution.yaml for list)
# - shapefile_path [Path]: e.g., on Salish,Path('/data/MIDOSS/shapefiles/')
# OUTPUT:
# - dataframe of all 2018 ship tracks for given ship_type
# requirements:
# This script requires being run on Salish.eoas.ubc.ca, with path to shapefiles being /data/MIDOSS/shapefiles/
# """
# for months in range(1,13):
# # set file location and name
# shapefile = shapefile_path/f'{ship_type}_2018_{months:02d}.shp'
# # import shapefile using geopandas
# monthly_shp = gpd.read_file(shapefile)
# if months == 1:
# print(f'creating {ship_type} shapefile for 2018, starting with January data')
# allTracks = monthly_shp
# else:
# print(f'Concatenating {ship_type} data from month {months}')
# allTracks = gpd.GeoDataFrame(
# pandas.concat([allTracks, monthly_shp])
# )
# return allTracks
# -
# # Concatenate all monthly ship track data to get values for entire year
# ### ATBs
# %%time
# Concatenate all 12 months of 2018 ship tracks per vessel type.
#
# BUG FIX: the original wrapped an *already evaluated* call in delayed():
#     delayed(concat_shp("atb", shapefile_path))
# which runs concat_shp eagerly and merely wraps its result, so .compute()
# gains nothing.  To actually defer work to Dask, wrap the function itself
# and then call the wrapper: delayed(concat_shp)("atb", shapefile_path).
allTracks = {}
allTracks_dask = delayed(concat_shp)("atb", shapefile_path)
allTracks["atb"] = allTracks_dask.compute()
allTracks["atb"].shape[0]
# ### Barges
# %%time
allTracks_dask = delayed(concat_shp)("barge", shapefile_path)
allTracks["barge"] = allTracks_dask.compute()
# ### Tankers
# %%time
ship_type = "tanker"
# Tankers are loaded directly (no Dask deferral used here).
allTracks["tanker"] = concat_shp("tanker", shapefile_path)
# ## Check barge ship track count used in ping-to-transfer ratio estimate
# - values recorded in `Origin_Destination_Analysis_updated.xlsx`
# Counts used to sanity-check against the ping-to-transfer estimate
# recorded in Origin_Destination_Analysis_updated.xlsx.
print(f'{allTracks["atb"].shape[0]} ATB ship tracks')
print(f'cf. 588,136 ATB ship tracks used in ping-to-transfer estimate')
print(f'{allTracks["barge"].shape[0]} barge ship tracks')
# ##### Take-away: total number of ship tracks used in the ping-to-transfer ratio matches those used in this analysis. That's good. It's what I wanted to verify.
# ## Find all ATB and barge tracks with generic attribution as both origin and destination
# +
# "Generic" tracks have a broad region (US/Canada/Pacific) rather than a
# named terminal as BOTH origin (FROM_) and destination (TO).
attribution = ['US','Canada','Pacific']
noNone = {}
allNone = {}
generic = {}
for vessel_type in ["atb",'barge']:
    generic[vessel_type] = allTracks[vessel_type].loc[
        (allTracks[vessel_type].TO.isin(attribution)) &
        (allTracks[vessel_type].FROM_.isin(attribution))
    ]
# -
generic["barge"].shape
# ## Find all ship tracks with None as origin and destination
for vessel_type in ["atb",'barge']:
    # keep rows with None attribution
    # NOTE(review): any(axis=1) flags a row when ANY column is null, not
    # only TO/FROM_ -- confirm other columns cannot be null here.
    shp_tmp = allTracks[vessel_type].isnull()
    row_has_None = shp_tmp.any(axis=1)
    allNone[vessel_type] = allTracks[vessel_type][row_has_None]
# ## Find all ship tracks with no None designations in either origin or destination
for vessel_type in ["atb",'barge']:
    # drop rows with None attribution (complement of allNone above)
    noNone[vessel_type] = allTracks[vessel_type].dropna().reset_index(drop=True)
# ## Find all ship tracks with origin or destination as marine terminal
# - compare this value to frac[vessel_type]["marine_terminal"] to quantify how many ship tracks have mixed origin/destination as marine_terminal/generic (I don't think mixed with None is possible).
# Subset tracks by WA-terminal involvement: either end (allfac), destination
# only (toWA), origin only (fromWA), or both ends (bothWA).
allfac = {}
toWA = {}
fromWA = {}
bothWA = {}
for vessel_type in ["atb",'barge']:
    allfac[vessel_type] = allTracks[vessel_type].loc[
        ((allTracks[vessel_type].TO.isin(facWA.FacilityName)) |
        (allTracks[vessel_type].FROM_.isin(facWA.FacilityName))
        )
    ]
    toWA[vessel_type] = allTracks[vessel_type].loc[
        (allTracks[vessel_type].TO.isin(facWA.FacilityName))
    ]
    fromWA[vessel_type] = allTracks[vessel_type].loc[
        (allTracks[vessel_type].FROM_.isin(facWA.FacilityName))
    ]
    bothWA[vessel_type] = allTracks[vessel_type].loc[
        ((allTracks[vessel_type].TO.isin(facWA.FacilityName)) &
        (allTracks[vessel_type].FROM_.isin(facWA.FacilityName)))
    ]
# ## Find all ship tracks with any WA or CAD marine oil terminal as either origin or destination
allfacWACAD = {}
for vessel_type in ["atb","barge"]:
    allfacWACAD[vessel_type] = allTracks[vessel_type].loc[
        ((allTracks[vessel_type].TO.isin(facWA.FacilityName)) |
        (allTracks[vessel_type].FROM_.isin(facWA.FacilityName))|
        (allTracks[vessel_type].TO.isin(facCAD.Name)) |
        (allTracks[vessel_type].FROM_.isin(facCAD.Name))
        )
    ]
# Overlap accounting: To + From double-counts tracks with a terminal at
# both ends, so (To OR From) = To + From - (To AND From).
print(f'To OR From: {allfac["atb"].shape[0]}')
print(f'To AND From: {bothWA["atb"].shape[0]}')
print(f'To: {toWA["atb"].shape[0]}')
print(f'From: {fromWA["atb"].shape[0]}')
print(f'To + From: {toWA["atb"].shape[0] + fromWA["atb"].shape[0]}')
print(f'To + From - "To AND from": {toWA["atb"].shape[0] + fromWA["atb"].shape[0] - bothWA["atb"].shape[0]}')
allfacWACAD["atb"].shape[0]
# ##### TAKE-AWAY:
# - 129165 ship tracks are to or from WA marine terminals with
# - 13238 of these having WA marine terminal as both to and from
# - The remainder are mixed with origin or destination as WA marine terminal and the other end-member being US, Pacific, Canada or CAD marine terminal (None values shouldn't be included here)
# ##### TEST:
# - All tracks = Generic + allFac + None
# Check that the three attribution classes partition the full track set.
print(f'All tracks = Generic + allFac + allNone')
print(f'{allTracks["atb"].shape[0]} = {generic["atb"].shape[0] + allfac["atb"].shape[0] + allNone["atb"].shape[0]}')
# ##### Hypothesis: the difference in the above is CAD terminal transfers. Testing....
print(f'All tracks = Generic + allFacWACAD + allNone')
print(f'{allTracks["atb"].shape[0]} = {generic["atb"].shape[0] + allfacWACAD["atb"].shape[0] + allNone["atb"].shape[0]}')
# ##### Good! So 588136 - 476681 = 111455 => CAD traffic.
# compare ATB track counts across the attribution subsets
vessel_type = "atb"
print(f'Attributed (no None in to or from): {noNone[vessel_type].shape[0]}')
print(f'Generic (to AND from): {generic[vessel_type].shape[0]}')
print(f'Attributed - Generic = {noNone[vessel_type].shape[0] - generic[vessel_type].shape[0]}')
print(f'Marine terminal (to or from): {allfac[vessel_type].shape[0]}')
# create a dictionary of ratios between subsampled data and all ship tracks
frac = {}
frac['atb'] = {}
frac['barge'] = {}
for vessel_type in ["atb","barge"]:
    # Fractions of all tracks in each attribution class.
    # marine_terminal_diff infers terminal-linked tracks indirectly as
    # attributed-minus-generic.
    frac[vessel_type]["unattributed"] = allNone[vessel_type].shape[0]/allTracks[vessel_type].shape[0]
    frac[vessel_type]["attributed"] = noNone[vessel_type].shape[0]/allTracks[vessel_type].shape[0]
    frac[vessel_type]["generic"] = generic[vessel_type].shape[0]/allTracks[vessel_type].shape[0]
    frac[vessel_type]["marine_terminal_WACAD"] = allfacWACAD[vessel_type].shape[0]/allTracks[vessel_type].shape[0]
    frac[vessel_type]["marine_terminal_diff"] = frac[vessel_type]["attributed"] - frac[vessel_type]["generic"]
    print(f'~~~ {vessel_type} ~~~')
    print(f'Fraction of {vessel_type} tracks that are unattributed: {frac[vessel_type]["unattributed"]}')
    print(f'Fraction of {vessel_type} tracks that are attributed: {frac[vessel_type]["attributed"]}')
    print(f'Fraction of attributed {vessel_type} tracks that are generic : {frac[vessel_type]["generic"]}')
    print(f'Fraction of attributed {vessel_type} tracks that are linked to marine terminal (WACAD): {frac[vessel_type]["marine_terminal_WACAD"]}')
    print(f'Fraction of attributed {vessel_type} tracks that are linked to marine terminal (diff): {frac[vessel_type]["marine_terminal_diff"]}')
# Report absolute track counts per attribution class for each vessel type.
# BUG FIX: the loop covers both vessel types but the original messages
# hard-coded the word "barge"; report the actual vessel_type instead.
for vessel_type in ["atb","barge"]:
    print(f'Total number of tracks for {vessel_type}: {allTracks[vessel_type].shape[0]:1.2e}')
    print(f'Total number of unattributed {vessel_type} tracks: {allTracks[vessel_type].shape[0]*frac[vessel_type]["unattributed"]:10.2f}')
    print(f'Total number of generically-attributed {vessel_type} tracks: {allTracks[vessel_type].shape[0]*frac[vessel_type]["generic"]:10.2f}')
    print(f'Total number of marine-terminal-attributed {vessel_type} tracks: {allTracks[vessel_type].shape[0]*frac[vessel_type]["marine_terminal_WACAD"]:10.2f}')
# ## Quantify barge and ATB cargo transfers in 2018 DOE database
# Pull 2018 DOE cargo-transfer records: ATBs split into in/out directions,
# barges with directions combined, both restricted to selected facilities.
[atb_in, atb_out]=get_DOE_atb(
    doe_xls_path,
    fac_xls_path,
    transfer_type = 'cargo',
    facilities='selected'
)
barge_inout=get_DOE_barges(
    doe_xls_path,
    fac_xls_path,
    direction='combined',
    facilities='selected',
    transfer_type = 'cargo')
# Transfer counts (one row per transfer record).
transfers = {}
transfers["barge"] = barge_inout.shape[0]
transfers["atb"] = atb_in.shape[0] + atb_out.shape[0]
print(f'{transfers["atb"]} cargo transfers for atbs')
print(f'{transfers["barge"]} cargo transfers for barges')
# ### Group barge and atb transfers by AntID and:
# - compare transfers
# - compare fraction of grouped transfers to ungrouped transfers by vessel type
# Count distinct transfer events by AntID (a transfer of several oil types
# shares one AntID).
# PERF: use GroupBy.ngroups to count groups directly instead of
# materializing per-group sums just to take shape[0]; the group count is
# identical.
transfers["barge_antid"] = barge_inout.groupby('AntID').ngroups
transfers["atb_antid"] = atb_in.groupby('AntID').ngroups + atb_out.groupby('AntID').ngroups
print(f'{transfers["atb_antid"]} ATB cargo transfers based on AntID')
print(f'{transfers["atb_antid"]/transfers["atb"]:.2f} ATB fraction AntID to all')
print(f'{transfers["barge_antid"]} barge cargo transfers based on AntID')
print(f'{transfers["barge_antid"]/transfers["barge"]:.2f} barge fraction AntID to all')
# ##### Take away: Barge and ATBs have similar number of mixed-oil-type transfers with ATBs having more mixed-type transfers (29% of 677) than barges (16% of 2334). Even though values are similar, we will use the AntID grouped number of transfers for our ping to transfer ratios
# ### Calculate the number of oil cargo barges we expect using the AntID grouping for ping-to-transfer ratio
ping2transfer = {}
oilcargobarges = {}
# ATB ping-to-transfer ratio
ping2transfer["atb"] = allTracks["atb"].shape[0]/transfers["atb_antid"]
# Estimate number of oil cargo barges using number of barge transfers
# and atb ping-to-transfer ratio
oilcargobarges["total"] = transfers["barge_antid"]*ping2transfer["atb"]
print(f'We expect {oilcargobarges["total"]:.0f} total oil cargo pings for barge traffic')
# ### Calculate the number of Attributed tracks we get for ATBs and estimate the equivalent value for barges
# estimate the ratio of attributed ATB tracks to ATB cargo transfers
noNone_ratio = noNone["atb"].shape[0]/transfers["atb_antid"]
print(f'We get {noNone_ratio:.2f} attributed ATB tracks per ATB cargo transfer')
# estimate the amount of attributed tracks we'd expect to see for tank barges based on tank barge transfers
print(f'We expect {noNone_ratio*transfers["barge_antid"]:.2f} attributed barge tracks, but we get {noNone["barge"].shape[0]}')
# estimate spurious barge voyages by removing estimated oil cargo barges from total
fraction_nonoilbarge = (noNone["barge"].shape[0]-noNone_ratio*transfers["barge_antid"])/noNone["barge"].shape[0]
print(f'We estimate that non-oil tank barge voyages account for {100*fraction_nonoilbarge:.2f}% of barge voyages')
# +
#The above value was 88% when not using the AntID grouping
# -
# # Evaluate oil cargo traffic pings for ATBs and barges
# Dictionary for probability of oil cargo barges for our 3 attribution types
# Dictionary for probability of oil cargo barges for our 3 attribution types
P_oilcargobarges = {}
# NOTE(review): this re-computes allfac exactly as earlier (WA terminals at
# either end), overwriting the previous dict -- redundant but harmless.
allfac = {}
for vessel_type in ["atb",'barge']:
    allfac[vessel_type] = allTracks[vessel_type].loc[
        ((allTracks[vessel_type].TO.isin(facWA.FacilityName)) |
        (allTracks[vessel_type].FROM_.isin(facWA.FacilityName)))
    ]
# Ratio of ATB pings with WA+CAD facility attribution to ATB transfers.
# NOTE(review): transfers["atb_antid"] is WA-only while allfacWACAD also
# includes CAD terminals -- see the mismatch discussed near the end.
fac_att_ratio = allfacWACAD["atb"].shape[0]/transfers["atb_antid"]
# Fraction of barge pings with facility attribution that are expected to carry oil based on ATB pings and transfers
P_oilcargobarges["facility"] = fac_att_ratio*transfers["barge_antid"]/allfacWACAD["barge"].shape[0]
print(f'{allfac["atb"].shape[0]} ATB tracks have a WA oil facility as origin or destination')
print(f'{allfac["barge"].shape[0]} barge tracks have a WA oil facility as origin or destination')
print(f'We get {fac_att_ratio:.2f} WA oil marine terminal attributed ATB tracks per ATB cargo transfer')
# estimate the amount of oil cargo facility tracks we'd expect to see for tank barges based on tank barge transfers
print(f'We expect {fac_att_ratio*transfers["barge_antid"]:.2f} WA oil marine terminal attributed barge tracks, but we get {allfac["barge"].shape[0]}')
fraction_nonoilbarge = (allfac["barge"].shape[0]-fac_att_ratio*transfers["barge_antid"])/allfac["barge"].shape[0]
print(f'We estimate that non-oil tank barge voyages to/from marine terminals account for {100*fraction_nonoilbarge:.2f}% of barge voyages attributed to WA marine terminals')
# ###### When not grouped by AntID:
# - 129165 ATB tracks have a WA oil facility as origin or destination
# - 1666271 barge tracks have a WA oil facility as origin or destination
# - We get 190.79 WA oil marine terminal attributed ATB tracks per ATB cargo transfer
# - We expect 529061.37 WA oil marine terminal attributed barge tracks, but we get 1666271
# - We estimate that non-oil tank barge voyages to/from marine terminals account for 68.25% of - barge voyages attributed to WA marine terminals
# ### Repeat for Generic attribution only
# Ratio of ATB pings with generic attribution to ATB WA transfers
generic_ratio = generic["atb"].shape[0]/transfers["atb_antid"]
# Fraction of barge pings with generic attribution that are expected to carry oil based on ATB pings and transfers
P_oilcargobarges["generic"] = generic_ratio*transfers["barge_antid"]/generic["barge"].shape[0]
print(f'{generic["atb"].shape[0]} ATB tracks have Pacific, US or Canada as origin or destination')
print(f'{generic["barge"].shape[0]} barge tracks have Pacific, US or Canada as origin or destination')
print(f'We get {generic_ratio:.2f} Generically attributed ATB tracks per ATB cargo transfer')
# estimate the amount of oil cargo facility tracks we'd expect to see for tank barges based on tank barge transfers
print(f'We expect {generic_ratio*transfers["barge_antid"]:.2f} Generically attributed barge tracks, but we get {generic["barge"].shape[0]}')
fraction_nonoilbarge = (generic["barge"].shape[0]-generic_ratio*transfers["barge_antid"])/generic["barge"].shape[0]
print(f'We estimate that non-oil tank barge voyages account for {100*fraction_nonoilbarge:.2f}% of barge voyages with both to/from as generic attributions ')
# ### Repeat for No attribution
# Ratio of ATB pings with "None" attribution to ATB WA transfers
allNone_ratio = allNone["atb"].shape[0]/transfers["atb_antid"]
# Fraction of barge pings with None attribution that are expected to carry oil based on ATB pings and transfers
P_oilcargobarges["none"] = allNone_ratio*transfers["barge_antid"]/allNone["barge"].shape[0]
print(f'{allNone["atb"].shape[0]} ATB tracks have None as origin or destination')
print(f'{allNone["barge"].shape[0]} barge tracks have None as as origin or destination')
print(f'We get {allNone_ratio:.2f} None attributed ATB tracks per ATB cargo transfer')
# estimate the amount of oil cargo facility tracks we'd expect to see for tank barges based on tank barge transfers
print(f'We expect {allNone_ratio*transfers["barge_antid"]:.2f} None attributed oil cargo barge tracks, but we get {allNone["barge"].shape[0]}')
fraction_nonoilbarge = (allNone["barge"].shape[0]-allNone_ratio*transfers["barge_antid"])/allNone["barge"].shape[0]
print(f'We estimate that non-oil tank barge voyages account for {100*fraction_nonoilbarge:.2f}% of barge voyages with None attributions ')
# #### Find the probability of oil carge for each ping classification, i.e.:
# - `oilcargobarges["total"]` = 588136.0 = (1) + (2) + (3), where
# - (1) `P_oilcargobarges["facility"]` * `allfacWACAD["barge"].shape[0]`
# - (2) `P_oilcargobarges["none"]` * `allNone["barge"].shape[0]`
# - (3) `P_oilcargobarges["generic"]` * `generic["barge"].shape[0]`
# Sanity check: do the per-class probabilities, weighted by class track
# counts, recombine to the total expected oil-cargo ping count?
print(P_oilcargobarges["facility"])
print(P_oilcargobarges["none"])
print(P_oilcargobarges["generic"])
print(allfacWACAD["barge"].shape[0])
print(allNone["barge"].shape[0])
print(generic["barge"].shape[0])
print(P_oilcargobarges["facility"] * allfacWACAD["barge"].shape[0])
print(P_oilcargobarges["none"] * allNone["barge"].shape[0])
print(P_oilcargobarges["generic"] * generic["barge"].shape[0])
oilcargobarges["facilities"] = (P_oilcargobarges["facility"] * allfacWACAD["barge"].shape[0])
oilcargobarges["none"] = (P_oilcargobarges["none"] * allNone["barge"].shape[0])
oilcargobarges["generic"] = (P_oilcargobarges["generic"] * generic["barge"].shape[0])
print('oilcargobarges["total"] = oilcargobarges["facilities"] + oilcargobarges["none"] + oilcargobarges["generic"]?')
#oilcargobarges_sum = oilcargobarges["facilities"] + oilcargobarges["none"] + oilcargobarges["generic"]
print(f'{oilcargobarges["total"]:.0f} =? {oilcargobarges["facilities"] + oilcargobarges["none"] + oilcargobarges["generic"]:.0f}')
missing_pings = oilcargobarges["total"]-(oilcargobarges["facilities"] + oilcargobarges["none"] + oilcargobarges["generic"])
print(f' Missing {missing_pings} pings ({100*missing_pings/oilcargobarges["total"]:.0f}%)')
# Cross-check: the three classes should tile the full barge track set.
allfacWACAD["barge"].shape[0] + generic["barge"].shape[0] + allNone["barge"].shape[0]
allTracks["barge"].shape[0]
#
| notebooks/monte_carlo_dev/.ipynb_checkpoints/Quantify_AIS_barge_attributions-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
# SMS Spam Collection: tab-separated file with two columns, label
# (ham/spam) and the raw message text.
messages = pd.read_csv('smsspamcollection/SMSSpamCollection', sep="\t", names=["label","message"])
messages.head()
import nltk
nltk.download('stopwords')
import re
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.stem import WordNetLemmatizer
# Porter stemmer used for word normalization below.
# NOTE(review): WordNetLemmatizer is imported but never used here.
ps = PorterStemmer()
messages.head()
# corpus will hold one cleaned/stemmed string per message.
corpus = []
# +
# Normalize each SMS message: strip non-letters, lowercase, drop English
# stopwords, and reduce each word to its Porter stem.
#
# PERF FIX: the original rebuilt set(stopwords.words('english')) inside
# the comprehension for every message; hoisting it out of the loop avoids
# recomputing the set len(messages) times.
stop_words = set(stopwords.words('english'))
for i in range(0, len(messages)):
    review = re.sub('[^a-zA-Z]', ' ', messages['message'][i])
    review = review.lower().split()
    review = [ps.stem(word) for word in review if word not in stop_words]
    corpus.append(' '.join(review))
# -
from sklearn.feature_extraction.text import CountVectorizer
# Bag-of-words features over the 5000 most frequent tokens.
cv = CountVectorizer(max_features=5000)
X = cv.fit_transform(corpus).toarray()
# Exploratory: dummies over the whole frame (overwritten two lines below).
y = pd.get_dummies(messages)
y
y=pd.get_dummies(messages['label'])
y
# Columns come out alphabetically (ham, spam); keep the second so that
# 1 = spam, 0 = ham.
y=y.iloc[:,1].values
y
X
from sklearn.model_selection import train_test_split
# Fixed random_state for a reproducible 80/20 split.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
X_train
X_test
y_train
y_test
from sklearn.naive_bayes import MultinomialNB
# Multinomial Naive Bayes suits count features; fit, then predict held-out.
spam_detect_model = MultinomialNB().fit(X_train, y_train)
y_pred = spam_detect_model.predict(X_test)
print(y_pred)
| Spam classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/dilanHewawitharana/Intro-to-Deep-Learning-with-PyTorch/blob/master/machine_learning_intro.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={"base_uri": "https://localhost:8080/"} id="PkWxH_FszGIc" outputId="b3c11937-6142-40e4-a671-384fa1ab2dd6"
# Jovian Commit Essentials
# Please retain and execute this cell without modifying the contents for `jovian.commit` to work
# !pip install jovian --upgrade -q
import jovian
# Bind this Colab session to the notebook's Drive file ID so jovian.commit
# knows which notebook to snapshot.
jovian.utils.colab.set_colab_file_id('1Zc0EWLdGyQ_76YSQI6UiTwNfi29ZHIjm')
# + [markdown] id="C1jBtMIXzGIk"
# ## Introduction to Machine Learning
#
# <img src="https://i.imgur.com/oJEQe7k.png" width="480" />
#
# <div style="text-align:center;">
# <a href="https://www.manning.com/books/deep-learning-with-python">Image Source</a>
# </div>
# + [markdown] id="rJJ6USUCzGIl"
# ### Linear Regression
#
# #### Linear Regression Data
#
# <img src="https://i.imgur.com/mtkR2lB.png" width="540" >
#
# #### Linear Regression Visualization
#
# <img src="https://i.imgur.com/mtkR2lB.png" width="480">
#
#
# #### Linear Regression model
#
# $$
# \hspace{2.5cm} X \hspace{1.1cm} \times \hspace{1.2cm} W^T \hspace{1.2cm} + \hspace{1cm} b \hspace{2cm}
# $$
#
# $$
# \left[ \begin{array}{cc}
# 73 & 67 & 43 \\
# 91 & 88 & 64 \\
# \vdots & \vdots & \vdots \\
# 69 & 96 & 70
# \end{array} \right]
# %
# \times
# %
# \left[ \begin{array}{cc}
# w_{11} & w_{21} \\
# w_{12} & w_{22} \\
# w_{13} & w_{23}
# \end{array} \right]
# %
# # +
# %
# \left[ \begin{array}{cc}
# b_{1} & b_{2} \\
# b_{1} & b_{2} \\
# \vdots & \vdots \\
# b_{1} & b_{2} \\
# \end{array} \right]
# $$
# + [markdown] id="yM2OV-lbzGIl"
# ### Feedfoward Neural Network
#
# 
#
# Conceptually, you think of feedforward neural networks as two or more linear regression models stacked on top of one another with a non-linear activation function applied between them.
#
# <img src="https://cdn-images-1.medium.com/max/1600/1*XxxiA0jJvPrHEJHD4z893g.png" width="640">
#
# To use a feedforward neural network instead of linear regression, we can extend the `nn.Module` class from PyTorch.
# + id="TmEnAYpczGIl"
# !pip install jovian --upgrade -q
# + id="ZFcv8ziCzGIm"
import jovian
# + colab={"base_uri": "https://localhost:8080/", "height": 141} id="BYi2CuehzGIn" outputId="e670cb4f-9fd8-4c98-ae77-4342f72c9ca8"
# Snapshot this notebook to jovian.ml under the given project name.
jovian.commit(project='machine-learning-intro')
# + id="hFQEP6IwzGIo"
| machine_learning_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## 2020년 7월 14일 화요일
# ### leetCode - Sum of Two Integers (Python)
# ### 문제 : https://leetcode.com/problems/sum-of-two-integers/
# ### 블로그 : https://somjang.tistory.com/entry/leetCode-371-Sum-of-Two-Integers-Python
# ### 첫번째 시도
class Solution:
    def getSum(self, a: int, b: int) -> int:
        """Return the arithmetic sum of the two integers a and b."""
        # Equivalent to the original sum([a, b]); Python's built-in
        # arbitrary-precision ints make the addition exact.
        return a + b
| DAY 101 ~ 200/DAY159_[leetCode] Sum of Two Integers (Python).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Attend Infer Repeat
#
# In this tutorial we will implement the model and inference strategy described in "Attend, Infer, Repeat:
# Fast Scene Understanding with Generative Models" (AIR) [1] and apply it to the multi-mnist dataset.
#
# A [standalone implementation](https://github.com/uber/pyro/tree/dev/examples/air) is also available.
# +
# %pylab inline
import os
from collections import namedtuple
from observations import multi_mnist
import pyro
import pyro.optim as optim
from pyro.infer import SVI, TraceGraph_ELBO
import pyro.distributions as dist
import pyro.poutine as poutine
import torch
import torch.nn as nn
from torch.nn.functional import relu, sigmoid, softplus, grid_sample, affine_grid
import numpy as np
smoke_test = ('CI' in os.environ)
pyro.enable_validation(True)
# -
# ## Introduction
#
# The model described in [1] is a generative model of scenes. In this tutorial we will use it to model images from a dataset that is similar to the multi-mnist dataset in [1]. Here are some data points from this data set:
# + keep_output=true
# Load the multi-mnist dataset (downloaded into inpath on first use) and
# rescale pixel intensities from [0, 255] to [0, 1].
inpath = '../../examples/air/data'
(X_np, _), _ = multi_mnist(inpath, max_digits=2, canvas_size=50, seed=42)
X_np = X_np.astype(np.float32)
X_np /= 255.0
mnist = torch.from_numpy(X_np)
def show_images(imgs):
    # Display a row of grayscale images side by side.  figure/subplot/
    # axis/imshow come from the %pylab inline import at the top.
    figure(figsize=(8, 2))
    for i, img in enumerate(imgs):
        subplot(1, len(imgs), i + 1)
        axis('off')
        imshow(img.data.numpy(), cmap='gray')
show_images(mnist[9:14])
# -
# To get an idea where we're heading, we first give a brief overview of the model and the approach we'll take to inference. We'll follow the naming conventions used in [1] as closely as possible.
#
# AIR decomposes the process of generating an image into discrete steps, each of which generates only part of the image. More specifically, at each step the model will generate a small image (`y_att`) by passing a latent "code" variable (`z_what`) through a neural network. We'll refer to these small images as "objects". In the case of AIR applied to the multi-mnist dataset we expect each of these objects to represent a single digit. The model also includes uncertainty about the location and size of each object. We'll describe an object's location and size as its "pose" (`z_where`). To produce the final image, each object will first be located within a larger image (`y`) using the pose infomation `z_where`. Finally, the `y`s from all time steps will be combined additively to produce the final image `x`.
#
# Here's a picture (reproduced from [1]) that shows two steps of this process:
# + raw_mimetype="text/html" active=""
# <center>
# <figure style='padding: 0 0 1em'>
# <img src='_static/img/model-generative.png' style='width: 35%;'>
# <figcaption style='font-size: 90%; padding: 0.5em 0 0'>
# <b>Figure 1:</b> Two steps of the generative process.
# </figcaption>
# </figure>
# </center>
# -
# Inference is performed in this model using [amortized stochastic variational inference](svi_part_i.ipynb) (SVI). The parameters of the neural network are also optimized during inference. Performing inference in such rich models is always difficult, but the presence of discrete choices (the number of steps in this case) makes inference in this model particularly tricky. For this reason the authors use a technique called data dependent baselines to achieve good performance. This technique can be implemented in Pyro, and we'll see how later in the tutorial.
#
# ## Model
#
# ### Generating a single object
#
# Let's look at the model more closely. At the core of the model is the generative process for a single object. Recall that:
#
# * At each step a single object is generated.
# * Each object is generated by passing its latent code through a neural network.
# * We maintain uncertainty about the latent code used to generate each object, as well as its pose.
#
# This can be expressed in Pyro like so:
# +
# Create the neural network. This takes a latent code, z_what, to pixel intensities.
class Decoder(nn.Module):
    """Two-layer MLP mapping a 50-dim latent code z_what to 400 pixel
    intensities (a flattened 20x20 object) in [0, 1]."""

    def __init__(self):
        # Old-style super() call kept for Python 2 compatibility
        # (this notebook runs on a Python 2 kernel).
        super(Decoder, self).__init__()
        self.l1 = nn.Linear(50, 200)
        self.l2 = nn.Linear(200, 400)

    def forward(self, z_what):
        # ReLU hidden layer, then sigmoid to squash outputs into valid
        # pixel intensities.
        return sigmoid(self.l2(relu(self.l1(z_what))))
decode = Decoder()
# Priors over a single object's pose and appearance.  The pose prior is a
# tight normal around scale 3 with unit-variance x,y position; the 50-dim
# appearance code prior is a standard normal.
# NOTE(review): scale ~3 presumably yields small on-canvas objects under
# the spatial transformer below -- confirm against the STN convention.
z_where_prior_loc = torch.tensor([3., 0., 0.])
z_where_prior_scale = torch.tensor([0.1, 1., 1.])
z_what_prior_loc = torch.zeros(50)
z_what_prior_scale = torch.ones(50)
def prior_step_sketch(t):
    """Generate one object: sample pose and latent code from the priors,
    decode the code to a 20x20 patch, and place it on a 50x50 canvas.
    Sample names are suffixed with step t to stay unique across steps."""
    # Sample object pose. This is a 3-dimensional vector representing x,y position and size.
    z_where = pyro.sample('z_where_{}'.format(t),
                          dist.Normal(z_where_prior_loc.expand(1, -1),
                                      z_where_prior_scale.expand(1, -1))
                              .independent(1))
    # Sample object code. This is a 50-dimensional vector.
    z_what = pyro.sample('z_what_{}'.format(t),
                         dist.Normal(z_what_prior_loc.expand(1, -1),
                                     z_what_prior_scale.expand(1, -1))
                             .independent(1))
    # Map code to pixel space using the neural network.
    y_att = decode(z_what)
    # Position/scale object within larger image.
    y = object_to_image(z_where, y_att)
    return y
# -
# Hopefully the use of `pyro.sample` and PyTorch networks within a model seem familiar at this point. If not you might want to review the [VAE tutorial](vae.ipynb). One thing to note is that we include the current step `t` in the name passed to `pyro.sample` to ensure that names are unique across steps.
#
# The `object_to_image` function is specific to this model and warrants further attention. Recall that the neural network (`decode` here) will output a small image, and that we would like to add this to the output image after performing any translation and scaling required to achieve the pose (location and size) described by `z_where`. It's not clear how to do this, and in particular it's not obvious that this can be implemented in a way that preserves the differentiability of our model, which we require in order to perform [SVI](svi_part_i.ipynb). However, it turns out we can do this this using a spatial transformer network (STN) [2].
#
# Happily for us, PyTorch makes it easy to implement a STN using its [grid_sample](http://pytorch.org/docs/master/nn.html#grid-sample) and [affine_grid](http://pytorch.org/docs/master/nn.html#affine-grid) functions. `object_to_image` is a simple function that calls these, doing a little extra work to massage `z_where` into the expected format.
# +
def expand_z_where(z_where):
    # Turn each pose 3-vector [s, x, y] into the 2x3 affine matrix
    #   [[s, 0, x],
    #    [0, s, y]]
    # expected by affine_grid.
    batch = z_where.size(0)
    # Prepend a zero column so each row reads [0, s, x, y], then gather
    # those entries in the order s,0,x,0,s,y and reshape to 2x3.
    padded = torch.cat((torch.zeros([1, 1]).expand(batch, 1), z_where), 1)
    gather_idx = torch.LongTensor([1, 0, 2, 0, 1, 3])
    return torch.index_select(padded, 1, gather_idx).view(batch, 2, 3)
def object_to_image(z_where, obj):
    # Paste a batch of 20x20 objects onto 50x50 canvases at the poses
    # given by z_where, via a spatial transformer (affine_grid+grid_sample).
    batch = obj.size(0)
    theta = expand_z_where(z_where)
    sampling_grid = affine_grid(theta, torch.Size((batch, 1, 50, 50)))
    canvas = grid_sample(obj.view(batch, 1, 20, 20), sampling_grid)
    return canvas.view(batch, 50, 50)
# -
# A discussion of the details of the STN is beyond the scope of this tutorial. For our purposes however, it suffices to keep in mind that `object_to_image` takes the small image generated by the neural network and places it within a larger image with the desired pose.
#
# Let's visualize the results of calling `prior_step_sketch` a few times to clarify this:
# + keep_output=true
# Draw five single-object samples from the prior and display them.
pyro.set_rng_seed(0)
samples = [prior_step_sketch(0)[0] for _ in range(5)]
show_images(samples)
# -
# ### Generating an entire image
#
# Having completed the implementation of a single step, we next consider how we can use this to generate an entire image. Recall that we would like to maintain uncertainty over the number of steps used to generate each data point. One choice we could make for the prior over the number of steps is the geometric distribution, which can be expressed as follows:
# + keep_output=true
pyro.set_rng_seed(0)
def geom(num_trials=0):
    """Sample from Geometric(0.5): the number of Bernoulli failures before
    the first success, expressed recursively."""
    p = torch.tensor([0.5])
    # Sample names are suffixed by trial index to stay unique per call.
    x = pyro.sample('x{}'.format(num_trials), dist.Bernoulli(p))
    # Success (x == 1) ends the run; otherwise recurse, counting the trial.
    if x[0] == 1:
        return num_trials
    else:
        return geom(num_trials + 1)
# Generate some samples.
for _ in range(5):
    print('sampled {}'.format(geom()))
# -
# This is a direct translation of the definition of the geometric distribution as the number of failures before a success in a series of Bernoulli trials. Here we express this as a recursive function that passes around a counter representing the number of trials made, `num_trials`. This function samples from the Bernoulli and returns `num_trials` if `x == 1` (which represents success), otherwise it makes a recursive call, incrementing the counter.
#
# The use of a geometric prior is appealing because it does not bound the number of steps the model can use a priori. It's also convenient, because by extending `geometric` to generate an object before each recursive call, we turn this from a geometric distribution over counts to a distribution over images with a geometrically distributed number of steps.
def geom_prior(x, step=0):
    """Add objects to canvas `x` until a Bernoulli(0.5) success occurs.

    This turns the geometric distribution over counts into a distribution
    over images with a geometrically distributed number of objects.
    Sample-site names 'i0', 'i1', ... match the recursive original.
    """
    p = torch.tensor([0.5])
    t = step
    while True:
        stop = pyro.sample('i{}'.format(t), dist.Bernoulli(p))
        if stop[0] == 1:
            return x
        x = x + prior_step_sketch(t)
        t += 1
# Let's visualize some samples from this distribution:
# + keep_output=true
pyro.set_rng_seed(4)
# Start from an all-black 50x50 canvas and sample five images, each built
# from a geometrically distributed number of objects.
x_empty = torch.zeros(1, 50, 50)
samples = [geom_prior(x_empty)[0] for _ in range(5)]
show_images(samples)
# -
# #### Aside: Vectorized mini-batches
#
# In our final implementation we would like to generate a mini batch of samples in parallel for efficiency. While Pyro supports vectorized mini batches with `iarange`, it currently requires that each `sample` statement within `iarange` makes a choice for all samples in the mini batch. Another way to say this is that each sample in the mini batch will encounter the same set of `sample` statements. This is problematic for us, because as we've just seen, samples can make differing numbers of choices under our model.
#
# One way around this is to have all samples take the same number of steps, but to nullify (so far as is possible) the effect of the superfluous random choices made after the sample is conceptually "complete". We'll say that a sample is "complete" once a zero is sampled from the Bernoulli random choice, and prior to that we'll say that a sample is "active".
#
# The first part of this is straight forward. Following [1] we choose to take a fixed number of steps for each sample. (By doing so we no longer specify a geometric distribution over the number of steps, since the number of steps is now bounded. It would be interesting to explore the alternative of having each sample in the batch take steps until a successful Bernoulli trial has occurred in each, as this would retain the geometric prior.)
#
# To address the second part we will take the following steps:
#
# 1. Only add objects to the output while a sample is active.
# 2. Set the log probability of random choices made by complete samples to zero. (Since the [SVI loss](svi_part_iii.ipynb) is a weighted sum of log probabilities, setting a choice's log probability to zero effectively removes its contribution to the loss.) This is achieved using the `mask()` method of distributions.
#
# (Looking ahead, we'll need to take similar measures when we implement the guide and add baselines later in this tutorial.)
#
# Of course, one thing we can't undo is the work done in performing unnecessary sampling. Nevertheless, even though this approach performs redundant computation, the gains from using mini batches are so large that this is still a win overall.
#
# Here's an updated model step function that implements these ideas. In summary, the changes from `prior_step_sketch` are:
#
# 1. We've added a new parameter `n` that specifies the size of the mini batch.
# 2. We now conditionally add the object to the output image based on a value sampled from a Bernoulli distribution.
# 3. We use `mask()` to zero out the log probability of random choices made by complete samples.
def prior_step(n, t, prev_x, prev_z_pres):
    """One generative step for a vectorized mini batch of size `n`.

    Samples presence (z_pres), pose (z_where) and latent code (z_what) for
    step `t`, renders the object and composites it onto `prev_x`. Returns
    the updated canvas and this step's z_pres.
    """
    # Presence indicator: multiplying the 0.5 success probability by the
    # previous step's value means that once a 0 is sampled, every later
    # step also samples 0 and contributes nothing further to the image.
    pres_dist = dist.Bernoulli(0.5 * prev_z_pres).independent(1)
    z_pres = pyro.sample('z_pres_{}'.format(t), pres_dist)
    # mask(z_pres) zeroes the log probability of choices made by samples
    # that are already complete, removing them from the loss.
    where_dist = (dist.Normal(z_where_prior_loc.expand(n, -1),
                              z_where_prior_scale.expand(n, -1))
                  .mask(z_pres)
                  .independent(1))
    z_where = pyro.sample('z_where_{}'.format(t), where_dist)
    what_dist = (dist.Normal(z_what_prior_loc.expand(n, -1),
                             z_what_prior_scale.expand(n, -1))
                 .mask(z_pres)
                 .independent(1))
    z_what = pyro.sample('z_what_{}'.format(t), what_dist)
    # Decode the latent code to a small image and place it in the frame.
    sprite = decode(z_what)
    placed = object_to_image(z_where, sprite)
    # Composite onto the running canvas, but only for active samples.
    canvas = prev_x + placed * z_pres.view(-1, 1, 1)
    return canvas, z_pres
# By iterating this step function we can produce an entire image, composed of multiple objects. Since each image in the multi-mnist dataset contains zero, one or two digits we will allow the model to use up to (and including) three steps. In this way we ensure that inference has to avoid using one or more steps in order to correctly count the number of objects in the input.
def prior(n):
    """Generate a mini batch of `n` images, each composed of up to three
    objects (one per iterated prior_step)."""
    canvas = torch.zeros(n, 50, 50)
    pres = torch.ones(n, 1)
    for step in range(3):
        canvas, pres = prior_step(n, step, canvas, pres)
    return canvas
# We have now fully specified the prior for our model. Let's visualize some samples to get a feel for this distribution:
# + keep_output=true
pyro.set_rng_seed(121)
# Sample a mini batch of five images from the full prior and display them.
show_images(prior(5))
# -
# #### Specifying the likelihood
#
# The last thing we need in order to complete the specification of the model is a likelihood function. Following [1] we will use a Gaussian likelihood with a fixed standard deviation of 0.3. This is straight forward to implement with `pyro.sample` using the `obs` argument.
#
# When we later come to perform inference we will find it convenient to package the prior and likelihood into a single function. This is also a convenient place to introduce `iarange`, which we use to implement data subsampling, and to register the networks we would like to optimize with `pyro.module`.
def model(data):
    """Full generative model: sample images from the prior and score them
    against a (subsampled) mini batch under a fixed-sd Gaussian likelihood."""
    # Register the decoder network so its parameters are optimized.
    pyro.module("decode", decode)
    with pyro.iarange('data', data.size(0)) as indices:
        batch = data[indices]
        # Flatten the sampled 50x50 canvases to match the flattened data.
        flat = prior(batch.size(0)).view(-1, 50 * 50)
        # Gaussian likelihood with fixed standard deviation 0.3, as in [1].
        sd = (0.3 * torch.ones(1)).expand_as(flat)
        pyro.sample('obs', dist.Normal(flat, sd).independent(1),
                    obs=batch)
# ## Guide
#
# Following [1] we will perform [amortized stochastic variational inference](svi_part_i.ipynb) in this model. Pyro provides general purpose machinery that implements most of this inference strategy, but as we have seen in earlier tutorials we are required to provide a model specific guide. What we call a guide in Pyro is exactly the entity called the "inference network" in the paper.
#
# We will structure the guide around a recurrent network to allow the guide to capture (some of) the dependencies we expect to be present in the true posterior. At each step the recurrent network will generate the parameters for the choices made within the step. The values sampled will be fed back into the recurrent network so that this information can be used when computing the parameters for the next step. The guide for the [Deep Markov Model](dmm.ipynb) shares a similar structure.
#
# As in the model, the core of the guide is the logic for a single step. Here's a sketch of an implementation of this:
def guide_step_basic(t, data, prev):
    """Sketch of a single guide step: one RNN update followed by sample
    statements mirroring the three choices made by the model's step."""
    # The RNN consumes the images together with the previous step's samples.
    net_in = torch.cat((data, prev.z_where, prev.z_what, prev.z_pres), 1)
    hidden, cell = rnn(net_in, (prev.h, prev.c))
    # A further network maps the RNN hidden state to the parameters of all
    # of this step's distributions.
    z_pres_p, z_where_loc, z_where_scale, z_what_loc, z_what_scale = predict_basic(hidden)
    z_pres = pyro.sample('z_pres_{}'.format(t),
                         dist.Bernoulli(z_pres_p * prev.z_pres))
    z_where = pyro.sample('z_where_{}'.format(t),
                          dist.Normal(z_where_loc, z_where_scale))
    z_what = pyro.sample('z_what_{}'.format(t),
                         dist.Normal(z_what_loc, z_what_scale))
    return  # values for next step
# This would be a reasonable guide to use with this model, but the paper describes a crucial improvement we can make to the code above. Recall that the guide will output information about an object's pose and its latent code at each step. The improvement we can make is based on the observation that once we have inferred the pose of an object, we can do a better job of inferring its latent code if we use the pose information to crop the object from the input image, and pass the result (which we'll call a "window") through an additional network in order to compute the parameters of the latent code. We'll call this additional network the "encoder" below.
#
# Here's how we can implement this improved guide, and a fleshed out implementation of the networks involved:
# +
# Guide RNN. Input size 2554 matches the concatenation fed in guide_step:
# 2500 image pixels + z_where (3) + z_what (50) + z_pres (1).
rnn = nn.LSTMCell(2554, 256)
# Takes pixel intensities of the attention window to parameters (mean,
# standard deviation) of the distribution over the latent code,
# z_what.
class Encoder(nn.Module):
    """Map a flattened attention window (400 pixels) to the location and
    scale of the 50-dimensional z_what distribution."""

    def __init__(self):
        super(Encoder, self).__init__()
        self.l1 = nn.Linear(400, 200)
        self.l2 = nn.Linear(200, 100)

    def forward(self, data):
        hidden = relu(self.l1(data))
        out = self.l2(hidden)
        loc, raw_scale = out[:, 0:50], out[:, 50:]
        # softplus keeps the scale strictly positive.
        return loc, softplus(raw_scale)
encode = Encoder()  # shared encoder instance used by every guide step
# Takes the guide RNN hidden state to parameters of
# the guide distributions over z_where and z_pres.
class Predict(nn.Module):
    """Map the guide RNN hidden state (256) to the z_pres probability (1)
    plus the z_where location (3) and scale (3) parameters."""

    def __init__(self):
        super(Predict, self).__init__()
        self.l = nn.Linear(256, 7)

    def forward(self, h):
        a = self.l(h)
        presence_prob = sigmoid(a[:, 0:1])  # squish to [0, 1]
        where_loc = a[:, 1:4]
        where_scale = softplus(a[:, 4:])    # squish to > 0
        return presence_prob, where_loc, where_scale
predict = Predict()  # shared predictor instance used by every guide step
def guide_step_improved(t, data, prev):
    """One guide step with an attention window: infer the pose first, crop
    the window it points at from the input, then encode that window to
    obtain the z_what parameters."""
    net_in = torch.cat((data, prev.z_where, prev.z_what, prev.z_pres), 1)
    h, c = rnn(net_in, (prev.h, prev.c))
    z_pres_p, z_where_loc, z_where_scale = predict(h)
    pres_dist = dist.Bernoulli(z_pres_p * prev.z_pres).independent(1)
    z_pres = pyro.sample('z_pres_{}'.format(t), pres_dist)
    where_dist = dist.Normal(z_where_loc, z_where_scale).independent(1)
    z_where = pyro.sample('z_where_{}'.format(t), where_dist)
    # New. Crop a small window from the input.
    x_att = image_to_object(z_where, data)
    # Compute the parameters of the distribution over z_what by passing
    # the window through the encoder network.
    z_what_loc, z_what_scale = encode(x_att)
    what_dist = dist.Normal(z_what_loc, z_what_scale).independent(1)
    z_what = pyro.sample('z_what_{}'.format(t), what_dist)
    return  # values for next step
# -
# Since we would like to maintain differentiability of the guide we again use a STN to perform the required "cropping". The `image_to_object` function performs the opposite transform to the object_to_image function used in the guide. That is, the former takes a small image and places it on a larger image, and the latter crops a small image from a larger image.
# +
def z_where_inv(z_where):
    """Invert a batch of pose vectors.

    For each row [s, x, y] compute [1/s, -x/s, -y/s] -- the parameters of
    the spatial transform that undoes the one used by the generative model.
    """
    batch = z_where.size(0)
    ones = torch.ones([1, 1]).type_as(z_where).expand(batch, 1)
    negated = -z_where[:, 1:]
    # Dividing the whole row by s yields [1/s, -x/s, -y/s].
    return torch.cat((ones, negated), 1) / z_where[:, 0:1]
def image_to_object(z_where, image):
    """Inverse of object_to_image: crop a 20x20 window, located by the
    inverted pose, from each 50x50 input image. Returns flattened windows."""
    batch = image.size(0)
    theta_inv = expand_z_where(z_where_inv(z_where))
    sampling_grid = affine_grid(theta_inv, torch.Size((batch, 1, 20, 20)))
    window = grid_sample(image.view(batch, 1, 50, 50), sampling_grid)
    return window.view(batch, -1)
# -
# ### Another perspective
#
# So far we've considered the model and the guide in isolation, but we gain an interesting perspective if we zoom out and look at the model and guide computation as a whole. Doing so, we see that at each step AIR includes a sub-computation that has the same structure as a [Variational Auto-encoder](vae.ipynb) (VAE).
#
# To see this, notice that the guide passes the window through a neural network (the encoder) to generate the parameters of the distribution over a latent code, and the model passes samples from this latent code distribution through another neural network (the decoder) to generate an output window. This structure is highlighted in the following figure, reproduced from [1]:
# + raw_mimetype="text/html" active=""
# <center>
# <figure style='padding: 0 0 1em'>
# <img src='_static/img/model-micro.png' style='width: 35%;'>
# <figcaption style='font-size: 90%; padding: 0.5em 0 0'>
# <b>Figure 2:</b> Interaction between the guide and model at each step.
# </figcaption>
# </figure>
# </center>
# -
# From this perspective AIR is seen as a sequential variant of the VAE. The act of cropping a small window from the input image serves to restrict the attention of a VAE to a small region of the input image at each step; hence "Attend, Infer, Repeat".
#
# ## Inference
#
# As we mentioned in the introduction, successfully performing inference in this model is a challenge. In particular, the presence of discrete choices in the model makes inference trickier than in a model in which all choices can be reparameterized. The underlying problem we face is that the gradient estimates we use in the optimization performed by variational inference have much higher variance in the presence of [non-reparameterizable choices](svi_part_iii.ipynb#Tricky-Case:-Non-reparameterizable-Random-Variables).
#
# To bring this variance under control, the paper applies a technique called "data dependent baselines" (AKA "neural baselines") to the discrete choices in the model.
#
# ### Data dependent baselines
#
# Happily for us, Pyro includes support for data dependent baselines. If you are not already familiar with this idea, you might want to read [our introduction](svi_part_iii.ipynb#Baselines-in-Pyro) before continuing. As model authors we only have to implement the neural network, pass it our data as input, and feed its output to `pyro.sample`. Pyro's inference back-end will ensure that the baseline is included in the gradient estimator used for inference, and that the network parameters are updated appropriately.
#
# Let's see how we can add data dependent baselines to our AIR implementation. We need a neural network that can output a (scalar) baseline value at each discrete choice in the guide, having received a multi-mnist image and the values sampled by the guide so far as input. Notice that this is very similar to the structure of the guide network, and indeed we will again use a recurrent network.
#
# To implement this we will first write a short helper function that implements a single step of the RNN we've just described:
# +
# Baseline RNN mirrors the guide RNN's input size (2554) and hidden size (256).
bl_rnn = nn.LSTMCell(2554, 256)
bl_predict = nn.Linear(256, 1)  # maps the baseline hidden state to a scalar
# Use an RNN to compute the baseline value. This network takes the
# input images and the values samples so far as input.
def baseline_step(x, prev):
    """One step of the data-dependent-baseline RNN.

    Returns the scalar baseline value for this step together with the
    updated LSTM state.
    """
    # detach(): gradients must not flow from the baseline network back
    # into the guide network -- they are optimized with separate objectives.
    net_in = torch.cat((x,
                        prev.z_where.detach(),
                        prev.z_what.detach(),
                        prev.z_pres.detach()), 1)
    bl_h, bl_c = bl_rnn(net_in, (prev.bl_h, prev.bl_c))
    # Zero the baseline for completed samples; their choices are already
    # masked out of the objective, so no variance reduction is needed.
    bl_value = bl_predict(bl_h) * prev.z_pres
    return bl_value, bl_h, bl_c
# -
# There are two important details to highlight here:
#
# First, we `detach` values sampled by the guide before passing them to the baseline network. This is important as the baseline network and the guide network are entirely separate networks optimized with different objectives. Without this, gradients would flow from the baseline network into the guide network. When using data dependent baselines we must do this whenever we feed values sampled by the guide into the baselines network. (If we don't we'll trigger a PyTorch run-time error.)
#
# Second, we multiply the output of the baseline network by the value of `z_pres` from the previous step. This relieves the baseline network from the burden of having to output accurate predictions for completed samples. (The outputs for completed samples will be multiplied by zero, so the derivative of the [baseline loss](svi_part_iii.ipynb#Neural-Baselines) for these outputs will be zero.) It's OK to do this because in effect we've already removed random choices for completed samples from the inference objective, so there's no need to apply any variance reduction to them.
#
# We now have everything we need to complete the implementation of the guide. Our final `guide_step` function will be very similar to `guide_step_improved` introduced above. The only changes are:
#
# 1. We now call the `baseline_step` helper and pass the baseline value it returns to `pyro.sample`.
# 2. We now mask out the `z_where` and `z_what` choices for complete sample. This serves exactly the same purpose as the masks added to the model. (See the earlier discussion for the motivation behind this change.)
#
# We'll also write a `guide` function that will iterate `guide_step` in order to provide a guide for the whole model.
# +
# State threaded through the guide: RNN state, baseline RNN state, and the
# previous step's samples.
GuideState = namedtuple('GuideState', ['h', 'c', 'bl_h', 'bl_c', 'z_pres', 'z_where', 'z_what'])

def initial_guide_state(n):
    """Fresh per-batch guide state: zeroed RNN/baseline states and samples,
    with z_pres set to one so the first step is unconditionally active."""
    zeros = torch.zeros
    return GuideState(h=zeros(n, 256), c=zeros(n, 256),
                      bl_h=zeros(n, 256), bl_c=zeros(n, 256),
                      z_pres=torch.ones(n, 1),
                      z_where=zeros(n, 3),
                      z_what=zeros(n, 50))
def guide_step(t, data, prev):
    """Single step of the guide.

    Mirrors the model's step: samples z_pres, z_where and z_what for step
    `t`, with parameters produced by the recurrent network (and, for
    z_what, by the encoder applied to an attention window). Also advances
    the baseline RNN and attaches its value to the z_pres sample site.

    :param t: step index (used to name the sample sites).
    :param data: mini batch of flattened input images.
    :param prev: GuideState carried over from the previous step.
    :return: a new GuideState with this step's RNN state and samples.
    """
    rnn_input = torch.cat((data, prev.z_where, prev.z_what, prev.z_pres), 1)
    h, c = rnn(rnn_input, (prev.h, prev.c))
    z_pres_p, z_where_loc, z_where_scale = predict(h)
    # Here we compute the baseline value, and pass it to sample.
    baseline_value, bl_h, bl_c = baseline_step(data, prev)
    z_pres = pyro.sample('z_pres_{}'.format(t),
                         dist.Bernoulli(z_pres_p * prev.z_pres)
                             .independent(1),
                         infer=dict(baseline=dict(baseline_value=baseline_value.squeeze(-1))))
    # mask(z_pres) zeroes the log probability of choices made by samples
    # that are already complete (see the model for the same trick).
    z_where = pyro.sample('z_where_{}'.format(t),
                          dist.Normal(z_where_loc, z_where_scale)
                              .mask(z_pres)
                              .independent(1))
    # Crop the window the inferred pose points at, then encode it to get
    # the z_what parameters.
    x_att = image_to_object(z_where, data)
    z_what_loc, z_what_scale = encode(x_att)
    z_what = pyro.sample('z_what_{}'.format(t),
                         dist.Normal(z_what_loc, z_what_scale)
                             .mask(z_pres)
                             .independent(1))
    return GuideState(h=h, c=c, bl_h=bl_h, bl_c=bl_c, z_pres=z_pres, z_where=z_where, z_what=z_what)
def guide(data):
    """Guide (inference network) for the whole model.

    Subsamples a mini batch, initializes the per-batch guide state, and
    iterates guide_step three times (matching the model's step count).

    :param data: full dataset of flattened images.
    :return: list of the three GuideStates produced for the mini batch.
    """
    # Register networks for optimization.
    # Fix: the original statements ended with stray trailing commas, which
    # turned each call into a throwaway one-element tuple expression.
    pyro.module('rnn', rnn)
    pyro.module('predict', predict)
    pyro.module('encode', encode)
    pyro.module('bl_rnn', bl_rnn)
    pyro.module('bl_predict', bl_predict)
    with pyro.iarange('data', data.size(0), subsample_size=64) as indices:
        batch = data[indices]
        state = initial_guide_state(batch.size(0))
        steps = []
        for t in range(3):
            state = guide_step(t, batch, state)
            steps.append(state)
        return steps
# -
# ### Putting it all together
#
# We have now completed the implementation of the model and the guide. As we have seen in earlier tutorials, we need write only a few more lines of code to begin performing inference:
# +
# Flatten each 50x50 image to a 2500-vector, as the model/guide expect.
data = mnist.view(-1, 50 * 50)
# TraceGraph_ELBO enables the baseline-aware, dependency-aware estimator.
svi = SVI(model,
          guide,
          optim.Adam({'lr': 1e-4}),
          loss=TraceGraph_ELBO())
for i in range(5):
    loss = svi.step(data)
    print('i={}, elbo={:.2f}'.format(i, loss / data.size(0)))
# -
# One key detail here is that we use a `TraceGraph_ELBO` loss rather than a simpler `Trace_ELBO`. This indicates that we wish to use the gradient estimator that supports data dependent baselines. This estimator also [reduces the variance](svi_part_iii.ipynb#Reducing-Variance-via-Dependency-Structure) of gradient estimates by making use of independence information included in the model. Something similar is implicitly used in [1], and is necessary in order to achieve good results on this model.
#
# ## Results
#
# To sanity check our implementation we ran inference using our [standalone implementation](https://github.com/uber/pyro/tree/dev/examples/air) and compared its performance against some of the results reported in [1].
#
# Here we show progress made on the ELBO and training set count accuracy during optimization:
# + raw_mimetype="text/html" active=""
# <center>
# <figure style='padding: 0 0 1em'>
# <div style='width: 50%; float: left;'><img src="_static/img/air/progress_elbo.png" /></div>
# <div style='width: 50%; float: left;'><img src="_static/img/air/progress_accuracy.png" /></div>
# <figcaption style='font-size: 90%; clear: both;'><b>Figure 3:</b> <i>Left:</i> Progress on the evidence lower bound (ELBO) during optimization. <i>Right:</i> Progress on training set count accuracy during optimization.</figcaption>
# </figure>
# </center>
# -
# Count accuracy reached around 98.7%, which is in the same ballpark as the count accuracy reported in [1]. The value reached on the ELBO differs a little from that reported in [1], which may be due to small differences in the priors used.
#
# In the next figure the top row shows ten data points from the test set. The bottom row is a visualization of a single sample from the guide for each of these inputs, that shows the values sampled for `z_pres` and `z_where`. Following [1], the first, second and third steps are displayed using red, green and blue borders respectively. (No blue borders are shown as the guide did not use three steps for any of these samples.) It also shows reconstructions of the input obtained by passing the latent variables sampled from the guide back through the model to generate an output image.
# + raw_mimetype="text/html" active=""
# <center>
# <figure style='padding: 0 0 1em'>
# <img src="_static/img/air/reconstructions.png" />
# <figcaption style='font-size: 90%; padding: 0.5em 0 0'><b>Figure 4:</b> <i>Top row:</i> Data points from the multi-mnist test set. <i>Bottom row:</i> Visualization of samples from the guide and the model's reconstruction of the inputs.</figcaption>
# </figure>
# </center>
# -
# These results were collected using the following parameters:
#
# ```
# python main.py -n 200000 -blr 0.1 --z-pres-prior 0.01 --scale-prior-sd 0.2 --predict-net 200 --bl-predict-net 200 --decoder-output-use-sigmoid --decoder-output-bias -2 --seed 287710
# ```
#
# We used Pyro commit `c0b38ad` with PyTorch `0.2.0.post4`. Inference ran for approximately 4 hours on an NVIDIA K80 GPU. (Note that even though we set the random seed, this isn't sufficient to make inference deterministic when using CUDA.)
#
# ## In practice
#
# We found it important to pay attention to the following details in order to achieve good results with AIR.
#
# * Inference is unlikely to recover correct object counts unless a small prior success probability for `z_pres` is used. In [1] this [probability was annealed](http://akosiorek.github.io/ml/2017/09/03/implementing-air.html) from a value close to one to `1e-5` (or less) during optimization, though we found that a fixed value of around `0.01` worked well with our implementation.
# * We initialize the decoder network to generate mostly empty objects initially. (Using the `--decoder-output-bias` argument.) This encourages the guide to explore the use of objects to explain the input early in optimization. Without this each object is a mid-gray square which is heavily penalized by the likelihood, prompting the guide to turn most steps off.
# * It is reported to be useful in practice to use a different learning rate for the baseline network. This is straight forward to implement in Pyro by tagging modules associated with the baseline network and passing multiple learning rates to the optimizer. (See the section on [optimizers](svi_part_i.ipynb#Optimizers) in part I of the SVI tutorial for more detail.) In [1] a learning rate of `1e-4` was used for the guide network, and a learning rate of `1e-3` was used for the baseline network. We found it necessary to use a larger learning rate for the baseline network in order to make progress on count accuracy at a similar rate to [1]. This difference is likely caused by Pyro setting up a [slightly different baseline loss](https://github.com/uber/pyro/issues/555).
#
#
# ## References
#
# [1] `Attend, Infer, Repeat: Fast Scene Understanding with Generative Models`
# <br />
# <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and <NAME>
#
# [2] `Spatial Transformer Networks`
# <br />
# <NAME> and <NAME> and <NAME>
| tutorial/source/air.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Robust Scaler - Implantação
#
# Este é um componente que dimensiona atributos usando estatísticas robustas para outliers. Este Scaler remove a mediana e dimensiona os dados de acordo com o intervalo quantil (o padrão é Amplitude interquartil). Amplitude interquartil é o intervalo entre o 1º quartil (25º quantil) e o 3º quartil (75º quantil). Faz uso da implementação do [Scikit-learn](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.RobustScaler.html). <br>
# Scikit-learn é uma biblioteca open source de machine learning que suporta aprendizado supervisionado e não supervisionado. Também provê várias ferramentas para montagem de modelo, pré-processamento de dados, seleção e avaliação de modelos, e muitos outros utilitários.
#
# Este notebook apresenta:
# - como utilizar o [SDK da PlatIAgro](https://platiagro.github.io/sdk/) para carregar datasets, salvar modelos e outros artefatos.
# - como utilizar um modelo para fornecer predições em tempo real.
# ## Declaração de Classe para Predições em Tempo Real
#
# A tarefa de implantação cria um serviço REST para predições em tempo real.<br>
# Para isso você deve criar uma classe `Model` que implementa o método `predict`.
# +
# %%writefile Model.py
import logging
from typing import List, Iterable, Dict, Union
import numpy as np
import pandas as pd
from platiagro import load_model
logger = logging.getLogger(__name__)
class Model(object):
    """Wrapper exposing the fitted robust-scaler pipeline as a real-time
    prediction service (Seldon-style `predict` interface)."""

    def __init__(self, dataset: str = None, target: str = None):
        # Load the artifacts persisted at training time.
        artifacts = load_model()
        self.pipeline = artifacts["pipeline"]
        self.features_names_training = artifacts["columns"]
        self.features_after_pipeline = artifacts["features_after_pipeline"]

    def class_names(self):
        # Column names of the transformed output, as a plain list.
        return self.features_after_pipeline.tolist()

    def predict(self, X: np.ndarray, feature_names: Iterable[str], meta: Dict = None) -> Union[np.ndarray, List, str, bytes]:
        """Reorder the incoming features to match the training-time column
        order, then apply the fitted pipeline transform."""
        if feature_names:
            frame = pd.DataFrame(X, columns=feature_names)
            X = frame[self.features_names_training]
        return self.pipeline.transform(X)
# -
# ## Teste do serviço REST
#
# Crie um arquivo `contract.json` com os seguintes atributos:
#
# - `features` : A lista de features em uma requisição.
# - `targets` : A lista de valores retornados pelo método `predict`.
#
# Cada `feature` pode conter as seguintes informações:
#
# - `name` : nome da feature
# - `ftype` : tipo da feature : **continuous** ou **categorical**
# - `dtype` : tipo de dado : **FLOAT** ou **INT** : *obrigatório para ftype continuous*
# - `range` : intervalo de valores numéricos : *obrigatório para ftype continuous*
# - `values` : lista de valores categóricos : *obrigatório para ftype categorical*
#
# Em seguida, utilize a função `test_deployment` do [SDK da PlatIAgro](https://platiagro.github.io/sdk/) para simular predição em tempo-real.<br>
# %%writefile contract.json
{
"features": [
{
"name": "SepalLengthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [4.3, 7.9]
},
{
"name": "SepalWidthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [2.0, 4.4]
},
{
"name": "PetalLengthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [1.0, 6.9]
},
{
"name": "PetalWidthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [0.1, 2.5]
}
],
"targets": [
{
"name": "SepalLengthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [-1.6, 1.7]
},
{
"name": "SepalWidthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [-2.0, 3.0]
},
{
"name": "PetalLengthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [-1.0, 1.0]
},
{
"name": "PetalWidthCm",
"dtype": "FLOAT",
"ftype": "continuous",
"range": [-1.0, 1.0]
}
]
}
# +
from platiagro.deployment import test_deployment
# Simulate a real-time prediction request against the deployed model using
# the feature ranges declared in contract.json.
test_deployment("contract.json")
| samples/robust-scaler/Deployment.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Pull Request Analysis
# +
import psycopg2
import pandas as pd
# from sqlalchemy.types import Integer, Text, String, DateTime
import sqlalchemy as s
import matplotlib
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import json
# Read database credentials from an untracked local config file.
with open("config.json") as config_file:
    config = json.load(config_file)
# NOTE(review): the 'postgres+psycopg2' dialect name was removed in
# SQLAlchemy 1.4 in favor of 'postgresql+psycopg2' -- confirm the pinned
# SQLAlchemy version before upgrading.
database_connection_string = 'postgres+psycopg2://{}:{}@{}:{}/{}'.format(config['user'], config['password'], config['host'], config['port'], config['database'])
# Default all unqualified table names to the augur_data schema.
dbschema='augur_data'
engine = s.create_engine(
    database_connection_string,
    connect_args={'options': '-csearch_path={}'.format(dbschema)})
# -
# ## Pull Request Filter
## List of repository IDs for the report
# NOTE(review): these are Augur repo_id values -- confirm against the repo table.
repo_set = {25760, 25663}
# # This query describes the total number of issues opened in a repository, and the average, maximum and minimum number of comments on an issue.
#
# ## Getting the Data
# +
pr_all = pd.DataFrame()

# Collect per-repo issue/comment statistics and stack them into one frame.
for repo_id in repo_set:
    # BUG FIX: sqlalchemy is imported above as `s`; the original referenced
    # the undefined name `salc`, which raises NameError at run time.
    pr_query = s.sql.text(f"""
        SELECT
            repo.repo_id,
            repo.repo_name,
            repo_groups.rg_name,
            E.issues_count,
            AVG ( D.comment_count ) AS average_comments,
            MAX ( D.comment_count ) AS max_comments,
            MIN ( D.comment_count ) AS min_comments,
            stddev( D.comment_count ) AS stddev_comments
        FROM
            repo
            LEFT OUTER JOIN (
                SELECT
                    issues.issue_id,
                    issues.repo_id,
                    COUNT ( K.issue_msg_ref_id ) AS comment_count
                FROM
                    issues
                    LEFT OUTER JOIN issue_message_ref K ON issues.issue_id = K.issue_id
                WHERE
                    pull_request IS NULL -- GitHub provides pull requests in their issues API, as well as their pull requests API. We do not exclude this data from collection because it would make the provenance of the data we collect less transparent. We apply filters in queries and API endpoints, but not collection.
                GROUP BY
                    issues.issue_id,
                    issues.repo_id
                ORDER BY
                    issues.repo_id
            ) D ON repo.repo_id = D.repo_id,
            repo_groups,
            ( -- subquery table to provide issues count in context
                SELECT
                    repo.repo_id,
                    COUNT ( issue_id ) AS issues_count
                FROM
                    repo
                    LEFT OUTER JOIN (
                        SELECT
                            repo.repo_id,
                            issues.issue_id --the "double left outer join here seems puzzling. TO preserve "one row per repo" and exclude pull requests, we FIRST need to get a list of issues that are not pull requests, then count those. WIthout the "double left outer join", we would exclude repos that use pull requests, but not issues on GitHub
                        FROM
                            repo
                            LEFT OUTER JOIN issues ON issues.repo_id = repo.repo_id
                        WHERE
                            issues.pull_request IS NULL -- here again, excluding pull_requests at data analysis, but preserving GitHub API Provenance
                    ) K ON repo.repo_id = K.repo_id
                GROUP BY
                    repo.repo_id
            ) E -- this subquery table is what gives us the issue count per repo as context for deciding if repos with very small issue counts are excluded from some analyses.
        WHERE
            repo.repo_group_id = repo_groups.repo_group_id
            AND repo.repo_id = E.repo_id
            AND repo.repo_id = {repo_id}
        GROUP BY
            repo.repo_id,
            repo.repo_name,
            repo_groups.rg_name,
            repo_groups.repo_group_id,
            E.issues_count
        ORDER BY
            rg_name,
            repo_name;
    """)
    pr_a = pd.read_sql(pr_query, con=engine)
    # Append this repo's rows to the accumulated frame.
    if not pr_all.empty:
        pr_all = pd.concat([pr_all, pr_a])
    else:
        # first repo
        pr_all = pr_a
# -
print(pr_all)  # show the combined statistics for every repo in repo_set
| code/issues.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
# MovieLens-style ratings file: tab-separated, no header row.
header = ['user_id', 'item_id', 'rating', 'timestamp']
df = pd.read_csv('u.data', sep='\t', names=header)
n_users = df.user_id.unique().shape[0]
n_items = df.item_id.unique().shape[0]
print('Number of users = ' , str(n_users) , ' | Number of movies = ' , str(n_items))
# Fix: sklearn.cross_validation was removed in scikit-learn 0.20; the
# supported home of train_test_split is sklearn.model_selection.
from sklearn.model_selection import train_test_split
# Hold out 25% of the ratings for evaluation.
train_data, test_data = train_test_split(df, test_size=0.25)
train_data.shape
test_data.shape
#Create two user-item matrices, one for training and another for testing
train_data_matrix = np.zeros((n_users, n_items))
train_data_matrix
train_data.head()
original_train_data = train_data.copy()
# Pivot ratings into a user x item matrix; missing ratings become NaN.
train_data_matrix = train_data.pivot(index = "user_id" , columns = "item_id" , values = "rating")
train_data_matrix = train_data_matrix.sort_index(axis=1)
train_data_matrix.head()
# Replace missing ratings with 0 so similarity computations can run.
train_data_matrix = train_data_matrix.fillna(0)
train_data_matrix.head()
test_data.head()
original_test_data = test_data.copy()
# Same pivot/sort/fill treatment for the held-out ratings.
test_data_matrix = test_data.pivot(index = "user_id" , columns = "item_id" , values = "rating")
test_data_matrix = test_data_matrix.sort_index(axis=1)
test_data_matrix.head()
test_data_matrix = test_data_matrix.fillna(0)
test_data_matrix.head()
from sklearn.metrics.pairwise import pairwise_distances
# User-user cosine *distance* matrix (note: distance, not similarity).
user_similarity = pairwise_distances(train_data_matrix, metric='cosine')
def predict(ratings, similarity, type='user'):
    """Predict ratings from a user-item matrix and a similarity matrix.

    Parameters
    ----------
    ratings : (n_users, n_items) array of known ratings (0 = unrated).
    similarity : square array of pairwise user-user similarities when
        type='user', or item-item similarities when type='item'.
    type : 'user' for user-based CF (mean-centred weighted average over
        similar users) or 'item' for item-based CF.
        (Parameter name shadows the builtin but is kept: callers pass it
        as a keyword.)

    Returns
    -------
    (n_users, n_items) array of predicted ratings.

    Raises
    ------
    ValueError
        If *type* is neither 'user' nor 'item'. (Previously any value other
        than 'user' silently returned None.)
    """
    if type == 'user':
        # Centre each user's ratings on their own mean so users with
        # different rating scales are comparable.
        mean_user_rating = ratings.mean(axis=1)
        ratings_diff = (ratings - mean_user_rating[:, np.newaxis])
        pred = mean_user_rating[:, np.newaxis] + similarity.dot(ratings_diff) / np.array([np.abs(similarity).sum(axis=1)]).T
    elif type == 'item':
        # Item-based CF: weighted average over similar items, no centring.
        pred = ratings.dot(similarity) / np.array([np.abs(similarity).sum(axis=1)])
    else:
        raise ValueError("type must be 'user' or 'item', got %r" % (type,))
    return pred
# Run the user-based model on the training matrix.
user_prediction = predict(train_data_matrix, user_similarity, type='user')
mean_user_rating = train_data_matrix.mean(axis=1)
# NOTE(review): train_data_matrix is a DataFrame here, so `[:, np.newaxis]`
# on the resulting Series may fail on recent pandas versions — confirm.
train_data_matrix-mean_user_rating[:,np.newaxis]
# creating matrix of ratings
data_matrix = np.zeros((n_users, n_items))
for line in train_data.itertuples():
    # itertuples yields (Index, user_id, item_id, rating, timestamp);
    # ids are 1-based while the matrix is 0-based, hence the -1.
    data_matrix[line[1]-1, line[2]-1] = line[3]
data_matrix
| try_1/aise hi timepass.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lab 3 - Basic data cleaning and analysis
# *© 2020 <NAME>*
#
# Welcome to Week 3 of INFO 6270! Last week we explored two key elements of the Python programming language: loops and functions. These skills are the essential building blocks of virtually all work in data science. This week we will explore two basic data structures: lists and dictionaries. This week's case is built on rental data in Halifax from the Canada Mortgage and Housing Corporation (CMHC) and may actually provide you with useful insight in your search for housing.
#
# We are still working our way through [Sweigart (2020)](https://automatetheboringstuff.com/). A word of caution about this week's readings: though much of it is relevant, some of it goes too deep! I recommend looking through these challenge questions to see what may be most relevant before undertaking the readings. For instance, we will not explore tuples and will only use a few of the list methods that he describes.
#
# **This week, we will achieve the following objectives:**
# - Make and manage lists
# - Use a function to interpret list data
# - Assess list data quality
# - Make and manage a dictionary
# - Analyze where rent is growing fastest
#
# Weekly reading: Sweigart (2014) Ch. 4 and 5.
# # Case: Canada Mortgage and Housing Corporation
# The [Canada Mortgage and Housing Corporation (CMHC)](https://www.youtube.com/watch?v=vy19rwKFGYk#action=share) is a crown corporation with a mandate to assist housing for Canadians. Founded shortly after the second world war, its purpose was once to find housing for veterans. Now it provides programs for making mortgages more affordable and enforces policies designed to make rent more accessible. The CMHC also provides data on rental affordability, including [information on the average price of rent](https://www03.cmhc-schl.gc.ca/hmip-pimh/en#TableMapChart/0580/3/Halifax%20CMA) for various regions in Halifax through its information portal.
#
# Like most information portals, CMHC's is built to make the most important information accessible. This has the unfortunate consequence of also making it difficult to retrieve the data that you need. To retrieve rental price data, you must first select your appropriate data boundary and then select `Primary Rental Market` and `Average Rent ($)`. You can then download data in csv format, similar to the `2019_10_Halifax_Rental.csv` provided to you in Brightspace.
#
# The data provided by CMHC is not formatted in a way that is conducive to data science. They provide data in rows and columns, complete with notes at the bottom of the file. It would be desirable to prepare the data in a way that is appropriate for analysis. If we were able to do it in Python, we could sort through hundreds of CSV spreadsheets, such as the rental data for October of each preceding year.
# # Objective 1: Make and manage lists
# As you likely read in this week's readings, lists are very important for data scientists-- in fact, this is the week when our Python tasks will start to feel a bit more like data science, specifically! Lists are data structures which consist of a series of values organized in a systematic way. For example, if we wanted to list the rental values for the South End of Halifax, as given in the 2019 CMHC rental data, we could create a list such as `peninsula_south` below.
# +
# a list of rental prices
peninsula_south = [888, 1203, 1656, 1949] # corresponds to line 4 of the 2019 data
# -
# In some ways, lists are like the strings which we investigated the past few weeks. Lists are organized with indexes, similarly to the sequence of characters in strings. For example, if we wanted to retrieve a value for the first item in our list, we could retrieve it using the index.
peninsula_south[1] # retrieves the second value from the list
# Lists can also be subdivided using an index range, similarly to strings. For example, if we had the string `"The data scientist solved the problem"` and wanted to only retrieve the values `"The"`, we could specify the range `[0:3]`. Likewise, to retrieve values from a list, we can specify the range of items, such as with the code below.
peninsula_south[0:3] # returns a subdivided list of only the first three values
# This said, lists can contain any type of data, such as strings, integers, floats ... even other lists! In this respect, they are very different. The code below demonstrates how lists can contain different types of values. Try printing the various list values by executing the cell.
# +
list_example = ["Strings are cool!", 42, 42.56, ["Seriously, they are cool!", "You can even add lists within lists!"]]
for l in list_example:
print(l)
# -
# ### Looping through lists
# In practical terms, the combination of loops and lists is very powerful. In data science, we could store a series of values in a list and use loops to iterate through each item in the list. Interestingly, the `for` loop is particularly well suited for this task. Consider the code below, which iterates through each r (short notation for rental value) in `peninsula_south` and checks whether it is greater than 900, and if so, prints them.
for r in peninsula_south: # iterates through each list value
if r > 900:
print(r)
# This creates some interesting dynamics for your code. As we saw last week, `while` loops will execute the code in the loop until the logical condition is met. If we wanted, we could write a while loop that iterates over a range of prespecified values. Consider the following code which accomplishes the same result as the for loop above.
# +
i = 0
# iterates through the first four values
while i < 4:
if peninsula_south[i] > 900:
print(peninsula_south[i])
i += 1
# -
# There are some situations where you may wish to use a `while` loop instead of a for loop such as when you wish to specify specific list items (e.g. only values 1, and 3, etc.). However, this also comes with a disadvantage: if you iterate through values that do not exist, your loop will crash! The following loop attempts to iterate through five values, though only four exist. The result is a crash.
# +
i = 0
# tries to iterate through the first five values. There are only four values, so it crashes.
while i < 5:
if peninsula_south[i] > 900:
print(peninsula_south[i])
i += 1
# -
# Fortunately, we have the handy `len()` function which can be used to discover the length of a list. Execute the following cell to retrieve the length of `peninsula_south`.
len(peninsula_south) # returns the number of items in the list
# When combined with `while` loops, the `len()` function allows us to specify only the length of the list values. It is a best practice to use this function when creating `while` lists to prevent crashes.
# +
i = 0
# returns all of the values between i and the final list item
while i < len(peninsula_south):
print(peninsula_south[i])
i += 1
# -
# ### *Challenge Question 1 (2 points)*
# Many university interns in co-op positions in the Faculty of Management earn 16 per hour, which translates to approximately 2600 per month (gross). Financial advisors often recommend that people spend no more than 30\% of their gross income on housing, which in the case of co-op students, would translate to 780 per month. It could be desirable to know which of the living options are below this threshold.
#
# Create a new list called `peninsula_north` which contains the values for the Bachelor, 1 Bedroom, 2 Bedroom and 3 Bedroom rental prices from the 2019 spreadsheet. Create a loop which checks each of the values and prints the value if it is less than 780.
# +
peninsula_north = [754,951,1189,1408]# make this a list with the values from Peninsula North on line 5 of the 2019 data
# [insert code here] create a loop which prints the values which are less than 780 here
i = 0
# iterate over every rent value, printing those under the 780 affordability threshold
while i < len(peninsula_north):
    if peninsula_north[i] < 780:
        print(peninsula_north[i])
    i += 1
# -
# # Objective 2: Use a function to interpret list data
# Last week we started to use functions to interpret data. It is probably no surprise to you that functions are also extremely useful for managing structured data, such as that contained in lists. Though there are many possibilities, we will explore two ways that functions are typically used with respect to list data.
# ### ...functions can be used when iterating through lists
# Functions are often used to simplify your code so that you don't have to retype the code over and over. You can create functions that are used for iterating through lists and performing some sort of logic on each list item. For example, check out `checkRent()` below, which checks a rent value that is given as input and determines whether the rent is affordable.
def checkRent(rent):
    """Print whether a monthly *rent* value falls under the 877 affordability cutoff."""
    verdict = 'This is affordable.' if rent < 877 else 'Not affordable.'
    print(verdict)
# We could use this function to create a simple while loop which checks the rent for each value in the list. The following code does this on the `peninsula_north` list.
# +
i = 0
while i < len(peninsula_north): # iterate through each list value
checkRent(peninsula_north[i]) # execute the checkRent function
i += 1
# -
# Alternatively, we could further simplify this using a `for` loop. Consider the following code which does the same thing on `peninsula_south`. Consider changing it to check if it also works with the north!
for r in peninsula_south: # loop through the values in peninsula_south
checkRent(r) # execute the function
# ### ... functions can be used to process lists
# Additionally, functions can also take lists as input and process them and perform logic to them! For example, it could be desirable to determine whether the values of many lists meet our requirements. By taking a list as input, we could easily process both `peninsula_north` and `peninsula_south`.
#
# The `explainRent()` function below takes a `region_list` as an input and uses a loop to determine whether the values in the list are affordable.
# +
i = 0
def explainRent(region_list):
    """Print a one-line summary for every rent value in *region_list*.

    Index 0 is treated as the Bachelor price; index i > 0 as the
    i-bedroom price, mirroring the CMHC spreadsheet column order.
    """
    for idx, rent in enumerate(region_list):
        if idx == 0:
            print("A Bachelor apartment in this region costs " + str(rent) + " on average")
        else:
            print("A " + str(idx) + " bedroom apartment in this region costs " + str(rent) + " on average")
# -
# We can now use the function to crunch through the `peninsula_south` list in one line! Consider modifying the code to try it for the North End as well.
explainRent(peninsula_south) # execute the function
# Finally, functions can take multiple inputs, in addition to lists. For example, rather than predefining the value that we would like to check, we could take the value as an input, giving us more flexibility. The `assessRent()` function below similarly takes a `region_list` but also takes in a `threshold` which it uses to compare. This is very handy for calculating whether rent is affordable for various income levels!
def assessRent(region_list, threshold):
    """Print every rent value in *region_list* that comes in under *threshold*."""
    for idx, rent in enumerate(region_list):
        if rent >= threshold:
            continue  # too expensive: report only affordable entries
        label = "A Bachelor apartment" if idx == 0 else "A " + str(idx) + " bedroom apartment"
        print(label + " in this region costs " + str(rent) + " on average, which is affordable")
# Try executing the function below with different income levels (e.g. 1200). You will see how the output changes.
assessRent(peninsula_north, 800)
# ### *Challenge Question 2 (2 points)*
# The `assessRent()` function above is very handy. The only limitation is that it produces a generic response. Modify the code to do the following:
#
# * Take a third input called `region_name`, which is a string
# * Append the appropriate string contained in `region_list` to your print statement to give a context relevant response
# * Two test scenarios are provided to test your code
def assessRent(region_list, threshold,region_name):
    """Print each rent in *region_list* under *threshold*, naming *region_name*."""
    for idx, rent in enumerate(region_list):
        if rent < threshold:
            prefix = "A Bachelor apartment" if idx == 0 else "A " + str(idx) + " bedroom apartment"
            print(prefix + " in " + region_name + " costs " + str(rent) + " on average, which is affordable")
# #### Sample Test 1
# Should return `"A Bachelor apartment in this region costs 754 on average, which is affordable"`
assessRent(peninsula_north, 800, "North End")
# #### Sample Test 2
# Should return:
#
# `"A Bachelor apartment in this region costs 888 on average, which is affordable"`
#
# `"A 1 bedroom apartment in this region costs 1203 on average, which is affordable"`
assessRent(peninsula_south, 1500, "South End")
# # Objective 3: Assess list data quality
# So far, we have used relatively clean data in our analysis. However, the majority of data that you will encounter starting next week will be... less than ideal. If we observe the `Halifax_Rental.csv` files, it will become clear that the CSV rows actually consist of data assessment characters following each value. Our data should actually look more similarly to that provided in `mainland_south`.
mainland_south = [642,"b",696,"a",838,"a",897,"a"] #corresponds to line 6 of the 2019 rental data
# Fortunately, list data types come with a few handy methods for solving data cleaning problems. Sweigart's Chapter 4 goes into a lot of detail (perhaps *too* much detail this time) about handy list methods, which you should read through. We will highlight a few of them here however.
#
# The index method will tell you the *first* instance of a specified value in a list. This can be very handy for figuring out where the value sits on the list so that we can modify or remove it.
mainland_south.index("b")
# The `del` statement is used to remove list values. Now that we know that `mainland_south[1]` is an inappropriate character, we can remove it using `del`. If we would like to use this to remove the character `"b"` we could do something like the following.
# +
del mainland_south[1]
mainland_south
# -
# Alternatively, we could use the `remove` method to clean out particular values. The code below removes the first instance of the letter `a`. However, it only removes it once, so we would have to run this 3 times to get the list the way that we would like. There are more efficient ways to do this, though that may be a story for another day.
# +
# run me 3 times to clear the bad data!
mainland_south.remove("a")
mainland_south
# -
# Finally, it is helpful to insert values into a list. The most common method used for this task is `append` which adds the value to the end of the list. The following code appends the total average value for `Mainland South` to the list. Pretty handy!
mainland_south.append(779)
mainland_south
# Alternatively, the `insert` function can be used to accomplish this task. You can read more about it in Sweigart Chapter 4.
# ### *Challenge Question 3 (2 points)*
# The `sackville` list below contains a value of `"**"`. In the cell below, create code that changes this value to the string `"Insufficient data"`. _Note: Though you could simply re-write this list, such answers will not be accepted. Your code must remove, insert or change the value in the list below!_
sackville = ["**", 931, 1098, 1288, 1101]
# Swap the "**" placeholder for a readable label without rewriting the list:
# remove the first "**", then insert the label back at index 0.
sackville.remove("**")
sackville.insert(0,'Insufficient data')
# #### Sample Test 1
# Should return `['Insufficient data', 931, 1098, 1288, 1101]`.
sackville
# # Objective 4: Make and manage a dictionary
# In addition to lists, there is a second data structure that is commonly used in Python: dictionaries. Unlike lists which use the list order to determine the sequence of values, dictionaries use a key-value pair structure. There are no "first" items in a dictionary; instead, all of the values stored are mapped with keys.
#
# Sometimes it is better to simply see things in action. The data for `peninsula_south` have been re-written into a dictionary, this time with keys (i.e. `Bach`, `1Bdr`) mapping to their respective values (i.e. `851`, `1093`). Try running the code below to set up your dictionary.
peninsula_south = {'Bach': 888, '1Bdr': 1203, '2Bdr': 1656, '3Bdr': 1949}
# To retrieve a dictionary value, we simply need to specify the key that we are looking for! The line below retrieves the value for bachelor apartments. Consider modifying it to retrieve 1 bedrooms.
peninsula_south['Bach']
# +
keys = ['Bach','1Bdr','2Bdr','3Bdr']
for i in keys:
print(peninsula_south[i])
# -
# Similarly to lists, dictionaries can be taken as inputs to functions or can be iterated through using key-value pairs.
# ### *Challenge Question 4 (2 points)*
# Create function called `assessRent()` which does the following:
# * Takes three inputs:
# * region_dictionary (dictionary)
# * key (string)
# * threshold (integer)
# * Assesses whether the inputted key-dictionary pair is less than threshold
# * Returns the value, and whether the apartment was affordable
def assessRent(region_dictionary, key, treshold):
    """Look up *key* in *region_dictionary* and report whether its rent is
    under *treshold*.

    NOTE(review): the parameter is spelled 'treshold' in the original;
    kept as-is for interface compatibility.
    """
    rent = region_dictionary[key]
    verdict = "affordable" if rent < treshold else "unaffordable"
    print(key + " in this region costs " + str(rent) + " and is " + verdict + " for this person")
# #### Sample Test 1
# Should return `"1Bdr in this region costs 951 and is unaffordable for this person."`.
# +
peninsula_north = {'Bach': 754, '1Bdr': 951, '2Bdr': 1189, '3Bdr': 1408} # values extracted from the spreadsheet
assessRent(peninsula_north, '1Bdr', 600)
# -
# #### Sample Test 2
# Should return `"2Bdr in this region costs 1189 and is affordable for this person."`
# +
peninsula_north = {'Bach': 754, '1Bdr': 951, '2Bdr': 1189, '3Bdr': 1408} # values extracted from the spreadsheet
assessRent(peninsula_north, '2Bdr', 2000)
# -
# # Objective 5: Analyze where rent is growing fastest
# Perhaps the most powerful feature of the dictionary data structure is that it can be nested within itself. For those of you who took INFO 5590, you will remember the structure of JSON ("JavaScript Object Notation") data, which is the backbone of modern internet data exchange. Python dictionaries are structured similarly to JSON, and are designed to allow users to nest dictionaries within other dictionaries. For example, we could store both the values for `Peninsula South` and `Peninsula North` inside of a larger `halifax` dictionary using the key-value architecture.
# +
# 2019 data
halifax = {
'Peninsula South' : {'Bach': 888, '1Bdr': 1203, '2Bdr': 1656, '3Bdr': 1949},
'Peninsula North' : {'Bach': 754, '1Bdr': 951, '2Bdr': 1189, '3Bdr': 1408}
}
# -
# This allows us to navigate across many data values. In Python, you can navigate between nested keys by simply writing the nested key adjacent to the key from the first level. The code below will give you the value for the Bachelor apartments in the South End, though you can also use it to retrieve values such as 1 Bedrooms in the North end if you would like. Give it a try!
halifax['Peninsula South']['Bach']
# If we wanted to, we could further expand our dictionary to encompass years. For example, the following dictionary contains 3 levels: years, locations, and apartment types.
# +
# corresponds to the 2018 and 2019 data from two CSV files
halifax_rentals = {
'2018': {
'Peninsula South': {'Bach': 872, '1Bdr': 1137, '2Bdr': 1608, '3Bdr': 1906},
'Peninsula North' : {'Bach': 782, '1Bdr': 947, '2Bdr': 1181, '3Bdr': 1362},
'Mainland South' : {'Bach': 829, '1Bdr': 694, '2Bdr': 957, '3Bdr': 1035},
'Mainland North' : {'Bach': 756, '1Bdr': 876, '2Bdr': 1147, '3Bdr': 1360},
},
'2019': {
'Peninsula South': {'Bach': 888, '1Bdr': 1203, '2Bdr': 1656, '3Bdr': 1949},
'Peninsula North' : {'Bach': 754, '1Bdr': 951, '2Bdr': 1189, '3Bdr': 1408},
'Mainland South' : {'Bach': 642, '1Bdr': 696, '2Bdr': 838, '3Bdr': 897},
'Mainland North' : {'Bach': 776, '1Bdr': 923, '2Bdr': 1186, '3Bdr': 1405},
}
}
# -
# We can also navigate through this dictionary using the three levels of keys. The following code gives us the data about 1 bedroom apartment rentals in the north end in 2018.
halifax_rentals['2018']['Mainland North']['1Bdr']
# Similarly, if we wished to compare multiple years, we could simply print the values from multiple dictionary entries. The example below prints the 1 Bedroom apartment values in the north end from 2018 and 2019.
#
# _Note, this is actually one line of code that is broken apart for readability. You can break apart strings into multiple lines in Python_.
print("Rentals on the mainland north region were "
+ str(halifax_rentals['2018']['Mainland North']['1Bdr'])
+ " in 2018 and " + str(halifax_rentals['2019']['Mainland North']['1Bdr']) + " in 2019.")
# ### *Challenge Question 5 (2 points)*
# Create a function called compareRent which compares the growth of rent for a particular region and apartment pairing between two years. The function should do the following:
# * Take five inputs:
# * data (dictionary)
# * region (string)
# * apartment (string)
# * year1 (string)
# * year2 (string)
# * It should calculate the percent growth (year 2's value minus year 1's value divide by year 2's value)
# * It should convert this calculated value into a percentage (by multiplying it by 100)
# * The percentage should be rounded to two spaces
# * The function should return a string that specifies the apartment type, region, and years
#
# Pro Tip: When this function is done, you can use it to analyze apartments in regions that interest you!
# +
halifax_rentals = {
'2018': {
'Peninsula South': {'Bach': 872, '1Bdr': 1137, '2Bdr': 1608, '3Bdr': 1906},
'Peninsula North' : {'Bach': 782, '1Bdr': 947, '2Bdr': 1181, '3Bdr': 1362},
'Mainland South' : {'Bach': 829, '1Bdr': 694, '2Bdr': 957, '3Bdr': 1035},
'Mainland North' : {'Bach': 756, '1Bdr': 876, '2Bdr': 1147, '3Bdr': 1360},
},
'2019': {
'Peninsula South': {'Bach': 888, '1Bdr': 1203, '2Bdr': 1656, '3Bdr': 1949},
'Peninsula North' : {'Bach': 754, '1Bdr': 951, '2Bdr': 1189, '3Bdr': 1408},
'Mainland South' : {'Bach': 642, '1Bdr': 696, '2Bdr': 838, '3Bdr': 897},
'Mainland North' : {'Bach': 776, '1Bdr': 923, '2Bdr': 1186, '3Bdr': 1405},
}
}
def compareRent(data, region, apartment, year1, year2):
    """Print the percent rent growth for one region/apartment pair between two years.

    Growth is (year2 - year1) / year2 * 100, rounded to two decimal places,
    per the assignment definition.
    """
    old_rent = data[year1][region][apartment]
    new_rent = data[year2][region][apartment]
    growth = round((new_rent - old_rent) / new_rent * 100, 2)
    print("Rent for " + apartment + " in " + region + " grew by " + str(growth) + "% between " + year1 + " and " + year2)
# -
# #### Sample Test 1
# Should return `"Rent for 1Bdr in Mainland South grew by 0.29% between 2018 and 2019"`
compareRent(halifax_rentals, 'Mainland South', '1Bdr', '2018', '2019')
# #### Sample Test 2
# Should return `"Rent for 2Bdr in Peninsula South grew by 2.9% between 2018 and 2019"`
compareRent(halifax_rentals, 'Peninsula South', '2Bdr', '2018', '2019')
# # References
# Canada Mortgage and Housing Corporation (19 January 2020). Housing Market Information Portal. Retrieved from: https://www03.cmhc-schl.gc.ca/hmip-pimh/en#TableMapChart/0580/3/Halifax%20CMA
| Lab3 - Basic data cleaning and analysis/lab3_jaswanth.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np

# Ordinary least squares via the normal equations: theta = (X^T X)^(-1) X^T y.
x = np.matrix('1 0; 1 1; 1 1; 1 2; 1 1')  # design matrix: intercept column + feature
y = np.matrix('3; 2; 1; 1; 3')            # observed targets
a = np.dot(x.transpose(), x)  # X^T X (Gram matrix)
b = np.dot(x.transpose(), y)  # X^T y
# Solve a @ theta = b directly: np.linalg.solve is faster and numerically
# more stable than forming the explicit inverse with inv() and dot().
theta = np.linalg.solve(a, b)
theta
| notebooks/minimos quadrados.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/lawrenceChege/AI/blob/master/Temp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="hckWqZpQ_d5s" colab_type="code" colab={}
import tensorflow as tf
# NOTE(review): tf.logging was removed in TensorFlow 2.x (tf.compat.v1.logging
# is the 2.x shim) — this cell assumes a TF 1.x runtime; confirm the version.
tf.logging.set_verbosity(tf.logging.ERROR)
import numpy as np
# Paired training data: each Celsius value maps to its Fahrenheit equivalent
# (f = 1.8 * c + 32), which the single-unit model below should learn.
celcius_q = np.array([-40, -10, 0, 8, 15, 22, 38], dtype=float)
farenheit_a = np.array([-40, 14, 32, 46, 59, 72, 100], dtype=float)
for i,c in enumerate(celcius_q):
    print("{} Degrees Celcius = {} Degrees Farenheit".format(c, farenheit_a[i]))
# + id="tPghV0WDKvPC" colab_type="code" colab={}
# A single Dense unit on a scalar input: the learned weight and bias should
# approach 1.8 and 32 — the Celsius-to-Fahrenheit coefficients.
model =tf.keras.Sequential([
    tf.keras.layers.Dense(units=1, input_shape=[1])
])
# + id="ZdXL8wxBNhEz" colab_type="code" colab={}
# MSE loss with Adam at a relatively large learning rate (0.1) since the
# problem is a tiny linear fit.
model.compile(loss='mean_squared_error',
             optimizer = tf.keras.optimizers.Adam(0.1))
# + id="Y9JsDpQEO20l" colab_type="code" outputId="8f0b24d2-bc68-452c-b18c-20094212162e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# verbose=False suppresses the per-epoch progress output for 800 epochs.
history = model.fit(celcius_q, farenheit_a, epochs=800, verbose=False)
print("Finished training model")
# + [markdown] id="NH1b-cW0W9T1" colab_type="text"
#
# + id="YLiDh0-nPnWv" colab_type="code" colab={}
import matplotlib.pyplot as plt
plt.xlabel('Epoch Number')
plt.ylabel('Loss Magnitude')
plt.plot(history.history['loss'])
# + id="HdCm8Xt3QF-h" colab_type="code" outputId="7eb1620f-18cd-4c87-9331-385308ad26bb" colab={"base_uri": "https://localhost:8080/", "height": 85}
# Predict Fahrenheit for a few unseen Celsius values.
print(model.predict([12,34,45,453]))
# + id="yTxJMSdBRPx9" colab_type="code" outputId="e953446b-c076-41c0-e911-42cf3286af7e" colab={"base_uri": "https://localhost:8080/", "height": 34}
# The Dense layer's kernel/bias should be close to [1.8] and [32].
print("These are the layer variables: {}".format(model.get_weights()))
| Temp.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "-"}
# # 多头注意力
#
#
# -
import sys
sys.path.append('..')
# + origin_pos=2 tab=["pytorch"]
import math
import mindspore
import mindspore.numpy as mnp
import mindspore.nn as nn
import mindspore.ops as ops
from d2l import mindspore as d2l
# + [markdown] slideshow={"slide_type": "slide"}
# 选择缩放点积注意力作为每一个注意力头
# + origin_pos=6 tab=["pytorch"]
class MultiHeadAttention(nn.Cell):
    """Multi-head attention (MindSpore).

    Projects queries/keys/values into `num_hiddens` dimensions, splits them
    into `num_heads` parallel heads, applies scaled dot-product attention per
    head, then merges the heads through a final linear projection.
    """
    def __init__(self, key_size, query_size, value_size, num_hiddens,
                 num_heads, dropout, has_bias=False, **kwargs):
        super(MultiHeadAttention, self).__init__(**kwargs)
        self.num_heads = num_heads
        self.attention = d2l.DotProductAttention(dropout)
        # One projection per role; the per-head split happens in transpose_qkv,
        # so each Dense maps to the full num_hiddens width.
        self.W_q = nn.Dense(query_size, num_hiddens, has_bias=has_bias)
        self.W_k = nn.Dense(key_size, num_hiddens, has_bias=has_bias)
        self.W_v = nn.Dense(value_size, num_hiddens, has_bias=has_bias)
        self.W_o = nn.Dense(num_hiddens, num_hiddens, has_bias=has_bias)
    def construct(self, queries, keys, values, valid_lens):
        # (batch, seq, num_hiddens) -> (batch * num_heads, seq, num_hiddens / num_heads)
        queries = transpose_qkv(self.W_q(queries), self.num_heads)
        keys = transpose_qkv(self.W_k(keys), self.num_heads)
        values = transpose_qkv(self.W_v(values), self.num_heads)
        if valid_lens is not None:
            # Repeat each length once per head so masking lines up with the
            # flattened (batch * num_heads) leading axis.
            valid_lens = mnp.repeat(
                valid_lens, repeats=self.num_heads, axis=0)
        output, attention_weights = self.attention(queries, keys, values, valid_lens)
        # Undo the head split, then apply the output projection.
        output_concat = transpose_output(output, self.num_heads)
        return self.W_o(output_concat), attention_weights
# + [markdown] slideshow={"slide_type": "slide"}
# 使多个头并行计算
# + origin_pos=10 tab=["pytorch"]
def transpose_qkv(X, num_heads):
    """Split the hidden axis into heads and fold the heads into the batch axis.

    (batch, seq, hidden) -> (batch * num_heads, seq, hidden / num_heads)
    """
    batch, seq_len = X.shape[0], X.shape[1]
    per_head = X.reshape(batch, seq_len, num_heads, -1)
    head_major = per_head.transpose(0, 2, 1, 3)
    return head_major.reshape(-1, seq_len, head_major.shape[3])
def transpose_output(X, num_heads):
    """Reverse transpose_qkv: merge per-head outputs back into one hidden axis.

    (batch * num_heads, seq, head_dim) -> (batch, seq, num_heads * head_dim)
    """
    seq_len, head_dim = X.shape[1], X.shape[2]
    unstacked = X.reshape(-1, num_heads, seq_len, head_dim)
    reordered = unstacked.transpose(0, 2, 1, 3)
    return reordered.reshape(reordered.shape[0], seq_len, -1)
# + [markdown] slideshow={"slide_type": "slide"}
# 测试
# + origin_pos=14 tab=["pytorch"]
# key/query/value sizes all equal num_hiddens, so each of the 5 heads
# works on 100 / 5 = 20 dimensions.
num_hiddens, num_heads = 100, 5
attention = MultiHeadAttention(num_hiddens, num_hiddens, num_hiddens,
                               num_hiddens, num_heads, 0.5)
attention.set_train(False)  # disable dropout for the shape check
# + origin_pos=16 tab=["pytorch"]
batch_size, num_queries, num_kvpairs, valid_lens = 2, 4, 6, mindspore.Tensor([3, 2], mindspore.int32)
X = mnp.ones((batch_size, num_queries, num_hiddens))
Y = mnp.ones((batch_size, num_kvpairs, num_hiddens))
# Output shape should match the queries: (batch_size, num_queries, num_hiddens).
attention(X, Y, Y, valid_lens)[0].shape
| chapter_10_attention_mechanisms/3_multihead-attention.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:torch] *
# language: python
# name: conda-env-torch-py
# ---
import sys
sys.path.insert(0, "../src")
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# %reload_ext nb_black
# +
import gc
import warnings
from pathlib import Path
from tqdm.notebook import tqdm
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn import metrics
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader, RandomSampler, SequentialSampler
import pytorch_lightning as pl
import torchvision.transforms as T
import albumentations as A
from albumentations.pytorch import ToTensorV2
from transformers import AdamW, get_cosine_schedule_with_warmup
import optim
import loss
from utils import visualize, radar2precipitation, seed_everything
# -
warnings.simplefilter("ignore")
# # U-Net
# ## Config
# +
args = dict(
seed=42,
dams=(6071, 6304, 7026, 7629, 7767, 8944, 11107),
train_folds_csv=Path("../input/train_folds.csv"),
train_data_path=Path("../input/train"),
test_data_path=Path("../input/test"),
model_dir=Path("../models"),
output_dir=Path("../output"),
rng=255.0,
num_workers=4,
gpus=1,
lr=1e-3,
max_epochs=30,
batch_size=256,
precision=16,
optimizer="adamw",
scheduler="cosine",
accumulate_grad_batches=1,
gradient_clip_val=5.0,
warmup_epochs=1,
)
args["trn_tfms"] = A.Compose(
[
A.PadIfNeeded(min_height=128, min_width=128, always_apply=True, p=1),
ToTensorV2(always_apply=True, p=1),
]
)
args["val_tfms"] = A.Compose(
[
A.PadIfNeeded(min_height=128, min_width=128, always_apply=True, p=1),
ToTensorV2(always_apply=True, p=1),
]
)
# -
# ## Dataset
class NowcastingDataset(Dataset):
    """Radar-frame dataset: each .npy file holds stacked frames where the
    first four channels are the input sequence and channel 4 is the target.

    Args:
        paths: list of .npy file paths.
        tfms: optional albumentations pipeline; defaults to pad-to-128 + tensor.
        test: when True, __getitem__ returns inputs only (no target frame).
    """
    def __init__(self, paths, tfms=None, test=False):
        self.paths = paths
        if tfms is not None:
            self.tfms = tfms
        else:
            # Default pipeline mirrors args["val_tfms"]: pad to 128x128 then
            # convert to a channels-first tensor.
            self.tfms = A.Compose(
                [
                    A.PadIfNeeded(
                        min_height=128, min_width=128, always_apply=True, p=1
                    ),
                    ToTensorV2(always_apply=True, p=1),
                ]
            )
        self.test = test
    def __len__(self):
        return len(self.paths)
    def __getitem__(self, idx):
        path = self.paths[idx]
        data = np.load(path)
        augmented = self.tfms(image=data)
        data = augmented["image"]
        # First 4 channels = input frames, scaled to [0, 1] by args["rng"].
        x = data[:4, :, :]
        x = x / args["rng"]
        if self.test:
            return x
        else:
            # Channel 4 = target frame; unsqueeze keeps a leading channel dim.
            y = data[4, :, :]
            y = y / args["rng"]
            y = y.unsqueeze(0)
            return x, y
class NowcastingDataModule(pl.LightningDataModule):
    """LightningDataModule wrapping train/val/test NowcastingDatasets.

    Train/val file lists come from the fold dataframes; test files are all
    .npy files under args["test_data_path"].
    """
    def __init__(
        self,
        train_df=None,
        val_df=None,
        batch_size=args["batch_size"],
        num_workers=args["num_workers"],
        test=False,
    ):
        super().__init__()
        self.train_df = train_df
        self.val_df = val_df
        self.batch_size = batch_size
        self.num_workers = num_workers
        self.test = test
    def setup(self, stage="train"):
        # Build datasets lazily: train/val from the fold dataframes, or the
        # sorted test files when stage != "train".
        if stage == "train":
            train_paths = [
                args["train_data_path"] / fn for fn in self.train_df.filename.values
            ]
            val_paths = [
                args["train_data_path"] / fn for fn in self.val_df.filename.values
            ]
            self.train_dataset = NowcastingDataset(train_paths, tfms=args["trn_tfms"])
            self.val_dataset = NowcastingDataset(val_paths, tfms=args["val_tfms"])
        else:
            test_paths = list(sorted(args["test_data_path"].glob("*.npy")))
            self.test_dataset = NowcastingDataset(test_paths, test=True)
    def train_dataloader(self):
        # Shuffled sampler; drop_last keeps batch statistics stable for BatchNorm.
        return DataLoader(
            self.train_dataset,
            batch_size=self.batch_size,
            sampler=RandomSampler(self.train_dataset),
            pin_memory=True,
            num_workers=self.num_workers,
            drop_last=True,
        )
    def val_dataloader(self):
        # No gradients at eval time, so a doubled batch size fits in memory.
        return DataLoader(
            self.val_dataset,
            batch_size=2 * self.batch_size,
            sampler=SequentialSampler(self.val_dataset),
            pin_memory=True,
            num_workers=self.num_workers,
        )
    def test_dataloader(self):
        return DataLoader(
            self.test_dataset,
            batch_size=2 * self.batch_size,
            sampler=SequentialSampler(self.test_dataset),
            pin_memory=True,
            num_workers=self.num_workers,
        )
# ## Model
# ### Basic
class BasicBlock(nn.Module):
    """Residual block that preserves channel count and spatial size.

    Raises:
        ValueError: if in_ch != out_ch (the identity skip requires equal
            channel counts).
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # A plain `assert` is stripped under `python -O`; validate explicitly.
        if in_ch != out_ch:
            raise ValueError(
                f"BasicBlock requires in_ch == out_ch, got {in_ch} and {out_ch}"
            )
        self.net = nn.Sequential(
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_ch),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1),
        )

    def forward(self, x):
        # Residual connection: identity + conv branch.
        return x + self.net(x)
# ### Encoder
# +
class DownBlock(nn.Module):
    """Residual downsampling block: halves spatial size, maps in_ch -> out_ch.

    Returns both the residual sum (fed to the next encoder stage) and the
    pre-residual branch output (used as the skip connection).
    """

    def __init__(self, in_ch, out_ch):
        super().__init__()
        # 1x1 strided conv matches the identity path to the branch's shape.
        self.id_conv = nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=2)
        self.net = nn.Sequential(
            nn.BatchNorm2d(in_ch),
            nn.LeakyReLU(inplace=True),
            nn.MaxPool2d(2),
            nn.BatchNorm2d(in_ch),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1),
        )

    def forward(self, x):
        shortcut = self.id_conv(x)
        branch = self.net(x)
        return shortcut + branch, branch
class Encoder(nn.Module):
    """Stack of DownBlocks followed by a BasicBlock; collects skip features.

    forward returns one feature map per stage (pre-residual branch outputs)
    plus the final BasicBlock output, shallowest first.
    """

    def __init__(self, chs=[4, 64, 128, 256, 512, 1024]):
        super().__init__()
        self.blocks = nn.ModuleList(
            [DownBlock(c_in, c_out) for c_in, c_out in zip(chs, chs[1:])]
        )
        self.basic = BasicBlock(chs[-1], chs[-1])

    def forward(self, x):
        feats = []
        for block in self.blocks:
            x, skip = block(x)
            feats.append(skip)
        feats.append(self.basic(x))
        return feats
# -
# ### Decoder
# +
class UpBlock(nn.Module):
    """Residual upsampling block: concatenates the skip feature, doubles
    spatial size, and maps 2*in_ch -> out_ch channels.

    Args:
        in_ch: channels of both the decoder input and the skip feature map.
        out_ch: channels of the upsampled output.
        bilinear: use a parameter-free upsample (nearest mode) instead of a
            transposed convolution in the main branch.
    """

    def __init__(self, in_ch, out_ch, bilinear=False):
        super().__init__()
        # Identity path: learnable upsample matching the branch's output shape.
        self.id_conv = nn.ConvTranspose2d(
            in_ch + in_ch, out_ch, kernel_size=2, stride=2
        )
        layers = []
        if bilinear:
            layers.append(nn.Upsample(scale_factor=2, mode="nearest"))
            # BUG FIX: Upsample keeps 2*in_ch channels, but the following
            # BatchNorm2d/convs expect out_ch; project channels with a 1x1 conv.
            layers.append(nn.Conv2d(in_ch + in_ch, out_ch, kernel_size=1))
        else:
            layers.append(
                nn.ConvTranspose2d(in_ch + in_ch, out_ch, kernel_size=2, stride=2)
            )
        layers.extend(
            [
                nn.BatchNorm2d(out_ch),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.LeakyReLU(inplace=True),
                nn.Conv2d(out_ch, out_ch, kernel_size=3, padding=1, bias=False),
            ]
        )
        self.block = nn.Sequential(*layers)

    def forward(self, x, feat):
        # Concatenate the decoder input with the encoder skip connection.
        x = torch.cat([x, feat], dim=1)
        return self.block(x) + self.id_conv(x)
class Decoder(nn.Module):
    """Chain of UpBlocks consuming the encoder skip features in order."""

    def __init__(self, chs=[1024, 512, 256, 128, 64]):
        super().__init__()
        self.blocks = nn.ModuleList(
            [UpBlock(c_in, c_out) for c_in, c_out in zip(chs, chs[1:])]
        )

    def forward(self, x, feats):
        # Each stage upsamples and merges the matching skip connection.
        for block, skip in zip(self.blocks, feats):
            x = block(x, skip)
        return x
# -
# ### U-Net
class UNet(pl.LightningModule):
    """Residual U-Net LightningModule for radar nowcasting.

    Takes 4 input frames (channels) and predicts the next frame scaled to
    [0, 1] (sigmoid head); trained with L1 loss.
    """
    def __init__(
        self,
        lr=args["lr"],
        enc_chs=[4, 64, 128, 256, 512, 1024],
        dec_chs=[1024, 512, 256, 128, 64],
        num_train_steps=None,
    ):
        super().__init__()
        self.lr = lr
        # optimizer steps per epoch; drives the step-based cosine schedule
        self.num_train_steps = num_train_steps
        self.criterion = nn.L1Loss()
        self.tail = BasicBlock(4, enc_chs[0])
        self.encoder = Encoder(enc_chs)
        self.decoder = Decoder(dec_chs)
        self.head = nn.Sequential(
            nn.ConvTranspose2d(dec_chs[-1], 32, kernel_size=2, stride=2, bias=False),
            nn.BatchNorm2d(32),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(32, 1, kernel_size=3, padding=1),
            # nn.ReLU(inplace=True),
            nn.Sigmoid(),
        )
    def forward(self, x):
        x = self.tail(x)
        feats = self.encoder(x)
        # deepest feature first; the rest are skip connections for the decoder
        feats = feats[::-1]
        x = self.decoder(feats[0], feats[1:])
        x = self.head(x)
        return x
    def shared_step(self, batch, batch_idx):
        # Common train/val logic: forward pass plus L1 loss.
        x, y = batch
        y_hat = self(x)
        loss = self.criterion(y_hat, y)
        return loss, y, y_hat
    def training_step(self, batch, batch_idx):
        loss, y, y_hat = self.shared_step(batch, batch_idx)
        self.log("train_loss", loss)
        # log the current learning rate(s) to monitor the schedule
        for i, param_group in enumerate(self.optimizer.param_groups):
            self.log(f"lr/lr{i}", param_group["lr"])
        return {"loss": loss}
    def validation_step(self, batch, batch_idx):
        loss, y, y_hat = self.shared_step(batch, batch_idx)
        return {"loss": loss, "y": y.detach(), "y_hat": y_hat.detach()}
    def validation_epoch_end(self, outputs):
        # Aggregate validation outputs and compute the competition metric
        # MAE/CSI, evaluated only on the dam pixel indices.
        avg_loss = torch.stack([x["loss"] for x in outputs]).mean()
        self.log("val_loss", avg_loss)
        y = torch.cat([x["y"] for x in outputs])
        y_hat = torch.cat([x["y_hat"] for x in outputs])
        # undo the 128x128 padding back to the original 120x120 frame
        crop = T.CenterCrop(120)
        y = crop(y)
        y_hat = crop(y_hat)
        batch_size = len(y)
        y = y.detach().cpu().numpy()
        y *= args["rng"]
        y = y.reshape(batch_size, -1)
        # keep only the dam pixels
        y = y[:, args["dams"]]
        y_hat = y_hat.detach().cpu().numpy()
        y_hat *= args["rng"]
        y_hat = y_hat.reshape(batch_size, -1)
        y_hat = y_hat[:, args["dams"]]
        # binarize at the 0.1 precipitation threshold for the CSI computation
        y_true = radar2precipitation(y)
        y_true = np.where(y_true >= 0.1, 1, 0)
        y_true = y_true.ravel()
        y_pred = radar2precipitation(y_hat)
        y_pred = np.where(y_pred >= 0.1, 1, 0)
        y_pred = y_pred.ravel()
        y = y.ravel()
        y_hat = y_hat.ravel()
        # MAE weighted so only pixels with actual rain contribute
        mae = metrics.mean_absolute_error(y, y_hat, sample_weight=y_true)
        self.log("mae", mae)
        tn, fp, fn, tp = metrics.confusion_matrix(y_true, y_pred).ravel()
        # Critical Success Index = TP / (TP + FN + FP)
        csi = tp / (tp + fn + fp)
        self.log("csi", csi)
        comp_metric = mae / (csi + 1e-12)
        self.log("comp_metric", comp_metric)
        print(
            f"Epoch {self.current_epoch} | MAE/CSI: {comp_metric} | MAE: {mae} | CSI: {csi} | Loss: {avg_loss}"
        )
    def configure_optimizers(self):
        # optimizer
        if args["optimizer"] == "adam":
            self.optimizer = torch.optim.Adam(self.parameters(), lr=self.lr)
        elif args["optimizer"] == "adamw":
            self.optimizer = AdamW(self.parameters(), lr=self.lr)
        elif args["optimizer"] == "radam":
            self.optimizer = optim.RAdam(self.parameters(), lr=self.lr)
        elif args["optimizer"] == "ranger":
            # Ranger = RAdam wrapped in Lookahead
            self.optimizer = optim.RAdam(self.parameters(), lr=self.lr)
            self.optimizer = optim.Lookahead(self.optimizer)
        # scheduler
        if args["scheduler"] == "cosine":
            # warmup/training lengths are expressed in optimizer steps
            self.scheduler = get_cosine_schedule_with_warmup(
                self.optimizer,
                num_warmup_steps=self.num_train_steps * args["warmup_epochs"],
                num_training_steps=self.num_train_steps * args["max_epochs"],
            )
            return [self.optimizer], [{"scheduler": self.scheduler, "interval": "step"}]
        elif args["scheduler"] == "step":
            self.scheduler = torch.optim.lr_scheduler.StepLR(
                self.optimizer, step_size=10, gamma=0.5
            )
            return [self.optimizer], [
                {"scheduler": self.scheduler, "interval": "epoch"}
            ]
        elif args["scheduler"] == "plateau":
            self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer, mode="min", factor=0.1, patience=3, verbose=True
            )
            return [self.optimizer], [
                {
                    "scheduler": self.scheduler,
                    "interval": "epoch",
                    "reduce_on_plateau": True,
                    "monitor": "comp_metric",
                }
            ]
        else:
            self.scheduler = None
            return [self.optimizer]
# ## Train
seed_everything(args["seed"])
pl.seed_everything(args["seed"])
df = pd.read_csv(args["train_folds_csv"])
def train_fold(df, fold, lr_find=False):
    """Train one CV fold; optionally only run the learning-rate finder.

    Saves a checkpoint named after the fold and hyper-parameters, then frees
    GPU memory so folds can be trained back to back in one process.
    """
    train_df = df[df.fold != fold]
    val_df = df[df.fold == fold]
    datamodule = NowcastingDataModule(train_df, val_df)
    datamodule.setup()
    # optimizer steps per epoch (drives the step-based LR schedule)
    num_train_steps = np.ceil(
        len(train_df) // args["batch_size"] / args["accumulate_grad_batches"]
    )
    model = UNet(num_train_steps=num_train_steps)
    trainer = pl.Trainer(
        gpus=args["gpus"],
        max_epochs=args["max_epochs"],
        precision=args["precision"],
        progress_bar_refresh_rate=50,
        benchmark=True,
    )
    if lr_find:
        # learning-rate range test only; skip full training
        lr_finder = trainer.tuner.lr_find(model, datamodule=datamodule)
        fig = lr_finder.plot(suggest=True)
        fig.show()
        return
    print(f"Training fold {fold}...")
    trainer.fit(model, datamodule)
    checkpoint = (
        args["model_dir"]
        / f"unet_sigmoid_fold{fold}_bs{args['batch_size']}_epochs{args['max_epochs']}_lr{model.lr}_{args['optimizer']}_{args['scheduler']}.ckpt"
    )
    trainer.save_checkpoint(checkpoint)
    print("Model saved at", checkpoint)
    # release GPU memory before training the next fold
    del model, trainer, datamodule
    gc.collect()
    torch.cuda.empty_cache()
# AdamW bs256 lr 1e-3
for fold in range(5):
train_fold(df, fold)
# AdamW bs256 lr 1e-3 sigmoid
for fold in range(5):
train_fold(df, fold)
# ## Inference
def inference(checkpoints):
    """Run test-set inference with each checkpoint and average fold predictions.

    Parameters
    ----------
    checkpoints : list
        Model checkpoint paths; predictions are averaged over all of them.

    Returns
    -------
    pd.DataFrame
        Submission frame with one row per test file and 14400 pixel columns.
    """
    datamodule = NowcastingDataModule()
    datamodule.setup("test")
    test_paths = datamodule.test_dataset.paths
    test_filenames = [path.name for path in test_paths]
    final_preds = np.zeros((len(datamodule.test_dataset), 14400))
    for checkpoint in checkpoints:
        print(f"Inference from {checkpoint}")
        model = UNet.load_from_checkpoint(str(checkpoint))
        model.cuda()
        model.eval()
        preds = []
        with torch.no_grad():
            for batch in tqdm(datamodule.test_dataloader()):
                batch = batch.cuda()
                imgs = model(batch)
                imgs = imgs.detach().cpu().numpy()
                # drop padding: keep the central 120x120 crop of the 128x128 output
                imgs = imgs[:, 0, 4:124, 4:124]
                imgs = args["rng"] * imgs
                imgs = imgs.clip(0, 255)
                imgs = imgs.round()
                preds.append(imgs)
        preds = np.concatenate(preds)
        preds = preds.astype(np.uint8)
        preds = preds.reshape(-1, 14400)
        # BUG FIX: average over the number of checkpoints; the original used
        # len(checkpoint) (the current item), not len(checkpoints).
        final_preds += preds / len(checkpoints)
        del model
        gc.collect()
        torch.cuda.empty_cache()
    final_preds = final_preds.round()
    final_preds = final_preds.astype(np.uint8)
    subm = pd.DataFrame()
    # BUG FIX: the list is named test_filenames (test_filename was undefined).
    subm["file_name"] = test_filenames
    for i in tqdm(range(14400)):
        subm[str(i)] = final_preds[:, i]
    return subm
# NOTE(review): `model` no longer exists at this point (deleted inside
# train_fold), so the original f-strings referencing model.lr raised a
# NameError; use args["lr"] instead.
checkpoints = [
    args["model_dir"]
    / f"unet_fold{fold}_bs{args['batch_size']}_epochs{args['max_epochs']}_lr{args['lr']}_{args['optimizer']}_{args['scheduler']}.ckpt"
    for fold in range(5)
]
# BUG FIX: inference() was never called, so `subm` was undefined below.
subm = inference(checkpoints)
output_path = args["output_dir"] / f"unet_bs{args['batch_size']}_epochs{args['max_epochs']}_lr{args['lr']}_{args['optimizer']}_{args['scheduler']}.csv"
subm.to_csv(output_path, index=False)
subm.head()
| notebooks/03-unet.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Earliness
#
# The differences in performance with respect to earliness are measured using the mean absolute error over the first \textit{k} prefixes, across methods and data-sets. In other words, every \textit{t'th} event is evaluated independently of all prior or subsequent events in its trace.
#
# \begin{equation}MAE_{t} = \frac{1}{N}\sum_{i=1}^{N}\mid y_{t}^i - \hat{y}_{t}^i\mid\end{equation}
#
# Here, \textit{t} is the prefix or event number in each trace, and \textit{i} is the trace in the test period. This approach has been used in similar literature on both remaining time prediction and classification problems with data in prefix-log format \cite{NiekTax2017,Verenich2019,Camargo2019,Teinemaa2018}. Since there is a difference in the number of prefixes generated from each of the datasets (due to truncation), the same \textit{k} is used for the comparison of differences.
#
# Due to the nature of the prefix log format, the support of the distribution of prefixes is skewed towards the lowest prefix indexes $t$. However, as \textit{business objective B} is not directed towards traces with low support, no form of normalization is applied across different prefixes in the earliness evaluation. The significance of the results is evaluated through an ANOVA test.
#
# +
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#sns.set_theme(style="whitegrid")
sns.set(rc={'figure.figsize':(16,8)})
experiments = pd.read_csv("../Experiments.csv")
# filter out unfinished experiments
experiments = experiments.loc[experiments.Done == 1]
"""
Filter below needs to be removed!!
"""
# look at only one dataset (initial analysis)
#experiments = experiments.loc[experiments.F_dataset == 0.0]
experiments.head(50)
# +
results = []
# go trough all experiments
for exp in set(experiments.RUN):
# get experiment results
experiment_i = experiments.loc[experiments.RUN == exp]
# get inference table
inference_test = pd.read_csv("../experiments/"+str(exp)+"/inference_tables/Inf_test.csv")
########## process the results #####################
#placeholders
event_numbers = []
#num_of_events = []
event_mae = []
event_mae_normalized = []
for prefix in set(inference_test.event_number):
mae_prefix = np.mean(inference_test.loc[inference_test.event_number == prefix].AE)/60/60/24
#get average duration per prefix
avg_duration = np.mean(inference_test.loc[inference_test.event_number == prefix].caseduration_days)
mae_prefix_normalized = mae_prefix/avg_duration
num_events = inference_test.loc[inference_test.event_number == prefix].num_events
event_numbers.append(prefix)
event_mae_normalized.append(mae_prefix_normalized)
event_mae.append(mae_prefix)
# Collect experiment results
results_i = pd.DataFrame({"event_number":event_numbers,
"event_mae_normalized":event_mae_normalized,
"event_mae":event_mae})
for column in experiment_i.columns:
results_i[column] = experiment_i.loc[experiment_i.index[0]][column]
results.append(results_i)
#Create df
results = pd.concat(results)
# -
results.columns
#Save results
results.to_csv("../Earliness_results.csv",index=False)
# ## Inspect a single dataset
# +
#Quick inspection of single dataset
dataset = ["Sepsis","helpdesk","traffic_fines","hospital_billing"][0]
"""
Look at only log-transformed models:
"""
max_num_events = 10
results_sub = results.loc[results.F_transform == 0]
results_sub = results_sub.loc[results_sub.F_max_epochs == 200]
results_sub = results_sub.loc[results_sub.F_dataset == dataset]
results_sub = results_sub.loc[results_sub.event_number < max_num_events+1]
results_sub = results_sub.loc[results_sub.event_mae < 30]
print("Num runs: ",str(len(results_sub.RUN.unique())))
#betas = results_sub.F_beta.unique()
#print(betas)
#results_sub = results_sub.loc[results_sub.F_beta > 0.5]
alphas = results_sub.F_alpha.unique()
print(alphas)
#results_sub = results_sub.loc[results_sub.F_alpha == alphas[5]]
#results_sub = results_sub.loc[results_sub.F_alpha == 1]
ax = sns.boxplot(x="event_number", y="event_mae", hue="F_lossfunction",
data=results_sub)#,fliersize=0.0)
ax.set_title(dataset)
# +
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels
# Subset results
results_sub = results.loc[results.event_number < max_num_events+1]
#results_sub = results_sub.loc[results_sub.event_mae < 25]
table = results_sub
#for loss in results_sub.F_lossfunction.unique():
# print("\n\nLOSS: "+str(loss)+"\n\n")
alphas = results_sub.F_alpha.unique()
print(alphas)
for alpha in alphas:
testdata = results_sub.loc[results_sub.F_alpha==alpha]
#specify loss name
baseline = "MAE"
d1 = testdata.loc[testdata.F_lossfunction==baseline].event_mae.values
#specify loss name
comparison = "MAE_td"
d2 = testdata.loc[testdata.F_lossfunction==comparison]#.MAE.values
#further specify subset
d2 = d2.event_mae.values
out = statsmodels.stats.weightstats.CompareMeans.from_data(d1, d2).summary(use_t=True, alpha=0.05, usevar='pooled')
print("DATA: "+ dataset)
print("DATA: "+ str(alpha))
print("MAE: "+ str(np.mean(d1)))
print(str(comparison)+": "+ str(np.mean(d2)))
print(out)
# +
import statsmodels.api as sm
from statsmodels.formula.api import ols
import statsmodels
# Subset results
results_sub = results.loc[results.event_number < max_num_events+1]
#results_sub = results_sub.loc[results_sub.event_mae < 25]
table = results_sub
#for loss in results_sub.F_lossfunction.unique():
# print("\n\nLOSS: "+str(loss)+"\n\n")
alphas = results_sub.F_alpha.unique()
print(alphas)
alpha = alphas[0]
for evno in list(range(1,max_num_events)):
testdata = results_sub.loc[results_sub.event_number == evno]
testdata = testdata.loc[testdata.F_alpha==alpha]
#specify loss name
baseline = "MAE"
d1 = testdata.loc[testdata.F_lossfunction==baseline].event_mae.values
#specify loss name
comparison = "MAE_td"
d2 = testdata.loc[testdata.F_lossfunction==comparison]#.MAE.values
#further specify subset
d2 = d2.event_mae.values
out = statsmodels.stats.weightstats.CompareMeans.from_data(d1, d2).summary(use_t=True, alpha=0.05, usevar='pooled')
print("DATA: "+ dataset)
print("Alpha: "+ str(alpha))
print("Event: "+ str(evno))
print("MAE: "+ str(np.mean(d1)))
print(str(comparison)+": "+ str(np.mean(d2)))
print(out)
# +
alpha = alphas[0]
plotdata = results_sub.loc[results_sub.F_alpha==alpha]
ax = sns.boxplot(x="event_number", y="event_mae_normalized", hue="F_lossfunction",
data=plotdata)#,fliersize=0.0)
ax.set_title(dataset+", Alpha = "+str(alpha))
#ax.set_ylabel(target)
# -
table = pd.pivot_table(results_sub,
values='event_mae_normalized', index=['F_lossfunction','F_alpha'],
columns=['event_number'], aggfunc=np.mean)
table
# +
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
# Load the example flights dataset and convert to long-form
flights_long = sns.load_dataset("flights")
flights = flights_long.pivot("month", "year", "passengers")
# Draw a heatmap with the numeric values in each cell
f, ax = plt.subplots(figsize=(16, 12))
cmap = sns.cm.rocket_r
sns.heatmap(table,
cmap=cmap,
annot=True,
#fmt="d",
linewidths=.5,
ax=ax)
# -
# # Normalized comparison
results = pd.read_csv("../Earliness_results.csv")
results.head(120)
results.F_lossfunction.value_counts()
# +
import seaborn as sns, matplotlib.pyplot as plt
#sns.set(style="whitegrid")
data = results.loc[results.event_number< 6]
ordered_data = sorted(data['F_transform'].unique())
g = sns.FacetGrid(data,col='F_transform',
col_order=ordered_data,
col_wrap=2,
aspect=1,
height=5.5,
legend_out=False,# xlim=5,
sharex=False)
g.map(sns.boxplot,'event_number','event_mae_normalized',"F_beta",
fliersize=0.0,
width=0.8,
linewidth=1,
saturation=0.75,
palette='muted')
#g.despine(left=True)
#for ax in g.axes.flatten():
# ax.tick_params(labelbottom=True)
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig("Earliness.png")
plt.show()
# +
import seaborn as sns, matplotlib.pyplot as plt
#sns.set(style="whitegrid")
data = results.loc[results.event_number< 6]
ordered_data = sorted(data['F_dataset'].unique())
g = sns.FacetGrid(data,col='F_dataset',
col_order=ordered_data,
col_wrap=2,
aspect=1,
height=5.5,
legend_out=False,# xlim=5,
sharex=False)
g.map(sns.boxplot,'event_number','event_mae_normalized',"F_lossfunction",
fliersize=0.0,
width=0.8,
linewidth=1,
saturation=0.75,
palette='muted')
#g.despine(left=True)
#for ax in g.axes.flatten():
# ax.tick_params(labelbottom=True)
plt.legend(loc='lower right')
plt.tight_layout()
plt.savefig("Earliness2.png")
plt.show()
# -
# # Generate a table: Event 1 only
# Drop irrelevant loss functions
table = results_sub
#table = table.loc[table.F_lossfunction!="MSE"]
table = table.loc[table.event_number==1]
table
# +
import numpy as np
import scipy.stats
def mean_confidence_interval(data, confidence=0.95):
    """Return 'mean ! half-width' for a t-based confidence interval.

    The '!' separator is a placeholder that is later replaced with the
    LaTeX plus-minus command when the pivot table is rendered to LaTeX.
    """
    arr = 1.0 * np.array(data)
    n = len(arr)
    m = np.mean(arr)
    se = scipy.stats.sem(arr)
    # half-width of the CI from the two-sided Student-t quantile
    h = se * scipy.stats.t.ppf((1 + confidence) / 2.0, n - 1)
    return f"{np.round(m, 3)} ! {np.round(h, 3)}"
df = pd.pivot_table(table, index='F_beta', values="event_mae",
columns="F_gamma", aggfunc=mean_confidence_interval)
latex = str((df.to_latex(index=True)))
latex = latex.replace('!', '\pm')
print(latex)
# +
df = pd.pivot_table(table, index='F_gamma', values="event_mae",
columns="F_beta", aggfunc=mean_confidence_interval)
df
# -
# # F tests - all datasets
table
import statsmodels.api as sm
from statsmodels.formula.api import ols
acc_lm = ols('MAE ~ C(F_lossfunction) + C(F_dataset) + C(F_lossfunction) * C(F_dataset)', data=table).fit()
anovatable = sm.stats.anova_lm(acc_lm, typ=2) # Type 2 ANOVA DataFrame
print(anovatable)
# # T-test comparisons with MAE as baseline
# ## Event_number == 1*
# +
import statsmodels
datasets = ["Sepsis","helpdesk","hospital_billing","traffic_fines"]
losses = ["MAE","MAE_td"]#,"MAE_Mtd","MAE_Etd","MSE"]
# Subset results on first N events
table = results.loc[results.event_number==1]
for loss in losses:
print("\n\nLOSS: "+loss+"\n\n")
for dataset in datasets:
testdata = results.loc[results.F_dataset==dataset]
baseline = "MAE"
d1 = testdata.loc[testdata.F_lossfunction==baseline].MAE.values
comparison = loss
d2 = testdata.loc[testdata.F_lossfunction==comparison].MAE.values
out = statsmodels.stats.weightstats.CompareMeans.from_data(d1, d2).summary(use_t=True, alpha=0.05, usevar='pooled')
print("DATA: "+dataset)
print("MAE: "+ str(np.mean(d1)))
print(comparison+": "+ str(np.mean(d2)))
print(out)
# +
max_events = np.max(inference_test.event_number)
if max_events < 10:
events = 5
else:
events = 10
inf_results = inference_test.loc[inference_test.event_number < events+1]
inf_results.AE = inf_results.AE /(24.0*3600)
pivottable = pd.pivot_table(inf_results,
values='AE',
columns=['event_number'], aggfunc=np.mean)
newcols = []
for colno in range(0,len(pivottable.columns)):
colno = colno + 1
name = "AE_"+str(colno)+""
newcols.append(name)
pivottable.columns = newcols
pivottable.index = [0]
pivottable
# -
events
| analysis/.ipynb_checkpoints/RQ1_Earliness_results-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Argopy Introduction
# load libraries
import numpy as np
from argopy import DataFetcher as ArgoDataFetcher
argo_loader = ArgoDataFetcher()
from argopy import IndexFetcher as ArgoIndexFetcher
index_loader = ArgoIndexFetcher()
# ## [Fetch data for a space/time domain](https://argopy.readthedocs.io/en/latest/data_fetching.html)
ds = argo_loader.region([-75, -45, 20, 30, 0, 10, '2011-01-01', '2011-06']).to_xarray()
print(ds)
np.unique(ds.PLATFORM_NUMBER)
# ## Points X Profiles
#
# https://argopy.readthedocs.io/en/latest/data_manipulation.html#Points-vs-profiles
# Fetched data are returned as a 1D array collection of measurements:
ds_points = argo_loader.to_xarray()
print(ds_points)
# If you prefer to work with a 2D array collection of vertical profiles, simply transform the dataset with argopy.xarray.ArgoAccessor.point2profile():
ds_profiles = ds_points.argo.point2profile()
print(ds_profiles)
# You can simply reverse this transformation with the argopy.argo.profile2point():
#
ds = ds_profiles.argo.profile2point()
print(ds)
# ## [Fetch Argo metadata](https://argopy.readthedocs.io/en/latest/metadata_fetching.html)
idx = index_loader.region([-75, -45, 20, 30, '2011-01-01', '2011-06'])
idx
idx.to_dataframe()
# So from this you can also see that the 'profiler code' for some of the observations is the same.
| Argopy-Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# # Interactive Network Exploration with pyreaclib
# + [markdown] deletable=true editable=true
# This notebook shows off the interactive `RateCOllection` network plot.
#
# You must have widgets enabled, e.g., via:
# ```
# jupyter nbextension enable --py --user widgetsnbextension
# ```
# for a user install or
# ```
# jupyter nbextension enable --py --sys-prefix widgetsnbextension
# ```
# for a system-wide installation
# + deletable=true editable=true
# %matplotlib inline
# + deletable=true editable=true
import pyreaclib as pyrl
# + [markdown] deletable=true editable=true
# This collection of rates has the main CNO rates plus a breakout rate into the hot CNO cycle
# + deletable=true editable=true
files = ["c12-pg-n13-ls09",
"c13-pg-n14-nacr",
"n13--c13-wc12",
"n13-pg-o14-lg06",
"n14-pg-o15-im05",
"n15-pa-c12-nacr",
"o14--n14-wc12",
"o15--n15-wc12",
"o14-ap-f17-Ha96c",
"f17-pg-ne18-cb09",
"ne18--f18-wc12",
"f18-pa-o15-il10"]
rc = pyrl.RateCollection(files)
# + [markdown] deletable=true editable=true
# To evaluate the rates, we need a composition. This is defined using a list of Nuclei objects.
# + deletable=true editable=true
comp = pyrl.Composition(rc.get_nuclei())
comp.set_solar_like()
# + [markdown] deletable=true editable=true
# Interactive exploration is enabled through the Explorer class, which takes a RateCollection and a Composition
# + deletable=true editable=true
re = pyrl.Explorer(rc, comp)
re.explore()
# + deletable=true editable=true
| examples/interactive-example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import jieba
import matplotlib.pyplot as plt
import pandas as pd
from wordcloud import (WordCloud, get_single_color_func,STOPWORDS)
import re
# +
class SimpleGroupedColorFunc(object):
    """Color function assigning an EXACT color to each word in the mapping.

    Words absent from the mapping fall back to default_color.
    """

    def __init__(self, color_to_words, default_color):
        # Invert {color: [words]} into a flat {word: color} lookup table.
        lookup = {}
        for color, words in color_to_words.items():
            for word in words:
                lookup[word] = color
        self.word_to_color = lookup
        self.default_color = default_color

    def __call__(self, word, **kwargs):
        return self.word_to_color.get(word, self.default_color)
class GroupedColorFunc(object):
    """Create a color function object which assigns DIFFERENT SHADES of
    specified colors to certain words based on the color to words mapping.
    Uses wordcloud.get_single_color_func
    """
    def __init__(self, color_to_words, default_color):
        # One single-color function per color group, each paired with its
        # word set for fast membership tests.
        self.color_func_to_words = [
            (get_single_color_func(color), set(words))
            for (color, words) in color_to_words.items()]
        self.default_color_func = get_single_color_func(default_color)
    def get_color_func(self, word):
        """Returns a single_color_func associated with the word"""
        try:
            # first group containing the word wins
            color_func = next(
                color_func for (color_func, words) in self.color_func_to_words
                if word in words)
        except StopIteration:
            # word belongs to no group -> use the default color
            color_func = self.default_color_func
        return color_func
    def __call__(self, word, **kwargs):
        return self.get_color_func(word)(word, **kwargs)
# +
def content_preprocess(csv):
    """Extract the comment column ('评论') from csv and write it, without
    header or index, to content4wordcloud.csv for word-cloud input."""
    comments = pd.read_csv(csv)[['评论']]
    comments.to_csv('content4wordcloud.csv', header=0, index=0)
content_preprocess('douban_comment.csv')
# content_preprocess('douban_comment.csv')
def word_cloud_creation(filename):
    """Read the file and return its text segmented by jieba, space-joined."""
    raw = open(filename, encoding='utf-8', errors='ignore').read()
    # full-mode segmentation: emit every plausible word
    segments = jieba.cut(raw, cut_all=True)
    return ' '.join(segments)
stoptext1 = open('stopword.txt',encoding='utf-8').read()
stopwords = stoptext1.split('\n')
stoptext2 = open('stopword2.txt',encoding='utf-8').read()
stopwords = stopwords+stoptext2.split('\n')
stopwords = stopwords+['一部','这部','看过','真的','感觉','一种']
def word_cloud_setting():
    """Build the WordCloud renderer (white background, Chinese font, global stopwords)."""
    # BUG FIX: use a raw string for the Windows font path; '\W', '\F' and '\s'
    # are invalid escape sequences (DeprecationWarning today, a SyntaxError in
    # future Python versions). The resulting path value is unchanged.
    wc = WordCloud(
        max_words=500,
        collocations=False,
        repeat=True,
        background_color='white',
        scale=1.5,
        stopwords=stopwords,
        height=1080,
        width=1920,
        font_path=r'C:\Windows\Fonts\simsun.ttc',
    )
    return wc
def word_cloud_implementation(wl,wc):
    '''Generate word cloud and display'''
    # render the segmented text with the configured WordCloud
    my_words = wc.generate(wl)
    plt.imshow(my_words)
    plt.axis('off')
    # persist the rendered cloud next to the notebook
    wc.to_file('word_cloud.png')
    plt.show()
wl = word_cloud_creation('content4wordcloud.csv')
wc = word_cloud_setting()
word_cloud_implementation(wl,wc)
# +
# This Part: Emphasize what's most focused and professional
color_to_words = {
# words below will be colored with a single color function
# focus on the film itself
'red': ['电影', '导演', '故事', '剧情', '配乐', '剧本', '表演','角色','镜头', '音乐','主角','观众','片子'],
# talk about something else or feeling/attitude
'green': ['真的', '感觉','精彩','感动','喜欢','特别','人生', '世界', '生活','人性','经典']
}
# Words that are not in any of the color_to_words values
# will be colored with a grey single color function
default_color = 'grey'
# Create a color function with single tone
# grouped_color_func = SimpleGroupedColorFunc(color_to_words, default_color)
# Create a color function with multiple tones
grouped_color_func = GroupedColorFunc(color_to_words, default_color)
# Apply our color function
wc.recolor(color_func=grouped_color_func)
wc.to_file('word_cloud_emphasized.png')
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
# -
df = pd.read_csv('imdb_movie_review_info.csv')
preprocessed_data = df[['userReview']]
preprocessed_data
# +
def content_preprocess(csv):
    """Extract the 'userReview' column from csv and write it, without header
    or index, to content4wordcloud.csv for word-cloud input."""
    reviews = pd.read_csv(csv)[['userReview']]
    reviews.to_csv('content4wordcloud.csv', header=0, index=0)
content_preprocess('imdb_movie_review_info.csv')
# content_preprocess('douban_comment.csv')
def word_cloud_creation(filename):
    '''Read the review file and return its text cleaned for the word cloud.'''
    text = open(filename, encoding = 'utf-8', errors = 'ignore').read()
    wl = ''.join(text)
    # strip HTML tags left over from scraping
    wl = re.sub('<.*?>', '', wl)
    # BUG FIX: remove the article "the" only as a whole word; the original
    # re.sub('the', '', wl) also stripped it from inside words
    # (e.g. "theater" -> "ater").
    wl = re.sub(r'\bthe\b', '', wl)
    return wl
stoptext1 = open('stopword.txt',encoding='utf-8').read()
stopwords = stoptext1.split('\n')
stoptext2 = open('stopword2.txt',encoding='utf-8').read()
stopwords = stopwords+stoptext2.split('\n')
stopwords = stopwords+['wa','a','i','time','make','watch']
def word_cloud_setting():
    """Build the WordCloud renderer for the IMDB reviews (white background, global stopwords)."""
    # BUG FIX: raw string for the Windows font path; '\W', '\F' and '\s' are
    # invalid escape sequences. The resulting path value is unchanged.
    wc = WordCloud(
        max_words=500,
        collocations=False,
        repeat=True,
        background_color='white',
        scale=1.5,
        stopwords=stopwords,
        height=1080,
        width=1920,
        font_path=r'C:\Windows\Fonts\simsun.ttc',
    )
    return wc
def word_cloud_implementation(wl, wc):
    '''Render the word cloud for text *wl*, save it as a PNG and display it.'''
    cloud = wc.generate(wl)
    plt.imshow(cloud)
    plt.axis('off')
    # Save before plt.show(), which clears the current figure.
    wc.to_file('word_cloud_imdb.png')
    plt.show()
# End-to-end: clean the extracted review text, configure, and render.
wl = word_cloud_creation('content4wordcloud.csv')
wc = word_cloud_setting()
word_cloud_implementation(wl,wc)
# +
# This Part: Emphasize what's most focused and professional
# Two-tone recoloring of the imdb cloud: "objective" movie vocabulary in
# red, "subjective" feeling vocabulary in green, everything else grey.
color_to_words = {
    # words below will be colored with a single color function
    'red': ['movie', 'film', 'character', 'performance', 'story', 'shot','actor','scene', 'director','plot','acting'],
    # talk about something else or feeling/attitude
    'green': ['life', 'people','good','like','bad','love', 'great', 'feel','world','excellent','perfect','real','classic']
}
# Words that are not in any of the color_to_words values
# will be colored with a grey single color function
default_color = 'grey'
# Create a color function with multiple tones (GroupedColorFunc defined earlier)
grouped_color_func = GroupedColorFunc(color_to_words, default_color)
# Apply our color function
wc.recolor(color_func=grouped_color_func)
wc.to_file('word_cloud_emphasized_imdb.png')
plt.figure()
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
# -
# In this part, we will find out what people tend to talk about in the movie reviews on douban and imdb separately. We're doing this by WordCloud, which is a fascinating approach for us to figure out what frequently occurs in people's reviews. In a WordCloud, the most frequent words are drawn larger than other words, so we can instantly see what we should pay attention to. Here are the two wordcloud figures of the reviews of the top250 movies on imdb and douban. We chose the top100 rated reviews, which are basically positive. They can tell us what on earth people love about the movies.
#
# Of course, stopwords list must be added to filter out some meaningless words for example, 'the', 'than', 'that'.
#
# I noticed that people on douban may be accustomed to making comments that are based more on personal feelings and experience, while people on imdb tend to talk about the movie itself. To see this feature more clearly, I have marked the words concerned about the movies **red** and the words about self experience and emotional feelings **green**. Basically, red words are more objective and green words are more subjective.
#
# So I chose these words:
#
# Indeed, if you take a closer look, you will find that many comments on douban are more likely to talk about world, life, and whether they like the movie, which makes the clouds greener. However, imdb users tend to talk about performance, character, scenes.(red) I can't help wondering if this suggests that Chinese people and the English-speaking world have a difference in thinking pattern or way of describing a thing. We Chinese like to focus on our own lives and feelings, while the English-speaking community may prefer to start from something about the movies.
#
# Well, this could also be the result of the difference in grammar. But I figure that this might not be the main reason.
#
# Moreover, Chinese seldom use simple words like '赞,棒great' to directly express their feelings('好good' is in the stopwords, 'like' as well), though they start with something that's not closely related to the movies.(world, life) We prefer to say a movie is '感人touching', or '真实close to reality' if we think they are very good. On the other hand, imdb users describe a movie with 'excellent', 'perfect'. They use these words as the highest praise.
#
# For further research on reviews, my teammate Haoyun has done some research on prediction about genres by reviews.
#
#
#
# douban:
#
# 'red': '电影', '导演', '故事', '剧情', '配乐', '剧本', '表演','角色','镜头', '音乐','主角','观众','片子'
#
# meaning: movie, director, story, plot, soundtrack, script, performance, character, shot, music, main character, audience, film(another)
#
# 'green': '真的', '感觉','精彩','感动','喜欢','特别','人生', '世界', '生活','人性','经典','现实'
#
# meaning: really, feel, excellent, touching, like, special(particularly), life, world, living(daily), humanity, classic, reality
#
# imdb:
#
# 'red': 'movie', 'film', 'character', 'performance', 'story', 'shot','actor','scene', 'director','plot','acting'
#
# 'green': 'life', 'people','good','like','bad','love', 'great', 'feel','world','excellent','perfect','real'
| data_crawler & data/.ipynb_checkpoints/WordCloudTextAnalysis-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from ga4stpg.util import STEIN_B
from ga4stpg.graph import ReaderORLibrary
from ga4stpg.tree.evaluation import EvaluateTreeGraph
from ga4stpg.tree.generate import GenerateBasedRandomWalk, GenerateBasedPrimRST
# +
import seaborn as sns
from matplotlib import pyplot as plt
from os import path
# -
import numpy as np
import pandas as pd
import pylab
from scipy import stats
import statsmodels.api as sm
import config
# +
# nro_individuals = 500
# nro_individuals=750
# nro_individuals = 1_000
# +
def test_normal_distribution(costs_values):
print(10*'= ')
print("Test if costs is normal distributed")
result = stats.shapiro(costs_values)
print("P-value: ", result.pvalue)
if result.pvalue <= 0.05:
print("Reject Null Hypotesis: the costs distribution doesn't follow a normal distribution.")
else:
print("Accept Null Hypotesis: the costs distribution follows a normal distribution.")
print(10*'= ')
print("Mean: ", np.mean(costs_values))
print("St Dev:", np.std(costs_values))
print("Skewness: ", stats.skew(costs_values))
def compare_initial_population_for(instance_problem, nro_individuals):
    """Compare two initial-population strategies on one STPG instance.

    Generates `nro_individuals` individuals with the random-walk and the
    Prim-RST generators, tests each cost sample for normality, plots both
    cost distributions, and compares the samples with a t-test and a
    Mann-Whitney U test.
    """
    filename = path.join(config.dataset_directory, instance_problem)
    stpg = ReaderORLibrary().parser(filename)
    print("STPG information", '\n', 10*'- ', '\n')
    print('Instance: ', stpg.name)
    # Look the best-known cost up by instance name; the original read the
    # module-level INDEX global, which silently printed the wrong value
    # whenever INDEX and instance_problem disagreed.
    print('Best Known cost: ', dict(STEIN_B).get(instance_problem))
    print("Nro. Node:", stpg.nro_nodes)
    print("Nro. Edges:", stpg.nro_edges)
    print("Nro. Terminals:", stpg.nro_terminals)
    print("Terminals: \n", stpg.terminals)
    gen_randomWalk = GenerateBasedRandomWalk(stpg)
    gen_primRST = GenerateBasedPrimRST(stpg)
    evaluator = EvaluateTreeGraph(stpg)
    # Cost of each generated individual; evaluator returns (cost, ...).
    costs_randomWalk = np.array([evaluator(gen_randomWalk())[0]
                                 for _ in range(nro_individuals)])
    test_normal_distribution(costs_randomWalk)
    costs_primBased = np.array([evaluator(gen_primRST())[0]
                                for _ in range(nro_individuals)])
    test_normal_distribution(costs_primBased)
    df = pd.DataFrame(data=costs_primBased, columns=["primBased"])
    df['randomWalk'] = costs_randomWalk
    ax = sns.displot(df)
    ax.set(title="Comparando distribuições")
    # Rule of thumb: treat the variances as equal when their ratio is < 4.
    var_randomWalk = np.var(costs_randomWalk)
    var_primBased = np.var(costs_primBased)
    ratio = max(var_randomWalk, var_primBased) / min(var_randomWalk, var_primBased)
    same_var = ratio < 4
    result_ttest2 = stats.ttest_ind(costs_randomWalk,
                                    costs_primBased,
                                    equal_var=same_var)
    print(10*'= ')
    print("Test if the population mean are equal or different.")
    print("Same var population: ", same_var)
    print("P-value: ", result_ttest2.pvalue)
    if result_ttest2.pvalue < 0.05:
        print("Reject: the cost mean are different.")
    else:
        print("Accept: the cost mean are the same.")
    print(10*"= ")
    # Non-parametric fallback, useful when normality was rejected above.
    print("Perform the Mann-Whitney U test")
    result_Utest = stats.mannwhitneyu(costs_randomWalk,
                                      costs_primBased,
                                      alternative='two-sided')
    print("P-value: ", result_Utest.pvalue)
# +
# Pick one STEIN-B instance by index and compare population sizes on it.
INDEX = 17
filename = STEIN_B[INDEX][0]
print(filename)
# -
compare_initial_population_for(filename, 500)
compare_initial_population_for(filename, 750)
compare_initial_population_for(filename, 1_000)
# ## Referências
#
# 1. [How to Conduct a Two Sample T-Test in Python](https://www.statology.org/two-sample-t-test-python/)
# 2. [How to Conduct a Mann-Whitney U Test in Python](https://www.statology.org/mann-whitney-u-test-python/)
# 3. [Levene’s Test: The Assessment for Equality of Variances](https://medium.com/@kyawsawhtoon/levenes-test-the-assessment-for-equality-of-variances-94503b695a57)
# 4. [Python T-Test – A Friendly Guide](https://www.hackdeploy.com/python-t-test-a-friendly-guide)
# 5. [scipy.stats.mannwhitneyu](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.mannwhitneyu.html)
# 6. [scipy.stats.ttest_ind](https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.ttest_ind.html#scipy.stats.ttest_ind)
#
def test_shapiro(costs, pvalue=0.05):
result = stats.shapiro(costs)
return result.pvalue < pvalue
def calc_max_edges(k):
    """Maximum number of edges in a simple undirected graph on *k* nodes
    (k choose 2, returned as a float)."""
    ordered_pairs = k * (k - 1)
    return ordered_pairs / 2
# +
# %%time
# For every STEIN-B instance: draw 100 batches of `nro_samples` Prim-RST
# individuals and count how often Shapiro-Wilk rejects normality of the
# batch's cost sample. Collected per-instance stats feed the plots below.
data = list()
nro_samples = 200
for instance, _ in STEIN_B:
    filename = path.join(config.dataset_directory, instance)
    reader = ReaderORLibrary()
    stpg = reader.parser(filename)
    gen_primRST = GenerateBasedPrimRST(stpg)
    evaluator = EvaluateTreeGraph(stpg)
    tmp = list()
    print(instance, end='\r')
    for i in range(100):
        costs = np.array([evaluator(gen_primRST())[0]
                          for _ in range(nro_samples)])
        # True means normality rejected for this batch
        tmp.append(test_shapiro(costs))
        print(instance, i*"#", end='\r')
    # collect data
    print(instance, 100*"#", end='\n')
    nro_nodes = stpg.nro_nodes
    nro_edges = stpg.nro_edges
    nro_terminals = len(stpg.terminals)
    nro_max_edges = calc_max_edges(stpg.nro_nodes)
    density = nro_edges / nro_max_edges
    total = sum(tmp)
    data.append([instance, total, total/len(tmp), density, nro_nodes, nro_edges, nro_terminals])
# -
# Tabulate rejection counts per instance and plot them against instance
# size/density features.
df = pd.DataFrame(data, columns=["Instance", "Total_Reject", "Rel_Reject", "Density", "Nro_Nodes", "Nro_Edges", "Nro_Terminals"])
df.head()
plt.figure(figsize=(5,10))
sns.barplot(y='Instance', x='Total_Reject', data=df)
sns.scatterplot(x='Nro_Terminals', y='Total_Reject', data=df)
sns.scatterplot(x='Density', y='Total_Reject', data=df)
| Studies_Generation_Population/Distribution Initial Population.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import seaborn as sns
# Column names for the UCI wheat-seeds dataset (whitespace-separated file
# with no header row).
column_names = ['area', 'perimeter', 'compactness', 'kernel_length', 'kernel_width', 'asymmetry_coefficient', 'kernel_groove_length', 'type']
# Raw string for the regex separator: '\s' in a plain literal is an invalid
# escape sequence (DeprecationWarning); the value is unchanged.
seeds = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt', sep=r'\s+', names=column_names)
seeds.head()
seeds.shape
seeds.corr()
# Pairwise feature scatter matrix, colored by seed type.
sns.pairplot(seeds, hue='type')
| 003_wheat_seeds.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="UV_mis-jdwLd" executionInfo={"status": "ok", "timestamp": 1628675161888, "user_tz": -330, "elapsed": 1071, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import os
# Colab project bootstrap parameters: repo name, branch and GitHub account.
project_name = "reco-tut-mlh"; branch = "main"; account = "sparsh-ai"
project_path = os.path.join('/content', project_name)
# + id="KRGLEjqMd3dV" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628675168975, "user_tz": -330, "elapsed": 6237, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c58b5119-2f3f-4642-e413-d2d175fa9d38"
# Clone the project repo on first run (token comes from mykeys.py), else
# just cd into it.
# NOTE(review): the '# !' / '# %' lines are jupytext-commented shell/IPython
# magics; as plain Python the `else:` branch contains only a comment and is
# a SyntaxError, so this file only runs after conversion back to a notebook.
if not os.path.exists(project_path):
    # !cp /content/drive/MyDrive/mykeys.py /content
    import mykeys
    # !rm /content/mykeys.py
    path = "/content/" + project_name;
    # !mkdir "{path}"
    # %cd "{path}"
    import sys; sys.path.append(path)
    # !git config --global user.email "<EMAIL>"
    # !git config --global user.name "reco-tut"
    # !git init
    # !git remote add origin https://"{mykeys.git_token}":x-oauth-basic@github.com/"{account}"/"{project_name}".git
    # !git pull origin "{branch}"
    # !git checkout main
else:
    # %cd "{project_path}"
# + id="HWliEWwod3dX" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628676418325, "user_tz": -330, "elapsed": 815, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="3944512c-183e-4fdb-db00-4ff38ebe0702"
# !git status
# + colab={"base_uri": "https://localhost:8080/"} id="9_2KIl3NrgFb" executionInfo={"status": "ok", "timestamp": 1628676479906, "user_tz": -330, "elapsed": 512, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c018ac4c-3aa6-4f89-bac3-c574942216b4"
# !git pull --rebase origin main
# + id="dGCJpyjLd3dY" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1628676490056, "user_tz": -330, "elapsed": 1413, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="66f70f79-5637-4275-8c7c-d353c3964a55"
# !git add . && git commit -m 'commit' && git push origin "{branch}"
# + id="B4Gz_IHckk_U" executionInfo={"status": "ok", "timestamp": 1628675168978, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Make the repo's ./code directory importable (utils, build_features, ...).
import sys
sys.path.insert(0,'./code')
# + [markdown] id="uJPjCuUXfbMZ"
# ---
# + [markdown] id="3ddck2b7JQgH"
# # Standard Variational Autoencoder (SVAE)
#
# The Standard Variational Autoencoder (SVAE) uses an autoencoder to generate a salient feature representation of users, learning a latent vector for each user. The decoder then takes this latent representation and outputs a probability distribution over all items; we get probabilities of all the movies being watched by each user.
# + [markdown] id="registered-helmet"
# # Imports
# + id="developmental-scotland" executionInfo={"status": "ok", "timestamp": 1628675775828, "user_tz": -330, "elapsed": 961, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
import numpy as np
import os
import pandas as pd
from utils import numpy_stratified_split
import build_features
import metrics
from models import SVAE
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
# + [markdown] id="liquid-contest"
# # Prepare Data
# + colab={"base_uri": "https://localhost:8080/", "height": 221} id="crucial-stable" executionInfo={"status": "ok", "timestamp": 1628675362382, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="978a3da5-b258-4a76-e856-eba4a5c1e990"
# Load the ratings file (tab-separated, no header). Presumably the
# MovieLens 100k 'u.data' file — confirm against the data pipeline.
fp = os.path.join('./data/bronze', 'u.data')
raw_data = pd.read_csv(fp, sep='\t', names=['userId', 'movieId', 'rating', 'timestamp'])
print(f'Shape: {raw_data.shape}')
raw_data.sample(5, random_state=123)
# + colab={"base_uri": "https://localhost:8080/", "height": 376} id="unable-orchestra" executionInfo={"status": "ok", "timestamp": 1628675367525, "user_tz": -330, "elapsed": 756, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="0a06a313-7631-4771-96b6-118e6ccce09c"
# Binarize the data (only keep ratings >= 4)
# df_preferred holds the implicit positives (rating > 3.5); df_low_rating is
# kept so true low ratings can be restored into the held-out matrices later.
df_preferred = raw_data[raw_data['rating'] > 3.5]
print (df_preferred.shape)
df_low_rating = raw_data[raw_data['rating'] <= 3.5]
df_preferred.head(10)
# + colab={"base_uri": "https://localhost:8080/"} id="initial-tours" executionInfo={"status": "ok", "timestamp": 1628675371383, "user_tz": -330, "elapsed": 465, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c0050f9c-9927-4385-f423-1465b15e377d"
# Keep users who clicked on at least 5 movies
df = df_preferred.groupby('userId').filter(lambda x: len(x) >= 5)
# Keep movies that were clicked on by at least 1 user
# (>= 1 is a no-op after the positive filter above; kept for symmetry)
df = df.groupby('movieId').filter(lambda x: len(x) >= 1)
print(df.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="ordinary-visitor" executionInfo={"status": "ok", "timestamp": 1628675373952, "user_tz": -330, "elapsed": 12, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="71538a9c-3b62-4546-bcee-0bf9e320db8d"
# Obtain both usercount and itemcount after filtering
usercount = df[['userId']].groupby('userId', as_index=False).size()
itemcount = df[['movieId']].groupby('movieId', as_index=False).size()
# Compute sparsity after filtering: use the filtered interaction count.
# (The original used raw_data.shape[0] — the pre-filter count — which
# contradicted the "After filtering" message below.)
sparsity = 1. * df.shape[0] / (usercount.shape[0] * itemcount.shape[0])
print("After filtering, there are %d watching events from %d users and %d movies (sparsity: %.3f%%)" %
      (df.shape[0], usercount.shape[0], itemcount.shape[0], sparsity * 100))
# + [markdown] id="english-morris"
# ## Split
# + id="adapted-basketball" executionInfo={"status": "ok", "timestamp": 1628675377414, "user_tz": -330, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Deterministically shuffle the user ids before the train/val/test split.
unique_users =sorted(df.userId.unique())
np.random.seed(123)
unique_users = np.random.permutation(unique_users)
# + colab={"base_uri": "https://localhost:8080/"} id="exempt-difference" executionInfo={"status": "ok", "timestamp": 1628675379082, "user_tz": -330, "elapsed": 7, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="f80f8a78-30da-40cc-ebd7-035e28e0d939"
# Hold out 200 users each for the validation and test sets; all remaining
# users form the training set.
HELDOUT_USERS = 200
# Create train/validation/test users
n_users = len(unique_users)
print("Number of unique users:", n_users)
train_users = unique_users[:(n_users - HELDOUT_USERS * 2)]
print("\nNumber of training users:", len(train_users))
val_users = unique_users[(n_users - HELDOUT_USERS * 2) : (n_users - HELDOUT_USERS)]
print("\nNumber of validation users:", len(val_users))
test_users = unique_users[(n_users - HELDOUT_USERS):]
print("\nNumber of test users:", len(test_users))
# + colab={"base_uri": "https://localhost:8080/"} id="hearing-collective" executionInfo={"status": "ok", "timestamp": 1628675384953, "user_tz": -330, "elapsed": 665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="c0b37626-c072-4b9b-c436-939d9396e0ba"
# For training set keep only users that are in train_users list
train_set = df.loc[df['userId'].isin(train_users)]
print("Number of training observations: ", train_set.shape[0])
# For validation set keep only users that are in val_users list
val_set = df.loc[df['userId'].isin(val_users)]
print("\nNumber of validation observations: ", val_set.shape[0])
# For test set keep only users that are in test_users list
test_set = df.loc[df['userId'].isin(test_users)]
print("\nNumber of test observations: ", test_set.shape[0])
# train_set/val_set/test_set contain user - movie interactions with rating 4 or 5
# + colab={"base_uri": "https://localhost:8080/"} id="prescription-spider" executionInfo={"status": "ok", "timestamp": 1628675386291, "user_tz": -330, "elapsed": 6, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="39b8fc98-168f-4d69-9119-e8c32ed9f3fa"
# Obtain list of unique movies used in training set
unique_train_items = pd.unique(train_set['movieId'])
print("Number of unique movies that rated in training set", unique_train_items.size)
# + colab={"base_uri": "https://localhost:8080/"} id="wrapped-companion" executionInfo={"status": "ok", "timestamp": 1628675392699, "user_tz": -330, "elapsed": 955, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="66ea592e-1b77-414e-d84b-46866c0422a6"
# For validation set keep only movies that were used in training set
val_set = val_set.loc[val_set['movieId'].isin(unique_train_items)]
print("Number of validation observations after filtering: ", val_set.shape[0])
# For test set keep only movies that were used in training set
test_set = test_set.loc[test_set['movieId'].isin(unique_train_items)]
print("\nNumber of test observations after filtering: ", test_set.shape[0])
# + id="compatible-paste" executionInfo={"status": "ok", "timestamp": 1628675502944, "user_tz": -330, "elapsed": 2119, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Instantiate the sparse matrix generation for train, validation and test sets
# use list of unique items from training set for all sets
am_train = build_features.AffinityMatrix(df=train_set, items_list=unique_train_items)
am_val = build_features.AffinityMatrix(df=val_set, items_list=unique_train_items)
am_test = build_features.AffinityMatrix(df=test_set, items_list=unique_train_items)
# + colab={"base_uri": "https://localhost:8080/"} id="embedded-spell" executionInfo={"status": "ok", "timestamp": 1628675509043, "user_tz": -330, "elapsed": 1311, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="92df3577-4526-49ea-894d-b5b37c4af988"
# Obtain the sparse matrix for train, validation and test sets.
# The user/item id maps are kept for val/test: they are needed later to
# write the original ratings back into the held-out matrices.
train_data, _, _ = am_train.gen_affinity_matrix()
print(train_data.shape)
val_data, val_map_users, val_map_items = am_val.gen_affinity_matrix()
print(val_data.shape)
test_data, test_map_users, test_map_items = am_test.gen_affinity_matrix()
print(test_data.shape)
# + id="surface-worship" executionInfo={"status": "ok", "timestamp": 1628675569837, "user_tz": -330, "elapsed": 723, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Split validation and test data into training (75%) and testing (25%) parts
val_data_tr, val_data_te = numpy_stratified_split(val_data, ratio=0.75, seed=123)
test_data_tr, test_data_te = numpy_stratified_split(test_data, ratio=0.75, seed=123)
# + id="shared-central" executionInfo={"status": "ok", "timestamp": 1628675573148, "user_tz": -330, "elapsed": 730, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Binarize train, validation and test data (1.0 = positive interaction)
train_data = np.where(train_data > 3.5, 1.0, 0.0)
val_data = np.where(val_data > 3.5, 1.0, 0.0)
test_data = np.where(test_data > 3.5, 1.0, 0.0)
# Binarize validation data: training part
val_data_tr = np.where(val_data_tr > 3.5, 1.0, 0.0)
# Binarize validation data: testing part (save non-binary version in the separate object, will be used for calculating NDCG)
val_data_te_ratings = val_data_te.copy()
val_data_te = np.where(val_data_te > 3.5, 1.0, 0.0)
# Binarize test data: training part
test_data_tr = np.where(test_data_tr > 3.5, 1.0, 0.0)
# Binarize test data: testing part (save non-binary version in the separate object, will be used for calculating NDCG)
test_data_te_ratings = test_data_te.copy()
test_data_te = np.where(test_data_te > 3.5, 1.0, 0.0)
# + id="reserved-speed" executionInfo={"status": "ok", "timestamp": 1628675587231, "user_tz": -330, "elapsed": 6591, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# retrieve real ratings from initial dataset:
# write the true low ratings (<= 3.5) back into the held-out rating
# matrices so NDCG is computed on real ratings, not on binarized 0/1 values.
test_data_te_ratings = pd.DataFrame(test_data_te_ratings)
val_data_te_ratings = pd.DataFrame(val_data_te_ratings)
for index, i in df_low_rating.iterrows():
    user_old = i['userId']  # original dataset id
    item_old = i['movieId']  # original dataset id
    # map original ids to matrix row/column indices; skip pairs not present
    if (test_map_users.get(user_old) is not None) and (test_map_items.get(item_old) is not None):
        user_new = test_map_users.get(user_old)  # matrix index
        item_new = test_map_items.get(item_old)  # matrix index
        rating = i['rating']
        test_data_te_ratings.at[user_new, item_new] = rating
    if (val_map_users.get(user_old) is not None) and (val_map_items.get(item_old) is not None):
        user_new = val_map_users.get(user_old)  # matrix index
        item_new = val_map_items.get(item_old)  # matrix index
        rating = i['rating']
        val_data_te_ratings.at[user_new, item_new] = rating
val_data_te_ratings = val_data_te_ratings.to_numpy()
test_data_te_ratings = test_data_te_ratings.to_numpy()
# + [markdown] id="indoor-sheet"
# # SVAE
# + id="pointed-repeat" executionInfo={"status": "ok", "timestamp": 1628675860829, "user_tz": -330, "elapsed": 1004, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# SVAE hyperparameters: encoder/decoder hidden size, latent size, training.
INTERMEDIATE_DIM = 200
LATENT_DIM = 64
EPOCHS = 400
BATCH_SIZE = 100
# + id="incredible-stage" executionInfo={"status": "ok", "timestamp": 1628675861590, "user_tz": -330, "elapsed": 771, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
model = SVAE.StandardVAE(n_users=train_data.shape[0],  # Number of unique users in the training set
                         original_dim=train_data.shape[1],  # Number of unique items in the training set
                         intermediate_dim=INTERMEDIATE_DIM,
                         latent_dim=LATENT_DIM,
                         n_epochs=EPOCHS,
                         batch_size=BATCH_SIZE,
                         k=10,  # cutoff used for in-training ranking metrics — TODO confirm
                         verbose=0,
                         seed=123,
                         drop_encoder=0.5,
                         drop_decoder=0.5,
                         annealing=False,  # constant KL weight instead of an annealing schedule
                         beta=1.0
                         )
# + colab={"base_uri": "https://localhost:8080/"} id="female-pregnancy" executionInfo={"status": "ok", "timestamp": 1628676104392, "user_tz": -330, "elapsed": 214894, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="4af35ba2-7aaa-4679-c86f-049630613258"
# %%time
# Train on the binarized training matrix, validating against the held-out
# part of the validation users.
model.fit(x_train=train_data,
          x_valid=val_data,
          x_val_tr=val_data_tr,
          x_val_te=val_data_te_ratings,  # with the original ratings
          mapper=am_val
          )
# + [markdown] id="EKn8Iq1vgP9H"
# # Recommend
# + id="equipped-boards" executionInfo={"status": "ok", "timestamp": 1628676104395, "user_tz": -330, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Model prediction on the training part of test set; remove_seen drops
# items the user already interacted with.
top_k = model.recommend_k_items(x=test_data_tr, k=10, remove_seen=True)
# Convert sparse matrix back to df
recommendations = am_test.map_back_sparse(top_k, kind='prediction')
test_df = am_test.map_back_sparse(test_data_te_ratings, kind='ratings')  # use test_data_te_, with the original ratings
# + [markdown] id="i5d2_zBCwGL2"
# ## Evaluation metrics
# + id="iMOnHJy9sz9p" executionInfo={"status": "ok", "timestamp": 1628676104396, "user_tz": -330, "elapsed": 22, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Create column with the predicted movie's rank for each user (1 = best).
top_k = recommendations.copy()
top_k['rank'] = recommendations.groupby('userId', sort=False).cumcount() + 1  # For each user, only include movies recommendations that are also in the test set
# + id="rhDysbs0tFkd" executionInfo={"status": "ok", "timestamp": 1628676104399, "user_tz": -330, "elapsed": 24, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}}
# Ranking metrics at k=10 over the held-out test interactions.
precision_at_k = metrics.precision_at_k(top_k, test_df, 'userId', 'movieId', 'rank')
recall_at_k = metrics.recall_at_k(top_k, test_df, 'userId', 'movieId', 'rank')
mean_average_precision = metrics.mean_average_precision(top_k, test_df, 'userId', 'movieId', 'rank')
ndcg = metrics.ndcg(top_k, test_df, 'userId', 'movieId', 'rank')
# + colab={"base_uri": "https://localhost:8080/"} id="GQeRUFCNtKuM" executionInfo={"status": "ok", "timestamp": 1628676104401, "user_tz": -330, "elapsed": 25, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "13037694610922482904"}} outputId="39776d6e-d1db-4258-ee04-adf72e1a0b87"
# Final evaluation summary.
print(f'Precision: {precision_at_k:.6f}',
      f'Recall: {recall_at_k:.6f}',
      f'MAP: {mean_average_precision:.6f} ',
      f'NDCG: {ndcg:.6f}', sep='\n')
# + [markdown] id="IkLPL0YCJ5qN"
# # References
#
#
# 1. <NAME>, <NAME>, <NAME>, A Hybrid Variational Autoencoder for Collaborative Filtering, 2018, https://arxiv.org/pdf/1808.01006.pdf
#
# 2. Microsoft SVAE implementation: https://github.com/microsoft/recommenders/blob/main/examples/02_model_collaborative_filtering/standard_vae_deep_dive.ipynb
#
| notebooks/reco-tut-mlh-02-svae.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Exercise 3.7 Bayesian analysis of the Poisson distribution
# In Exercise 3.6, we defined the Poisson distribution with rate $\lambda$ and derived its MLE. Here we perform a conjugate Bayesian analysis.
#
# - a. Derive the posterior $p(\lambda|D)$ assuming a conjugate prior $p(\lambda) = \mathrm{Ga}(\lambda|a, b) \propto \lambda^{a-1}e^{-\lambda b}$. Hint: the posterior is also a Gamma distribution.
# - b. What does the posterior mean tend to as $a \rightarrow 0$ and $b \rightarrow 0$? (Recall that the mean of a $\mathrm{Ga}(a, b)$ distribution is $a/b$.)
# ### Solution
# #### (a)
# Let $D = \{x_i,\ldots, x_n\}$ be i.i.d samples from $\mathrm{Poi}(x|\lambda)$.
#
# The likelihood is
#
# $$
# p(D|\lambda) = e^{-n\lambda}\frac{\lambda^{\sum_i x_i}}{\prod_ix_i!}
# $$
#
# The prior is
#
# $$
# p(\lambda) = \mathrm{Ga}(a, b) = \frac{b^a}{\Gamma(a)}\lambda^{a-1}e^{-\lambda b}
# $$
#
# So the posterior is given by
#
# $$
# p(\lambda|D) \propto p(D|\lambda)p(\lambda) \propto e^{-\lambda(b+n)}\lambda^{(a+\sum_ix_i)-1} \propto \mathrm{Ga}(\lambda|a + \sum_i x_i, b+n)
# $$
#
# Therefore, $p(\lambda| D) = \mathrm{Ga}(\lambda|a + \sum_ix_i, b+n)$.
#
# Let's think about how the parameters of the Gamma function are updated. Remember the Gamma distribution has two parameters, the 'shape' $a$ and 'rate' $b$. Then the shape parameter is a function of the **values** of the dataset. The rate (second parameter) is a function of only the **size** of the dataset. So we can think of the hyperparameter $b$ as a *pseudo-count*, and $a$ as the sum of the 'content' of the pseudo count.
# #### (b)
#
# Let's first calculate the posterior mean: (mean of the Gamma distribution)
#
# $$
# E[\lambda|D] = \frac{\sum_1^N x_i + a}{N + b}
# $$
#
# The mean is a well behaved function of $a$ and $b$, so we can take the limit:
#
# \begin{aligned}
# \lim_{a\rightarrow 0, b\rightarrow 0} E[\lambda|D] & = \lim_{a\rightarrow 0, b\rightarrow 0}\frac{\sum_1^Nx_i + a}{N+b} \\
# & = \frac{\sum_1^N x_i}{N} = \lambda_{MLE}
# \end{aligned}
#
# This tells us that, in the limit, the mean of the posterior becomes the maximum likelihood estimate.
# ### Summary
# In this exercise, by performing Bayesian analysis of the Poisson distribution, we discovered two things:
# 1. Assuming a Gamma prior, the posterior also takes the form of a Gamma distribution
# 2. In the extreme case where the parameters go to zero, the expected value goes to the MLE.
| murphy-book/chapter03/q07.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ```
# BEGIN ASSIGNMENT
# init_cell: false
# check_all_cell: false
# test_files: true
# save_environment: true
# files:
# - data.csv
# ```
#
import matplotlib.pyplot as plt
import numpy as np
# **Question 1.** Assign `x` to the smallest prime number.
#
# ```
# BEGIN QUESTION
# name: q1
# manual: false
# points:
# each: .75
# ```
# Otter solution cell: the value after '# SOLUTION' is replaced with a
# placeholder in the student version; the '## Test ##' cells below become
# autograder checks ('hidden' variants are withheld from students).
x = 2 # SOLUTION

## Test ##
isinstance(x, int)

## TEST ##
None

## test ##
0 < x < 100

## HIDDEN TEST ##
x

## hidden test ##
str(print(x))
# **Question 2.** Visualize the answer
#
# ```
# BEGIN QUESTION
# name: q2
# manual: true
# ```
## solution ##
# otter: 'NO PROMPT' keeps the solution line out of the student scaffold;
# the PROMPT string below becomes the student's starter code instead.
plt.plot(x); # SOLUTION NO PROMPT
""" # BEGIN PROMPT
plt.plot(...);
""" # END PROMPT

# This cell is not part of a question.
y = 3
# **Question 3.** Define `square` and assign `nine` to 3 squared.
#
# ```
# BEGIN QUESTION
# name: q3
# private_points: 2
# ```
# +
def square(x):
    """Return x squared."""
    y = x * x # SOLUTION
    return y # SOLUTION

nine = square(3)
# -

## TEST ##
nine

## TEST ##
square(16)

## hidden test ##
square(1)
# **Question 4.** What does equilateral mean?
#
# ```
# BEGIN QUESTION
# name: q4
# points: 2
# manual: True
# ```
# **SOLUTION**: Having equal side lengths.
# +
# this isn't part of a question
# it's here to make sure that we get a MD cell above to close the export
# of question 4
# -
# **Question 5.** Approximate the area and circumference of a circle with radius 3.
#
# ```
# BEGIN QUESTION
# name: question5
# ```
# +
# NOTE(review): `area = radius * pi * pi` computes pi^2 * r, not pi * r^2;
# since this is a test fixture, confirm whether the formula (and any
# expected outputs derived from it) is intentionally wrong before changing.
pi = 3.14
if True:
    # BEGIN SOLUTION
    radius = 3
    area = radius * pi * pi
    # END SOLUTION
    print('A circle with radius', radius, 'has area', area)

def circumference(r):
    # BEGIN SOLUTION NO PROMPT
    return 2 * pi * r
    # END SOLUTION
    """ # BEGIN PROMPT
    # Next, define a circumference function.
    pass
    """; # END PROMPT
# +
# This question has no tests.
# -
# **Question 6.** Write something
#
# _This question has a custom prompt below, so that prompt should be in the output. It also has no solution!_
#
# ```
# BEGIN QUESTION
# name: question6
# manual: true
# ```
# _Write your thing here._
# **SOLUTION:** some thing
# **Question 7:** What is the answer?
#
# ```
# BEGIN QUESTION
# name: q7
# manual: true
# ```
# <div class="alert alert-danger">
#
# <strong>SOLUTION</strong>: 42
#
# </div>
# **Question 8:** Test intercell seeding by generating 10 random $N(4,2)$ numbers.
#
# ```
# BEGIN QUESTION
# name: q8
# ```
np.random.seed(42) # SEED
z = np.random.normal(4, 2, 10) # SOLUTION
z
## Test ##
len(z) == 10
## Hidden Test ##
np.allclose(z, [3.07316461, 3.06854049, 4.48392454, 0.17343951, 0.55016433,
2.87542494, 1.97433776, 4.62849467, 2.18395185, 1.1753926 ])
# **You're done!**
#
| test/test-assign/generate-otter.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: yjh
# language: python
# name: yjh
# ---
# + pycharm={"is_executing": false, "name": "#%%\n"}
import numpy as np
import torch
from torch import optim
import math
from metric import get_mrr, get_recall
import datetime
from torch.utils.data import DataLoader, TensorDataset
import torch.nn as nn
import torch.nn.functional as F
import torch.nn.init as init
import pickle
from entmax import entmax_bisect
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Select GPU 0 if available. NOTE: the model/training code below calls
# .cuda() unconditionally, so a CUDA device is effectively required;
# `device` itself is never passed around.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
torch.cuda.set_device(0)

# Each pickle holds a tuple whose index 1 is the list of item-id sessions
# and index 2 the list of target items (next item to predict).
test64 = pickle.load(open('data/diginetica/test.txt', 'rb'))
train64 = pickle.load(open('data/diginetica/train.txt', 'rb'))
train64_x = train64[1]
train64_y = train64[2]
test64_x = test64[1]
test64_y = test64[2]

# Build 1-based position sequences (len(items)+1 entries: one per item plus
# one for the virtual target slot appended later) and collect every item id
# seen in either split.  Fix: the original initialized `item_set` twice.
train_pos = list()
test_pos = list()
item_set = set()
for items in train64[1]:
    pos = list()
    for id_ in range(len(items)):
        item_set.add(items[id_])
        pos.append(id_ + 1)
    pos.append(len(items) + 1)
    train_pos.append(pos)
for item in train64[2]:
    item_set.add(item)
for items in test64[1]:
    pos = []
    for id_ in range(len(items)):
        item_set.add(items[id_])
        pos.append(id_ + 1)
    pos.append(len(items) + 1)
    test_pos.append(pos)
for item in test64[2]:
    item_set.add(item)

# Re-index items densely as 1..n (0 is reserved for padding).
item_list = sorted(list(item_set))
item_dict = dict()
for i in range(1, len(item_set) + 1):
    item = item_list[i - 1]
    item_dict[item] = i
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Remap every session and every target through item_dict so item ids become
# the dense 1..n indices expected by the embedding layer.
train64_x = [[item_dict[item] for item in items] for items in train64[1]]
train64_y = [item_dict[item] for item in train64[2]]
test64_x = [[item_dict[item] for item in items] for items in test64[1]]
test64_y = [item_dict[item] for item in test64[2]]
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Find the longest session across both splits so every sequence can be
# left-padded to one common width.
max_length = 0
for sample in train64_x:
    max_length = len(sample) if len(sample) > max_length else max_length
for sample in test64_x:
    max_length = len(sample) if len(sample) > max_length else max_length
# Item matrices are max_length wide; position matrices are max_length+1 wide
# because positions carry one extra slot for the virtual target item.
train_seqs = np.zeros((len(train64_x), max_length))
train_poses = np.zeros((len(train64_x), max_length+1))
test_seqs = np.zeros((len(test64_x), max_length))
test_poses = np.zeros((len(test64_x), max_length+1))
for i in range(len(train64_x)):
    seq = train64_x[i]
    pos = train_pos[i]
    length = len(seq)
    # Left padding: real items/positions occupy the trailing columns,
    # leading columns stay 0 (the padding index).
    train_seqs[i][-length:] = seq
    train_poses[i][-length-1:] = pos
for i in range(len(test64_x)):
    seq = test64_x[i]
    pos = test_pos[i]
    length = len(seq)
    test_seqs[i][-length:] = seq
    test_poses[i][-length-1:] = pos
target_seqs = np.array(train64_y)
target_test_seqs = np.array(test64_y)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Rebuild the item vocabulary from the remapped (dense) ids.
item_set = set()
for seq in train64_x:
    item_set.update(seq)
item_set.update(train64_y)
for seq in test64_x:
    item_set.update(seq)
item_set.update(test64_y)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Convert the padded numpy matrices to float tensors (cast to long later,
# right before batching).
train_x = torch.Tensor(train_seqs)
train_pos = torch.Tensor(train_poses)
train_y = torch.Tensor(target_seqs)
test_x = torch.Tensor(test_seqs)
test_pos = torch.Tensor(test_poses)
test_y = torch.Tensor(target_test_seqs)
# 40841 is a special "virtual target" token appended to every session.
# NOTE(review): presumably it is the largest dense item id + 1, matching the
# n_items=40842 passed to DualAttention below — confirm against item_dict.
train_label = torch.Tensor([40841]).repeat(len(train64_x)).unsqueeze(1)
test_label = torch.Tensor([40841]).repeat(len(test64_x)).unsqueeze(1)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Append the virtual target token as the last column of every sequence.
train_x = torch.cat((train_x, train_label), 1)
test_x = torch.cat((test_x, test_label), 1)
# + pycharm={"is_executing": false, "name": "#%%\n"}
class DualAttention(nn.Module):
    """Session-based recommender with sparse self- and global attention.

    Per time step, item and position embeddings are concatenated (dim =
    item_dim + pos_dim).  A self-attention pass (entmax with a learned,
    per-example alpha in (1, 2]) yields a session summary from the last
    step; a global attention then re-weights the remaining steps against
    that summary.  The decoder scores all items by scaled cosine
    similarity against the item embedding table.

    NOTE(review): `atten_way` and `decoder_way` are stored but never read
    in forward/predict — presumably dead experiment leftovers; confirm.
    """
    def __init__(self, item_dim, pos_dim, n_items, n_pos, w, atten_way='dot', decoder_way='bilinear', dropout=0,
                 activate='relu'):
        super(DualAttention, self).__init__()
        self.item_dim = item_dim
        self.pos_dim = pos_dim
        dim = item_dim + pos_dim
        self.dim = dim
        self.n_items = n_items
        # Index 0 is the padding item/position; embeddings are max-norm clipped.
        self.embedding = nn.Embedding(n_items + 1, item_dim, padding_idx=0,max_norm=1.5)
        self.pos_embedding = nn.Embedding(n_pos, pos_dim, padding_idx=0, max_norm=1.5)
        self.atten_way = atten_way
        self.decoder_way = decoder_way
        # Parameters for the additive global attention.
        self.atten_w0 = nn.Parameter(torch.Tensor(1, dim))
        self.atten_w1 = nn.Parameter(torch.Tensor(dim, dim))
        self.atten_w2 = nn.Parameter(torch.Tensor(dim, dim))
        self.atten_bias = nn.Parameter(torch.Tensor(dim))
        # Fuses [global context ; self-attention summary] back to item_dim.
        self.w_f = nn.Linear(2*dim, item_dim)
        self.dropout = nn.Dropout(dropout)
        # Position-wise feed-forward applied after self-attention (residual).
        self.self_atten_w1 = nn.Linear(dim, dim)
        self.self_atten_w2 = nn.Linear(dim, dim)
        self.LN = nn.LayerNorm(dim)
        self.LN2 = nn.LayerNorm(item_dim)  # NOTE(review): appears unused
        # Toggled manually: True in forward() (training), False in predict().
        self.is_dropout = True
        self.attention_mlp = nn.Linear(dim, dim)
        # Maps a dim-vector to the scalar controlling the entmax alpha.
        self.alpha_w = nn.Linear(dim, 1)
        self.w = w  # temperature/scale applied to the cosine logits
        # NOTE(review): any other `activate` value leaves self.activate unset
        # and self_attention would raise AttributeError.
        if activate == 'relu':
            self.activate = F.relu
        elif activate == 'selu':
            self.activate = F.selu
        self.initial_()
    def initial_(self):
        # Small-normal init for attention weights; zeros for the bias and for
        # the padding rows of both embedding tables.
        init.normal_(self.atten_w0, 0, 0.05)
        init.normal_(self.atten_w1, 0, 0.05)
        init.normal_(self.atten_w2, 0, 0.05)
        init.constant_(self.atten_bias, 0)
        init.constant_(self.attention_mlp.bias, 0)
        init.constant_(self.embedding.weight[0], 0)
        init.constant_(self.pos_embedding.weight[0], 0)
    def forward(self, x, pos):
        """Score every item for each session.

        x, pos: (B, seq) LongTensors; the last column of `x` is the virtual
        target token appended during preprocessing.  Returns (B, n_items-1)
        logits whose 0-based column j corresponds to item id j+1.
        """
        self.is_dropout = True
        x_embeddings = self.embedding(x) # B,seq,dim
        pos_embeddings = self.pos_embedding(pos) # B, seq, dim
        mask = (x != 0).float() # B,seq
        x_ = torch.cat((x_embeddings, pos_embeddings), 2) # B seq, 2*dim
        x_s = x_[:, :-1, :] # B, seq-1, 2*dim
        # Alpha for the self-attention entmax is derived from the last step
        # (the virtual target token's embedding).
        alpha_ent = self.get_alpha(x = x_[:, -1, :], number= 0)
        m_s, x_n = self.self_attention(x_, x_, x_, mask, alpha_ent)
        alpha_global = self.get_alpha(x= m_s, number=1)
        global_c = self.global_attention(m_s, x_n, x_s, mask, alpha_global) # B, 1, dim
        h_t = global_c
        result = self.decoder(h_t, m_s)
        return result
    def get_alpha(self, x=None, number=None):
        """Return the entmax alpha in (1, 2], shaped for self (number=0)
        or global (number=1) attention."""
        if number == 0:
            alpha_ent = torch.sigmoid(self.alpha_w(x)) + 1
            alpha_ent = self.add_value(alpha_ent).unsqueeze(1)
            # NOTE(review): hard-coded 70 ties this to the diginetica padding
            # width — should presumably be the sequence length; confirm.
            alpha_ent = alpha_ent.expand(-1, 70, -1)
            return alpha_ent
        if number == 1:
            alpha_global = torch.sigmoid(self.alpha_w(x)) + 1
            alpha_global = self.add_value(alpha_global)
            return alpha_global
    def add_value(self, value):
        # Nudge alpha strictly above 1: entmax_bisect with alpha exactly 1
        # (softmax limit) is degenerate for the bisection solver.
        mask_value = (value ==1).float()
        value = value.masked_fill(mask_value == 1, 1.00001)
        return value
    def self_attention(self, q, k, v, mask=None, alpha_ent = 1):
        """Sparse (entmax) self-attention + position-wise FFN with residual.

        Returns (summary of the last step: B,1,dim; all earlier steps:
        B,seq-1,dim)."""
        if self.is_dropout:
            q_ = self.dropout(self.activate(self.attention_mlp(q)))
        else:
            q_ = self.activate(self.attention_mlp(q))
        scores = torch.matmul(q_, k.transpose(1, 2)) / math.sqrt(self.dim)
        if mask is not None:
            # Mask out padded positions before normalization.
            mask = mask.unsqueeze(1).expand(-1, q.size(1), -1)
            scores = scores.masked_fill(mask == 0, -np.inf)
        alpha = entmax_bisect(scores, alpha_ent, dim=-1)
        att_v = torch.matmul(alpha, v) # B, seq, dim
        if self.is_dropout:
            att_v = self.dropout(self.self_atten_w2(self.activate(self.self_atten_w1(att_v)))) + att_v
        else:
            att_v = self.self_atten_w2(self.activate(self.self_atten_w1(att_v))) + att_v
        att_v = self.LN(att_v)
        c = att_v[:, -1, :].unsqueeze(1)
        x_n = att_v[:, :-1, :]
        return c, x_n
    def global_attention(self,target,k, v, mask=None, alpha_ent=1):
        """Additive attention of every step `k` against the session summary
        `target`; entmax-normalized over the sequence axis."""
        alpha = torch.matmul(
            torch.relu(k.matmul(self.atten_w1) + target.matmul(self.atten_w2) + self.atten_bias),
            self.atten_w0.t())  # (B,seq,1)
        if mask is not None:
            mask = mask.unsqueeze(-1)
            # Drop the virtual-target column to match k/v (seq-1 steps).
            mask = mask[:, :-1, :]
            alpha = alpha.masked_fill(mask == 0, -np.inf)
        alpha = entmax_bisect(alpha, alpha_ent, dim=1)
        c = torch.matmul(alpha.transpose(1, 2), v)  # (B, 1, dim)
        return c
    def decoder(self, global_c, self_c):
        """Fuse global and self contexts, then score items by scaled cosine
        similarity against the embedding table (rows 1..n-1: row 0 is padding
        and the last row is the virtual target token — both excluded, so
        targets must be passed 0-based, i.e. y-1)."""
        if self.is_dropout:
            c = self.dropout(torch.selu(self.w_f(torch.cat((global_c, self_c), 2))))
        else:
            c = torch.selu(self.w_f(torch.cat((global_c, self_c), 2)))
        c = c.squeeze()
        l_c = (c/torch.norm(c, dim=-1).unsqueeze(1))
        l_emb = self.embedding.weight[1:-1]/torch.norm(self.embedding.weight[1:-1], dim=-1).unsqueeze(1)
        z = self.w * torch.matmul(l_c, l_emb.t())
        return z
    def predict(self, x, pos, k=20):
        """Inference-mode forward (dropout off) returning the top-k item
        indices per session.  Mirrors forward() line for line."""
        self.is_dropout = False
        x_embeddings = self.embedding(x) # B,seq,dim
        pos_embeddings = self.pos_embedding(pos) # B, seq, dim
        mask = (x != 0).float() # B,seq
        x_ = torch.cat((x_embeddings, pos_embeddings), 2) # B seq, 2*dim
        x_s = x_[:, :-1, :] # B, seq-1, 2*dim
        alpha_ent = self.get_alpha(x = x_[:, -1, :], number= 0)
        m_s, x_n = self.self_attention(x_, x_, x_, mask, alpha_ent)
        alpha_global = self.get_alpha(x= m_s, number=1)
        global_c = self.global_attention(m_s, x_n, x_s, mask, alpha_global) # B, 1, dim
        h_t = global_c
        result = self.decoder(h_t, m_s)
        rank = torch.argsort(result, dim=1, descending=True)
        return rank[:, 0:k]
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Sweep over candidate values of the cosine-logit scale w (only 20 here).
w_list = [20]
record = list()
for w in w_list:
    # Fix all RNG seeds and force deterministic cuDNN for reproducibility.
    np.random.seed(1)
    torch.manual_seed(1)
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
    train_sets = TensorDataset(train_x.long(), train_pos.long(), train_y.long())
    train_dataload = DataLoader(train_sets, batch_size=512, shuffle=True)
    criterion = nn.CrossEntropyLoss().cuda()
    test_x, test_pos, test_y = test_x.long(), test_pos.long(), test_y.long()
    all_test_sets = TensorDataset(test_x, test_pos, test_y)
    test_dataload = DataLoader(all_test_sets, batch_size=512,shuffle=False)
    # n_items=40842 and n_pos=71 must match the preprocessing above
    # (virtual target token 40841, position width max_length+1).
    model = DualAttention(100, 100, 40842, 71, w, dropout=0.5, activate='relu').cuda()
    opti = optim.Adam(model.parameters(), lr=0.001, weight_decay=0, amsgrad=True)
    best_result = 0
    total_time = 0
    best_result_5 = 0
    best_result_ = []
# NOTE(review): the training loop below is commented out — this notebook only
# evaluates a saved checkpoint.  If re-enabled, note that `losses / step + 1`
# in the progress print divides before adding 1; it presumably should be
# `losses / (step + 1)`.
# for epoch in range(50):
#     start_time = datetime.datetime.now()
#     print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
#     losses = 0
#     for step, (x_train, pos_train, y_train) in enumerate(train_dataload):
#         opti.zero_grad()
#         q = model(x_train.cuda(), pos_train.cuda())
#         loss = criterion(q, y_train.cuda()-1)
#         loss.backward()
#         opti.step()
#         losses += loss.item()
#         if (step + 1) % 100 == 0:
#             print("[%02d/%d] [%03d/%d] mean_loss : %0.2f" % (epoch, 50, step, len(train_sets) / 512, losses / step + 1))
#     end_time = datetime.datetime.now()
#     with torch.no_grad():
#         y_pre_all = torch.LongTensor().cuda()
#         y_pre_all_10 = torch.LongTensor()
#         y_pre_all_5 = torch.LongTensor()
#         for x_test, pos_test, y_test in test_dataload:
#             with torch.no_grad():
#                 y_pre = model.predict(x_test.cuda(), pos_test.cuda(), 20)
#                 y_pre_all = torch.cat((y_pre_all, y_pre), 0)
#                 y_pre_all_10 = torch.cat((y_pre_all_10, y_pre.cpu()[:, :10]), 0)
#                 y_pre_all_5 = torch.cat((y_pre_all_5, y_pre.cpu()[:, :5]), 0)
#         recall = get_recall(y_pre_all, test_y.cuda().unsqueeze(1)-1)
#         recall_10 = get_recall(y_pre_all_10, test_y.unsqueeze(1)-1)
#         recall_5 = get_recall(y_pre_all_5, test_y.unsqueeze(1)-1)
#         mrr = get_mrr(y_pre_all, test_y.cuda().unsqueeze(1)-1)
#         mrr_10 = get_mrr(y_pre_all_10, test_y.unsqueeze(1)-1)
#         mrr_5 = get_mrr(y_pre_all_5, test_y.unsqueeze(1)-1)
#
#         print("Recall@20: " + "%.4f" %recall + " Recall@10: " + "%.4f" %recall_10 +" Recall@5:" + "%.4f" %recall_5)
#         print("MRR@20:" + "%.4f" % mrr.tolist() + " MRR@10:" + "%.4f" % mrr_10.tolist() + " MRR@5:" + "%.4f" % mrr_5.tolist())
#         if best_result < recall:
#             best_result = recall
#             best_result_ = [recall_5, recall_10, recall, mrr_5, mrr_10, mrr]
#             torch.save(model.state_dict(), 'BestModel/best_dn_w_%s.pth' % str(w))
#         print("best result: " + str(best_result))
#         print("==================================")
# record.append(best_result_)
# print(record)
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Rebuild the model and load the checkpoint saved for w=20.
# NOTE(review): atten_way='MLP' and decoder_way='trilinear2' are stored but
# never read by forward/predict — presumably dead arguments; confirm.
model = DualAttention(100, 100, 40842, 71, 20, atten_way='MLP', decoder_way='trilinear2', dropout=0.5, activate='relu').cuda()
model.load_state_dict(torch.load('BestModel/best_dn_w_20.pth'))
# + pycharm={"is_executing": false, "name": "#%%\n"}
# Evaluate Recall@{5,10,20} and MRR@{5,10,20} on the full test set.
with torch.no_grad():
    y_pre_all = torch.LongTensor().cuda()
    y_pre_all_10 = torch.LongTensor()
    y_pre_all_5 = torch.LongTensor()
    for x_test, pos_test, y_test in test_dataload:
        with torch.no_grad():
            # predict() returns 0-based top-20 item indices per session.
            y_pre = model.predict(x_test.cuda(), pos_test.cuda(), 20)
            y_pre_all = torch.cat((y_pre_all, y_pre), 0)
            y_pre_all_10 = torch.cat((y_pre_all_10, y_pre.cpu()[:, :10]), 0)
            y_pre_all_5 = torch.cat((y_pre_all_5, y_pre.cpu()[:, :5]), 0)
    # Targets are shifted by -1: predictions are 0-based while item ids
    # start at 1 (0 is padding).
    recall = get_recall(y_pre_all, test_y.cuda().unsqueeze(1)-1)
    recall_10 = get_recall(y_pre_all_10, test_y.unsqueeze(1)-1)
    recall_5 = get_recall(y_pre_all_5, test_y.unsqueeze(1)-1)
    mrr = get_mrr(y_pre_all, test_y.cuda().unsqueeze(1)-1)
    mrr_10 = get_mrr(y_pre_all_10, test_y.unsqueeze(1)-1)
    mrr_5 = get_mrr(y_pre_all_5, test_y.unsqueeze(1)-1)
    print("Recall@20: " + "%.4f" %recall + " Recall@10: " + "%.4f" %recall_10 +" Recall@5:" + "%.4f" %recall_5)
    print("MRR@20:" + "%.4f" % mrr.tolist() + " MRR@10:" + "%.4f" % mrr_10.tolist() + " MRR@5:" + "%.4f" % mrr_5.tolist())
# + pycharm={"is_executing": false, "name": "#%%\n"}
# + pycharm={"name": "#%%\n"}
| DualAdaptiveTrain.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7
# language: python
# name: python3.7
# ---
# %load_ext autoreload
# %autoreload 2
import os,sys
sys.path.insert(0,"..")
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import torch
import torchvision
import torchxrayvision as xrv
d_nih = xrv.datasets.NIH_Dataset(imgpath="/lustre04/scratch/cohenjos/NIH/images-224",
views=["PA","AP"], unique_patients=False)
d_nih
d_nih.csv.iloc[0]
sample = d_nih[0]
plt.imshow(sample["img"][0], cmap="Greys_r");
dict(zip(d_nih.pathologies,sample["lab"]))
d_pc = xrv.datasets.PC_Dataset(imgpath="/lustre04/scratch/cohenjos/PC/images-224",
views=["PA","AP"], unique_patients=False)
d_pc
sample = d_pc[0]
tr = xrv.datasets.XRayResizer(224)
data_transforms = torchvision.transforms.Compose([
xrv.datasets.ToPILImage(),
torchvision.transforms.RandomAffine(45, translate=(0.15, 0.15), scale=(0.85, 1.15)),
torchvision.transforms.ToTensor()
])
a = data_transforms(tr(sample["img"]))
plt.imshow(a[0], cmap="Greys_r")
d_chex = xrv.datasets.CheX_Dataset(imgpath="/lustre03/project/6008064/jpcohen/chexpert/CheXpert-v1.0-small",
csvpath="/lustre03/project/6008064/jpcohen/chexpert/CheXpert-v1.0-small/train.csv",
views=["PA","AP"], unique_patients=False)
d_chex
d_nih2 = xrv.datasets.NIH_Google_Dataset(imgpath="/lustre04/scratch/cohenjos/NIH/images-224",
views=["PA","AP"], unique_patients=False)
d_nih2
sample = d_nih2[0]
plt.imshow(sample["img"][0], cmap="Greys_r");
dict(zip(d_nih.pathologies,sample["lab"]))
d_mimic_chex = xrv.datasets.MIMIC_Dataset(#datadir="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/files",
imgpath="/lustre04/scratch/cohenjos/MIMIC/images-224/files",
csvpath="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-chexpert.csv.gz",
metacsvpath="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz",
views=["PA","AP"], unique_patients=False)
d_mimic_chex
d_mimic_negbio = xrv.datasets.MIMIC_Dataset(#datadir="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/files",
imgpath="/lustre04/scratch/cohenjos/MIMIC/images-224/files",
csvpath="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-negbio.csv.gz",
metacsvpath="/lustre03/project/6008064/jpcohen/MIMICCXR-2.0/mimic-cxr-2.0.0-metadata.csv.gz",
views=["PA","AP"], unique_patients=False)
d_mimic_negbio
sample = d_mimic_chex[15011]
plt.imshow(sample["img"][0], cmap="Greys_r");
dict(zip(d_mimic_chex.pathologies,sample["lab"]))
import torchvision, torchvision.transforms
transform = torchvision.transforms.Compose([xrv.datasets.XRayCenterCrop(),xrv.datasets.XRayResizer(224)])
d_openi = xrv.datasets.Openi_Dataset(imgpath="/lustre03/project/6008064/jpcohen/OpenI/images",
transform=transform,
unique_patients=False)
d_openi
sample = d_openi[0]
plt.imshow(sample["img"][0], cmap="Greys_r");
dict(zip(d_openi.pathologies,sample["lab"]))
#RSNA_Pneumonia_Dataset
d_rsna = xrv.datasets.RSNA_Pneumonia_Dataset(imgpath="/home/cohenjos/projects/rpp-bengioy/jpcohen/kaggle-pneumonia/stage_2_train_images_jpg",
views=["PA","AP"],
unique_patients=False)
d_rsna
# Unified label space: every dataset below is re-mapped onto this single
# ordered pathology list so their label vectors line up for merging.
pathologies = [ 'Atelectasis',
                'Consolidation',
                'Infiltration',
                'Pneumothorax',
                'Edema',
                'Emphysema',
                'Fibrosis',
                'Effusion',
                'Pneumonia',
                'Pleural_Thickening',
                'Cardiomegaly',
                'Nodule',
                'Mass',
                'Hernia',
                'Lung Lesion',
                'Fracture',
                'Lung Opacity',
                'Enlarged Cardiomediastinum'
               ]
# relabel_dataset mutates each dataset in place to emit labels in the
# `pathologies` order above.
print(d_nih.__class__)
xrv.datasets.relabel_dataset(pathologies, d_nih)
print(d_pc.__class__)
xrv.datasets.relabel_dataset(pathologies, d_pc)
print(d_chex.__class__)
xrv.datasets.relabel_dataset(pathologies, d_chex)
print(d_mimic_chex.__class__)
xrv.datasets.relabel_dataset(pathologies, d_mimic_chex)
# Concatenate the aligned datasets into one.
dd = xrv.datasets.Merge_Dataset([d_nih, d_pc, d_chex, d_mimic_chex])
dd
| scripts/xray_datasets.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
print("Hello World")
# +
firstName = "Shakeel"
lastName = "Haider"
email = "<EMAIL>";
print("Name : " + firstName + " " + lastName)
print("Contact : " + email)
# +
fNum = 43;
lNum = 3;
sum = fNum + lNum;
print("Total is " + str(sum))
# +
fnum = 3.3;
lnum = 4.7;
sum = fnum + lnum;
print("total is " + str(sum))
# -
print("Reminder !! ...variables naming rule")
# +
num1 = 12;
num2 = 13;
x = num1 + num2;
num1 += 3;
z = num1 / num2;
a = num1 * num2;
print(x);
print(num1);
print(z);
print(a);
# +
num1 = 1 + 4 * 3 / 2;
num2 = (1 + 4) * 3 / 2;
num3 = 1 + (4 * 3) / 2;
print("python follow precedence rules");
print(num1);
print(num2);
print(num3);
# +
name = "Shakeel";
language = "Pashto";
city = "karachi";
print("My name is " + str(name) + " and i'm living in " + str(city));
# +
num1 = 3;
num2 = 5;
if num1 == num2:
print("they are equal");
elif num1 < num2:
print("first number is less than second number");
else:
print("first number is greater than second number");
# +
name1 = "Shakeel";
shakeelMarks = 55;
name2 = "zeeshan";
zeeshanMarks = 75;
if shakeelMarks == zeeshanMarks:
print("Draw !!");
elif shakeelMarks < zeeshanMarks:
print(str(name1) + " win !!");
else:
print(str(name2) + " win !!");
# +
name1 = "Shakeel";
shakeelMarks = 55;
name2 = "zeeshan";
zeeshanMarks = 75;
if shakeelMarks == zeeshanMarks:
print("Draw !!");
elif shakeelMarks < zeeshanMarks:
print(str(name1) + " win !!");
else:
print(str(name2) + " win !!");
# +
shakeelWeight = 90;
nomanWeight = 72;
dawoodWeight = 53;
sAge = 24;
nAge = 19;
dAge = 23;
if shakeelWeight > 75 and sAge >20:
print("join gym");
elif dawoodWeight < 65 or (dAge < 30 and dAge > 18):
print("problem !!");
# +
name = "shakeel";
studentId = 1735;
if name == "shakeel":
print("name is cleared !!");
if studentId == 1735:
print("identified");
else:
print("wrong person");
else:
print("wrong person");
# +
# addition will takes place after multiplication and addition
num1 = 1 + 4 * 3 / 2;
# same as 5 * 3 /2
num2 = (1 + 4) * 3 / 2;
# same as 1+12/2
num3 = 1 + (4 * 3) / 2;
print("python follow precedence rules");
# this should produce 7.5
print(num1);
print(num2);
print(num3);
# +
foods = [ "Nihari", "Biryani", "Karahi", "Qorma", "Salad", "Kabab", "Polaow" ];
print(str(foods[1]) + " != " + str(foods[6]));
# +
cities = ["karachi", "Miltan", "Faisalabad"]
print(cities);
cities.append("Mardan")
print("After append(Mardan) ")
print(cities)
cities.insert(0, "Peshawar")
print("After insert(0, Peshawar)")
print(cities)
# +
players = ["Ronaldo", "Drogba", "Lempard", "Messi", "Isco", "Hazard",]
print(players)
print("Best Players Are :")
print(players[1:4])
# +
players = ["Messi", "Suarez", "Coutinho", "Dembele", "Pique"]
print(players)
del players[4]
print(players)
players.remove("Dembele")
print(players)
# +
players = ["Messi", "Suarez", "Coutinho", "Dembele", "Pique"]
print(players)
defenders = players.pop(4)
print("Attackers are :")
print(players)
# +
Tuple1 = ("Karachi", "Lahore", "Islamabad", "Peshawar", "Mardan");
print (Tuple1);
for a in Tuple1:
print("City Name: " + a);
# +
players = ["Messi", "Ronaldo", "Neymar", "Hazard", "Mbappe", "Kimich", "Lewandolski"]
best_players = ["Messi", "Ronaldo", "Hazard"]
# Print whether each player appears in the best_players list.
for a in players:
    # Fix: the original compared a string to the whole list (`a == best_players`),
    # which is always False; membership is what was intended.
    if a in best_players:
        print("This is one of tha Best Players")
        print(a)
    # Fix: the original `else` was missing its colon (SyntaxError).
    else:
        print("No Match")
# -
# Print the 1..9 multiplication table, one row per multiplicand, with each
# product followed by a space and a newline ending every row.
for row in range(1, 10):
    for col in range(1, 10):
        print(row * col, end=' ')
    print()
# +
print('Enter your name:');
x = input();
print ('Enter you Age');
a = input();
print('Hello, ' + x +' Your age is: ' + a);
print("Hello, " + str(x) + " Your age is: " + str(a));
# +
string = "THIS SHOULD ALL BE LOWERCASE."
print(string.lower())
string = "this should all be uppercase."
print(string.upper())
string = "ThIs ShOuLd Be MiXeD cAsEd."
print(string.swapcase())
# +
BioData = {
"Name": "<NAME>",
"Age": "24",
"DOB": "06/08/1994",
"Location": "Karachi"
}
print(BioData)
# +
Data = {
"Name:": "<NAME>",
"ID:": "1500-2015",
"Age:": "21",
"DOB:": "29/05/1997",
"Location:": "Karachi"
}
for a,b in Data.items():
print(a,b)
# -
Names = {
"Shakeel": "1735-2015",
"Usman": "2370-2015",
"Sohail": "1432-2015"
}
print("ID for Shakeel is " + Names["Shakeel"])
for k,v in Names.items():
print(k,v)
# +
print("Integer Dictionary:")
IntDict = {1: "Pakistan", 2: "India", 3: "Sweden", 4: "Iraq"}
print(IntDict)
print("String Dictionary")
St1Dict = {"Country": "Iraq"}
print(St1Dict)
# +
Student = {'name':'Shakeel', 'age': 24}
Student
print(Student)
Student['Location'] = 'Karachi'
print(Student)
# -
players = { "name" : "Messi", "age" : 32, "goals": 800, "cap" : 700}
print(players)
players["age"] = 33
print(players)
players["goals"] = 812
print(players)
players = { "name" : "Messi", "age" : 32, "goals": 800, "cap" : 700}
players
for v in players.values():
print(v)
# +
players = { "name" : "Messi", "age" : 32, "goals": 800, "cap" : 700}
players.keys()
for k in players.keys():
print(k)
# +
players = { "name" : "Messi", "age" : 32, "goals": 800, "cap" : 700}
print(players)
for k,v in players.items():
print(k,v)
# +
players = [
{
'name': 'messi',
'age': 33,
'goals': 814
},
{
'name': 'ronaldo',
'age': 46,
'goals': 77
}
]
print(players)
# +
players = [
{
'name': 'messi',
'age': 33,
'goals': 814
},
{
'name': 'ronaldo',
'age': 46,
'goals': 77
}
]
print(players)
best = players[0]
most_goals = best['goals']
print(most_goals)
# +
Listofnums = [1, 2, 3, 4, 5, 6,7,8,9]
x = len(Listofnums)
print("length of list is " + str(x))
customers = [
{
"customer id": 0,
"first name":"Shakeel",
"last name": "Haider",
}
]
new_customer_id = len(customers)
new_first_name = "Zeeshan"
new_last_name = "Khan"
new_dictionary = {
"customer id": new_customer_id,
"first name": new_first_name,
"last name:": new_last_name,
}
customers.append(new_dictionary)
print(customers)
# +
dict = {
"population": 1000,
"group sizes": [47600, 45600, 20851],
}
print("This is dictionary")
print(dict)
product_lists = {
"products": ["Fanta", "Sprite", "Pakola","Pepsi", "7up", "Coca‑Cola" ],
"product qtys": [5, 15, 6, 13, 22, 45, 24],
}
print("This is List")
print(product_lists)
# -
details = {
"nickname": "Shakeel",
"married": "no",
"careers": ["Web Developer", "Software Engineer", "CEO"],
}
if "Web Developer" in details["careers"]:
print("yes!")
else:
print("no")
# +
customers = [
{
"customer id": 0,
"first name":"Shakeel",
"last name": "Haider",
"address": "S.I.T.E Town",
},
{
"customer id": 1,
"first name":"Zeeshan",
"last name": "Khan",
"address": "Orangi Town",
},
{
"customer id": 2,
"first name":"Ibrar",
"last name": "Shah",
"address": "Mardan",
},]
print(str(customers))
# -
customers = {
0: {
"First name":"Shakeel",
"Middle name": "Haider",
"Last name": "Mashwani",
"Address": "S.I.T.E Town",
},
1: {
"First Name":"Bilal",
"Middle name": "Khan",
"Last Name": "Mashwani",
"Address": "Mardan",
},
2: {
"First Name":"Yasir",
"Middle name": "Shah",
"Last Name": "<NAME>",
"Address": "Karachi",
},
}
print(customers[0])
print(customers[1])
print(customers[2])
# +
first_number = 6
second_number = 8
total = first_number + second_number
print("Total no without function", total)
def add_numbers():
first_number = 9
second_number = 3
total = first_number + second_number
print("Total number with function", total)
# +
def addition1():
num1 = 3
num2 = 4
sum = num1 + num2
print(sum)
def addition(num1, num2):
sum = num1 + num2
print(sum)
addition1()
addition(4,5)
# +
def addition(num1=0, num2=0):
sum = num1 + num2
print(sum)
addition()
addition(3,5)
# +
def addition(num1=0, num2=0):
sum = num1 + num2
print(sum)
addition()
addition(4,23)
# +
def greeting(name="s"):
if(name == "s"):
print("You didnt write your name")
else:
print("Hello " + str(name) + " !!!")
var = input("Enter your name or write s")
greeting(var)
# +
def display_result(winner, score, **other_info):
print("The winner was " + winner)
print("The score was " + score)
for key, value in other_info.items():
print(key + ": " + value)
team = "Barcelona"
goals = "6-0"
injures = "none"
substitute = 3
display_result(team, goals)
# +
def addition(val1, val2):
    """Return the greeting "Hi " followed by both values concatenated."""
    greeting = "Hi "
    return greeting + str(val1) + str(val2)
val1 = input("Enter First Name")
val2 = input("Enter Last Name")
sums = addition(val1, val2)
print(sums)
# +
def add_Numbers(num1 , num2):
    """Return the sum of the two numbers."""
    return num1 + num2
def subtract_Numbers(num1, num2):
    """Return num1 minus num2."""
    return num1 - num2
def multi_Numbers(num1, num2):
    """Return the product of the two numbers."""
    return num1 * num2
def devide_Numbers(num1, num2):
    """Return num1 divided by num2 (true division).

    Raises ZeroDivisionError when num2 is 0 — the prompt below does not
    guard against that input.
    """
    return num1 / num2
# Read two integers interactively and print the four results.
x = int(input("Enter First Number"))
y = int(input("Enter Second Number"))
addition = add_Numbers(x , y)
subtraction = subtract_Numbers(x, y)
multiplication = multi_Numbers(x, y)
division = devide_Numbers(x, y)
print(addition)
print(subtraction)
print(multiplication)
print(division)
# +
#global Variable
x = 100
def fun(x):
#local scope of variable
x = 3 + 2
return x
local = fun(x)
print(local)
print(x)
# +
def fun():
def extra():
print("Hello ")
def extra1():
print("Shakeel")
fun1 = extra()
fun2 = extra1()
fun()
# -
num = 6
val = 0
while val!=num:
print(val)
val += 1
# +
num = 0
while num != 5:
if(num == 2):
print("num now = 2")
else:
print(num)
num = num + 1
print(num)
# +
class Employee:
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : ", self.name, ", Salary: ", self.salary)
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
# +
class Employee:
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : ", self.name, ", Salary: ", self.salary)
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
# +
class Employee:
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : ", self.name, ", Salary: ", self.salary)
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
# +
class Employee:
empCount = 0
def __init__(self, name, salary):
self.name = name
self.salary = salary
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : ", self.name, ", Salary: ", self.salary)
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000)
emp1.displayEmployee()
emp2.displayEmployee()
# +
class Employee:
empCount = 0
def __init__(self, name, salary, Age):
self.name = name
self.salary = salary
self.Age = Age
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : "+ str(self.name)+ ", Salary: "+ str(self.salary)+ ", Age: "+ str(self.Age))
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000, 24)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000, 27)
emp1.displayEmployee()
emp2.displayEmployee()
# +
class Employee:
empCount = 0
def __init__(self, name, salary, Age):
self.name = name
self.salary = salary
self.Age = Age
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : "+ str(self.name)+ ", Salary: "+ str(self.salary)+ ", Age: "+ str(self.Age))
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000, 24)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000, 27)
name1 = emp1.name
name2 = emp2.name
age1 = emp1.Age
age2 = emp2.Age
print(name1)
print(age1)
print(name2)
print(age2)
# +
class Employee:
empCount = 0
def __init__(self, name, salary, Age):
self.name = name
self.salary = salary
self.Age = Age
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : "+ str(self.name)+ ", Salary: "+ str(self.salary)+ ", Age: "+ str(self.Age))
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000, 24)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000, 27)
name1 = emp1.name
name2 = emp2.name
age1 = emp1.Age
age2 = emp2.Age
print(name1)
print(age1)
print(name2)
print(age2)
# +
class Employee:
empCount = 0
def __init__(self, name, salary, Age):
self.name = name
self.salary = salary
self.Age = Age
Employee.empCount += 1
def displayCount(self):
print("Total Employee %d" % Employee.empCount)
def displayEmployee(self):
print("Name : "+ str(self.name)+ ", Salary: "+ str(self.salary)+ ", Age: "+ str(self.Age))
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000, 24)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000, 27)
name1 = emp1.name
name2 = emp2.name
age1 = emp1.Age
age2 = emp2.Age
print(name1)
print(age1)
print(name2)
print(age2)
# +
class Employee:
    """Simple employee record that also counts how many instances exist."""

    empCount = 0  # class-wide count of Employee objects ever created

    def __init__(self, name, salary, Age):
        """Store the employee's fields and bump the class counter."""
        self.name = name
        self.salary = salary
        self.Age = Age
        Employee.empCount += 1

    def displayCount(self):
        """Print the total number of employees created so far."""
        print(f"Total Employee {Employee.empCount}")

    def displayEmployee(self):
        """Print this employee's name, salary and age on one line."""
        print(f"Name : {self.name}, Salary: {self.salary}, Age: {self.Age}")

    def changeAge(self, newAge):
        """Replace this employee's age with newAge."""
        self.Age = newAge
# This would create first object of Employee class
emp1 = Employee("Shakeel", 2000, 24)
# This would create second object of Employee class
emp2 = Employee("Salman", 5000, 27)
name1 = emp1.name
name2 = emp2.name
age1 = emp1.Age
age2 = emp2.Age
print(name1)
print(age1)
print(name2)
print(age2)
emp1.changeAge(45)
print(str(emp1.Age))
# -
| Smart Way To Learn Python - Solution.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_mxnet_p36
# language: python
# name: conda_mxnet_p36
# ---
# # Mphasis HyperGraf Hotel Reservation Cancellation Predictor
#
# Mphasis HyperGraf Hotel Reservation Cancellation Predictor is an Ensemble Machine Learning algorithm-based solution which predicts the probability of guests cancelling their hotel reservations. Cancellation of scheduled hotel stays is a significant challenge for hospitality companies. Cancellations lead to erroneous demand estimation, room pricing and revenue management. This solution predicts the likelihood of guests cancelling their hotel reservations based on guests' booking information. The solution assists hospitality companies to maximize occupancy and revenue per available room.
#
# ### Prerequisite
#
# To run this algorithm you need to have access to the following AWS Services:
# - Access to AWS SageMaker and the model package.
# - An S3 bucket to specify input/output.
# - Role for AWS SageMaker to access input/output from S3.
#
# This sample notebook shows you how to deploy Mphasis HyperGraf Hotel Reservation Cancellation Predictor using Amazon SageMaker.
#
# > **Note**: This is a reference notebook and it cannot run unless you make changes suggested in the notebook.
#
# #### Pre-requisites:
# 1. **Note**: This notebook contains elements which render correctly in Jupyter interface. Open this notebook from an Amazon SageMaker Notebook Instance or Amazon SageMaker Studio.
# 1. Ensure that IAM role used has **AmazonSageMakerFullAccess**
# 1. To deploy this ML model successfully, ensure that:
# 1. Either your IAM role has these three permissions and you have authority to make AWS Marketplace subscriptions in the AWS account used:
# 1. **aws-marketplace:ViewSubscriptions**
# 1. **aws-marketplace:Unsubscribe**
# 1. **aws-marketplace:Subscribe**
# 2. or your AWS account has a subscription to Mphasis HyperGraf Hotel Reservation Cancellation Predictor. If so, skip step: [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
#
# #### Contents:
# 1. [Subscribe to the model package](#1.-Subscribe-to-the-model-package)
# 2. [Create an endpoint and perform real-time inference](#2.-Create-an-endpoint-and-perform-real-time-inference)
# 1. [Create an endpoint](#A.-Create-an-endpoint)
# 2. [Create input payload](#B.-Create-input-payload)
# 3. [Perform real-time inference](#C.-Perform-real-time-inference)
# 4. [Output Result](#D.-Output-Result)
# 5. [Delete the endpoint](#E.-Delete-the-endpoint)
# 3. [Perform batch inference](#3.-Perform-batch-inference)
# 4. [Clean-up](#4.-Clean-up)
# 1. [Delete the model](#A.-Delete-the-model)
# 2. [Unsubscribe to the listing (optional)](#B.-Unsubscribe-to-the-listing-(optional))
#
#
# #### Usage instructions
# You can run this notebook one cell at a time (By using Shift+Enter for running a cell).
# ### 1. Subscribe to the model package
# To subscribe to the model package:
# 1. Open the model package listing page Mphasis HyperGraf Hotel Reservation Cancellation Predictor
# 1. On the AWS Marketplace listing, click on the **Continue to subscribe** button.
# 1. On the **Subscribe to this software** page, review and click on **"Accept Offer"** if you and your organization agree with the EULA, pricing, and support terms.
# 1. Once you click on **Continue to configuration button** and then choose a **region**, you will see a **Product Arn** displayed. This is the model package ARN that you need to specify while creating a deployable model using Boto3. Copy the ARN corresponding to your region and specify the same in the following cell.
model_package_arn='arn:aws:sagemaker:us-east-2:786796469737:model-package/hotel-cancel-v1'
import pandas as pd
import numpy as np
import json
import os
import boto3
from zipfile import ZipFile
import uuid
from sagemaker import ModelPackage
import sagemaker as sage
from sagemaker import get_execution_role
from sagemaker import ModelPackage
from IPython.display import Image, display
# +
# Resolve the notebook's IAM execution role and open a SageMaker session.
# The session's default S3 bucket is where inference inputs/outputs land.
role = get_execution_role()
sagemaker_session = sage.Session()
bucket=sagemaker_session.default_bucket()
bucket
# -
# ### 2. Create an endpoint and perform real-time inference
# If you want to understand how real-time inference with Amazon SageMaker works, see [Documentation](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-hosting.html).
# +
# Deployment configuration: model/endpoint name, payload MIME type, and the
# instance types used for the real-time endpoint and the batch transform job.
model_name='hotel-cancel'
content_type='text/csv'
real_time_inference_instance_type='ml.m5.large'
batch_transform_inference_instance_type='ml.m5.large'
# -
# #### A. Create an endpoint
# +
def predict_wrapper(endpoint, session):
    """Attach a CSV-serializing predictor to an existing endpoint.

    NOTE(review): RealTimePredictor is the SageMaker SDK v1 class name; in
    SDK v2 it was renamed to Predictor — confirm which SDK version is pinned.
    """
    return sage.predictor.RealTimePredictor(endpoint, session,content_type)
#create a deployable model from the model package.
model = ModelPackage(role=role,
                     model_package_arn=model_package_arn,
                     sagemaker_session=sagemaker_session,
                     predictor_cls=predict_wrapper)
# -
predictor = model.deploy(1, real_time_inference_instance_type, endpoint_name=model_name)
# Once endpoint has been created, you would be able to perform real-time inference.
# #### B. Create input payload
# #### Instructions
#
# 1) Supported content types: 'csv' file only
#
# 2) Mandatory fields: Guest_Id, hotel_location_type,lead_time, expected_arrival_month, expected_weekend_stays, expected_weekday_stays, adults, children, babies,meal_plan , guest_market_segment, repeat_guest, previous_cancellations, previous_bookings_not_canceled, booking_modifications,reservation_queue,average_daily_rate, required_car_parking_spaces, special_requests.
#
# 3) Input field descriptions:
#
# * Guest_Id: Numeric or alpha-numeric value to uniquely identify a guest. For example, ‘G01’. (Categorical)
#
# * hotel_location_type: If the location of the hotel is in city then value is ‘City Destination’, Else. if the location is a holiday spot then value is ‘Holiday Destination’. (Categorical)
#
# * lead_time: Number of days between the date of reservation and the expected date of arrival. For example, If confirmation date is July 1 and appointment date is July 30, then Lead_Time value is ‘29’. (Numeric)
#
# * expected_arrival_month: Month of expected arrival date. The values can be ‘January’ , February‘, ‘March’,’ April’, ‘May’, ’June’, ’July’, ‘August’, ’September’, ’October’, ’November’, ’December’. (Categorical)
#
# * expected_weekend_stays: Number of weekend nights (Saturday or Sunday) the guest booked to stay at the hotel. (Numeric)
#
# * expected_weekday_stays: Number of week day nights (Monday to Friday) the guest booked to stay at the hotel. (Numeric)
#
# * adults : Number of adults. (Numeric)
#
# * children : Number of children. (Numeric)
#
# * babies : Number of babies. (Numeric)
#
# * meal_plan: Type of meal booked.
#
# If the meal plan is not confirm, the value is ‘Undefined’, Else.
#
# If the meal plan is Self-Catering, the value is ‘SC’, Else.
#
# If the meal plan is of type Bed and Breakfast, the value is ‘BB’, Else.
#
# If the meal plan is of type Half Board (breakfast + 1 other meal), the value is ‘HB’, Else.
#
# If the meal plan is of type Full Board (breakfast + lunch+ dinner) , the value is ‘FB’. (Categorical)
#
# * guest_market_segment : segment to which the guest belong to.
#
# If the market-segment is Online Travel Agent/ Travel Operator, then the value is ‘Online TA’, Else.
#
# If the market-segment is Offline Travel Agent/ Travel Operator, then the value is ‘Offline TA’, Else.
#
# If the guest market-segment is Groups type, then the value is ‘Groups’, Else.
#
# If the guest market-segment is Direct type, the value is ‘Direct’, Else.
#
# If the guest market-segment is Corporate type, the value is ‘Corporate’, Else.
#
# If the guest market-segment is Aviation type, the value is ‘Aviation’, Else.
#
# If the guest market-segment is Complimentary type, the value is ‘Complimentary’, Else.
#
# If the guest market-segment is Undefined, the value is ‘Undefined’. (Categorical)
#
# * repeat_guest - If a guest has previous reservations in the hotel or stayed at the hotel in past, the value is ‘Y’ Else. the value is ‘N’. (Categorical)
#
# * previous_cancellations - Number of previous reservations that were cancelled by the guest prior to the current reservation. (Numerical)
#
# * previous_bookings_not_canceled -Number of previous reservations not cancelled by the guest prior to the current reservation. (Numerical)
#
# * booking_modifications – Number of amendments made to the reservation. (Numerical)
#
# * reservation_queue– Number of days the booking was in the waiting list before the reservation was confirmed. (Numerical)
#
# * average_daily_rate – total booking amount divided by the number of days of booking, in € (Euro). (Numerical)
#
# * required_car_parking_spaces – Number of car parking spaces required by the guest. (Numerical)
#
# * special_requests – Number of special requests made by the guest. (Numerical)
#
file_name = 'sample_input.csv'
# #### C. Perform real-time inference
# !aws sagemaker-runtime invoke-endpoint --endpoint-name $model_name --body fileb://$file_name --content-type 'text/csv' --region us-east-2 output.csv
# #### D. Output Result
#
# * Supported content types: 'csv' files only.
#
# * Output file columns: Guest_Id & Cancellation_Probability.
#
# * The Cancellation_Probability corresponding to a Guest_Id is the probability of guests cancelling their reservations.
#
# Load and preview the real-time inference output written by the CLI call above.
print("Sample Output:")
output_df = pd.read_csv(os.path.join(os.getcwd(), 'output.csv'))
output_df.head()
# #### E. Delete the endpoint
# Now that you have successfully performed a real-time inference, you do not need the endpoint any more. You can terminate the endpoint to avoid being charged.
# Re-attach to the endpoint by name, then tear it down (including its endpoint
# config) so it stops accruing charges.
# NOTE(review): in SageMaker SDK v2, Predictor's third positional parameter is
# a serializer, not a content type string — confirm this call is intended.
predictor=sage.predictor.Predictor(model_name, sagemaker_session,content_type)
predictor.delete_endpoint(delete_endpoint_config=True)
# ### 3. Perform batch inference
# In this section, you will perform batch inference using multiple input payloads together. If you are not familiar with batch transform, and want to learn more, see these links:
# 1. [How it works](https://docs.aws.amazon.com/sagemaker/latest/dg/ex1-batch-transform.html)
# 2. [How to run a batch transform job](https://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-batch.html)
#upload the batch-transform job input files to S3
# upload_data copies the local folder to s3://<default-bucket>/<model_name>/
# and returns the resulting S3 URI.
transform_input_folder = "data/input/batch"
transform_input = sagemaker_session.upload_data(transform_input_folder, key_prefix=model_name)
print("Transform input uploaded to " + transform_input)
#Run the batch-transform job
# One instance of the batch type processes every file under transform_input;
# wait() blocks until the job finishes.
transformer = model.transformer(1, batch_transform_inference_instance_type)
transformer.transform(transform_input, content_type=content_type)
transformer.wait()
#output is available on following path
transformer.output_path
# Download the batch-transform result (<input name>.out) into a local file.
# NOTE(review): bucket_name is hard-coded; it should match
# sagemaker_session.default_bucket() — confirm before running in another account.
# os.path.basename() on the s3:// URI yields the job's key prefix here.
s3_conn = boto3.client("s3")
bucket_name="sagemaker-us-east-2-786796469737"
with open('output2.csv', 'wb') as f:
    s3_conn.download_fileobj(bucket_name, os.path.basename(transformer.output_path)+'/sample_input.csv.out', f)
print("Output file loaded from bucket")
# Read the batch-transform output downloaded above and preview it.
output_df_path = os.path.join(os.getcwd(), 'output2.csv')
print("Sample Output:")
output_df = pd.read_csv(output_df_path)
output_df.head()
# ### 4. Clean-up
# #### A. Delete the model
model.delete_model()
# #### B. Unsubscribe to the listing (optional)
# If you would like to unsubscribe to the model package, follow these steps. Before you cancel the subscription, ensure that you do not have any [deployable model](https://console.aws.amazon.com/sagemaker/home#/models) created from the model package or using the algorithm. Note - You can find this information by looking at the container name associated with the model.
#
# **Steps to unsubscribe to product from AWS Marketplace**:
# 1. Navigate to __Machine Learning__ tab on [__Your Software subscriptions page__](https://aws.amazon.com/marketplace/ai/library?productType=ml&ref_=mlmp_gitdemo_indust)
# 2. Locate the listing that you want to cancel the subscription for, and then choose __Cancel Subscription__ to cancel the subscription.
| Hotel Reservation Cancellation Predictor.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Machine Learning
#
# In this file, you will find instructions on how to approach the challenge.
# We are going to work on different types of Machine Learning problems:
#
# - **Regression Problem**: The goal is to predict delay of flights.
# - **(Stretch) Multiclass Classification**: If the plane was delayed, we will predict what type of delay it is (will be).
# - **(Stretch) Binary Classification**: The goal is to predict if the flight will be cancelled.
# +
# import pandas
import pandas as pd
import numpy as np
import copy
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split, GridSearchCV, cross_validate, cross_val_score
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.ensemble import GradientBoostingRegressor, RandomForestRegressor
from sklearn.svm import SVR
from xgboost import XGBRegressor, XGBClassifier, plot_importance
from sklearn.metrics import r2_score, mean_squared_error
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# -
# ### Read Preprocessed Data
# load data
# index_col=0 restores the index that was saved when the CSV was written;
# head()/shape are notebook previews of the loaded frame.
df = pd.read_csv("data/flights_preprocessed_42k.csv", index_col=0)
df.head(3)
df.shape
# +
# reset dtypes
# The CSV round-trip loses categorical typing, so re-cast the ID-like and
# calendar columns to strings before they are target-encoded below.
categorical_features = ['op_unique_carrier',
                        'tail_num',
                        'op_carrier_fl_num',
                        'origin_airport_id',
                        'dest_airport_id',
#                         'share_code',
                        'origin_city',
                        'origin_state',
                        'dest_city',
                        'dest_state',
                        'fl_month',
                        'fl_weekday',
                        'season',
                        'inbound_fl']
df[categorical_features] = df[categorical_features].astype('str')
# df_train[categorical_features] = df_train[categorical_features].astype('str')
# df_test[categorical_features] =df_test[categorical_features].astype('str')
# + [markdown] tags=[]
# #### Transform Target Variable
# -
# target variable distribution
# left skewed with long tail
# target_train = df_train['arr_delay']
# target_test = df_test['arr_delay']
target = df['arr_delay']
sns.histplot(data=target)
plt.show()
# target variable distribution after log transformation in training set
# If use transformed target variable to train, when evaluate prediction results, REMEMBER to transform predicted value back first
# Shifting by (min - 10) guarantees target - diff >= 10 > 0, so the log below
# is always defined. The inverse transform is np.exp(pred) + diff.
diff = df['arr_delay'].min() - 10 # min value of whole dataset
# target_train_log = np.log(target_train - diff)
target_log = np.log(target - diff)
sns.histplot(data=target_log)
plt.show()
# use target for this case
# From here on df['arr_delay'] holds log(arr_delay - diff), not raw minutes.
df['arr_delay'] = target_log
# + [markdown] tags=[]
# #### More Feature Engineering
# + [markdown] tags=[]
# ##### Transform some new features by using 'arr_delay'
# + [markdown] tags=[]
# ##### Target Encoding before splitting dataset
# -
def leave_one_out_pct(df, i, d='arr_delay'):
    """
    Calculate each row's leave-one-out delay-occurrence percentage for a
    categorical column: within the row's category group, the share of
    *other* rows whose delay exceeds the 15-minute threshold.
    PARAMS:
        df (pd.DataFrame):
        i (str): categorical independent variable
        d (str): dependent variable, assumed already log-transformed as
            log(arr_delay - diff) — hence the np.log(15 - diff) cutoff below
    RETURNS (pd.Series):
        pandas series containing leave-one-out occurrence percentages
    """
    # NOTE(review): relies on the module-level `diff` computed earlier in the
    # notebook; this function is not usable standalone.
    data = df.copy()[[i, d]]
    group_ct = data.groupby(i, as_index=False).count().rename(columns={d: 'ct'})
    group_delay_ct = data[data[d] >= np.log(15 - diff)].groupby(i, as_index=False).count().rename(columns={d: 'delay_ct'})
    data = pd.merge(data, group_ct, how='left', on=i)
    data = pd.merge(data, group_delay_ct, how='left', on=i)
    # NOTE(review): 1 is subtracted from delay_ct for every row, even rows that
    # are not themselves delayed — confirm this is the intended leave-one-out.
    # The merges also reset the index to a RangeIndex; assigning the result
    # back onto df assumes df itself has a default RangeIndex — verify.
    data['leftout_pct'] = (data['delay_ct'] - 1) / (data['ct'] - 1)
    data = data.fillna(0)
    return data['leftout_pct']
def leave_one_out_mean(df, i, d='arr_delay'):
    """
    Compute, for every row, the mean of the dependent variable over all
    *other* rows in the row's category — a leave-one-out target encoding
    of column `i`.
    PARAMS:
        df (pd.DataFrame):
        i (str): categorical independent variable
        d (str): dependent variable
    RETURNS (pd.Series):
        leave-one-out mean of `d` per row (0 for singleton groups)
    """
    work = df.copy()[[i, d]]
    # Per-category totals and sizes, broadcast back onto every row.
    per_group = work.groupby(i)[d].agg(['sum', 'count']).reset_index()
    work = pd.merge(work, per_group, how='left', on=i)
    # Remove the row's own contribution before averaging over the rest.
    work['leftout_sum'] = work['sum'] - work[d]
    work['leftout_mean'] = work['leftout_sum'] / (work['count'] - 1)
    # Singleton groups yield 0/0 = NaN; treat those as 0.
    return work.fillna(0)['leftout_mean']
df.shape
# +
# calculate how many delay count percentage ('arr_delay' > 15) happened on each carrier/flight_num/tail_num/carrier/origin_airport/dest_airport/origin_city/origin_state/dest_city/dest_state
# calculate average delay time of each ... (same as above)
# merge with df
# Adds two leave-one-out columns (pct and mean) per feature: 18 new columns.
tran_features = ['op_unique_carrier', 'tail_num', 'op_carrier_fl_num', 'origin_airport_id', 'dest_airport_id', 'origin_city', 'origin_state', 'dest_city', 'dest_state']
for col in tran_features:
    df[f'{col}_leftout_pct'] = leave_one_out_pct(df, col)
    df[f'{col}_leftout_mean'] = leave_one_out_mean(df, col)
# -
df.shape
# NOTE(review): only the last 9 of the 18 newly added columns are checked for
# NaNs here — widen to iloc[:, -18:] if a full check is intended.
df.iloc[:, -9:].isnull().sum()
# + [markdown] tags=[]
# ## Main Task: Regression Problem
# -
# The target variable is **ARR_DELAY**. We need to be careful which columns to use and which don't. For example, DEP_DELAY is going to be the perfect predictor, but we can't use it because in real-life scenario, we want to predict the delay before the flight takes of --> We can use average delay from earlier days but not the one from the actual flight we predict.
#
# For example, variables **CARRIER_DELAY, WEATHER_DELAY, NAS_DELAY, SECURITY_DELAY, LATE_AIRCRAFT_DELAY** shouldn't be used directly as predictors as well. However, we can create various transformations from earlier values.
#
# We will be evaluating your models by predicting the ARR_DELAY for all flights **1 week in advance**.
# + [markdown] tags=[]
# ### Feature Selection / Dimensionality Reduction
# + [markdown] tags=[]
# ### Modeling
# -
# Use different ML techniques to predict each problem.
#
# - linear / logistic / multinomial logistic regression
# - Naive Bayes
# - Random Forest
# - SVM
# - XGBoost
# - The ensemble of your own choice
# #### XGBoost
df.columns
df.head()
# Hand-picked predictor subset; commented entries were tried and dropped.
# Raw timing/ID columns stay excluded in favor of their cyclic encodings and
# the leave-one-out target encodings computed above.
avail_features = [
#                 'fl_date',
#                 'op_unique_carrier',
#                 'tail_num',
#                 'op_carrier_fl_num',
#                 'origin_airport_id',
#                 'dest_airport_id',
#                 'crs_dep_time',
#                 'crs_arr_time',
#                 'crs_elapsed_time',
                'distance',
                'share_code',
#                 'origin_city',
#                 'origin_state',
#                 'dest_city',
#                 'dest_state',
#                 'arr_date',
#                 'dep_datetime',
#                 'arr_datetime',
#                 'fl_month',
#                 'fl_weekday',
#                 'season',
#                 'day_num_of_flights',
                'num_flights_6hrs',
                'inbound_fl_num',
#                 'inbound_fl',
#                 'dep_min_of_day',
#                 'arr_min_of_day',
#                 'dep_hr',
#                 'arr_hr',
                'arr_min_sin',
                'arr_min_cos',
#                 'arr_hr_sin',
#                 'arr_hr_cos',
                'dep_min_sin',
                'dep_min_cos',
#                 'dep_hr_sin',
#                 'dep_hr_cos',
                'fl_mnth_sin',
                'fl_mnth_cos',
                'fl_wkday_sin',
                'fl_wkday_cos',
                'op_unique_carrier_leftout_pct',
                'op_unique_carrier_leftout_mean',
#                 'tail_num_leftout_pct',
#                 'tail_num_leftout_mean',
#                 'op_carrier_fl_num_leftout_pct',
                'op_carrier_fl_num_leftout_mean',
#                 'origin_airport_id_leftout_pct',
                'origin_airport_id_leftout_mean',
#                 'dest_airport_id_leftout_pct',
#                 'dest_airport_id_leftout_mean',
#                 'origin_city_leftout_pct',
#                 'origin_city_leftout_mean',
#                 'origin_state_leftout_pct',
                'origin_state_leftout_mean',
#                 'dest_city_leftout_pct',
#                 'dest_city_leftout_mean',
#                 'dest_state_leftout_pct',
                'dest_state_leftout_mean'
                 ]
# +
# Train/evaluate an XGBoost regressor on the log-transformed target.
# NOTE(review): the leave-one-out encodings were computed on the FULL frame
# before this split, so some target information leaks into X_test — the
# notebook's name ("BEFORE_split") suggests this is a deliberate experiment.
X_train, X_test, y_train, y_test = train_test_split(df[avail_features], df['arr_delay'], train_size=0.7, test_size=0.3, random_state=888)
xg_reg = XGBRegressor(objective ='reg:squarederror',
                      learning_rate = 0.05,
                      max_depth = 3,
#                       reg_lambda = 15,
#                       gamma = 10,
                      n_estimators = 150)
xg_reg.fit(X_train, y_train)
# Predictions stay in log space here, matching y_test (also log space),
# so the r2 below is measured on the log scale.
y_pred = xg_reg.predict(X_test)
# y_pred = np.exp(xg_reg.predict(X_test)) + diff
# -
r2_score(y_test, y_pred)
xg_reg.score(X_train, y_train)
# #### Predict
# +
# read test file
df_pred = pd.read_csv('data/flights_test_preprocessed.csv', index_col=0)
# reset dtypes
categorical_features = ['op_unique_carrier',
                        'tail_num',
                        'op_carrier_fl_num',
                        'origin_airport_id',
                        'dest_airport_id',
#                         'share_code',
                        'origin_city',
                        'origin_state',
                        'dest_city',
                        'dest_state',
                        'fl_month',
                        'fl_weekday',
                        'season',
                        'inbound_fl']
df_pred[categorical_features] = df_pred[categorical_features].astype('str')
# Feature imputation
# add features to predict set with values computed by above dataset
# for example, flight No.#### used to have 7 delays in training set, then add 7 to same flight No. in test set
# assign 7 to flight No.##### in this example
feature_add = [['op_unique_carrier', 'op_unique_carrier_leftout_pct', 'op_unique_carrier_leftout_mean'],
               ['tail_num', 'tail_num_leftout_pct', 'tail_num_leftout_mean'],
               ['op_carrier_fl_num', 'op_carrier_fl_num_leftout_pct', 'op_carrier_fl_num_leftout_mean'],
               ['origin_airport_id', 'origin_airport_id_leftout_pct', 'origin_airport_id_leftout_mean'],
               ['dest_airport_id', 'dest_airport_id_leftout_pct', 'dest_airport_id_leftout_mean'],
               ['origin_city', 'origin_city_leftout_pct', 'origin_city_leftout_mean'],
               ['origin_state', 'origin_state_leftout_pct', 'origin_state_leftout_mean'],
               ['dest_city', 'dest_city_leftout_pct', 'dest_city_leftout_mean'],
               ['dest_state', 'dest_state_leftout_pct', 'dest_state_leftout_mean']]
# NOTE(review): df['arr_delay'] is ALREADY log-transformed at this point, so
# np.log(mean - diff) applies the transform a second time; also this single
# fallback value is used to fill the *_pct (probability) columns below —
# both look unintended, confirm before reuse.
delay_mean = np.log(df['arr_delay'].mean() - diff)
for cols in feature_add:
    # Category-level means from the training frame, merged onto the test frame;
    # unseen categories fall back to delay_mean (fillna acts on the whole frame).
    cats_means = df[cols].groupby(cols[0], as_index=False).mean()
    df_pred = pd.merge(df_pred, cats_means, on=cols[0], how='left').fillna(delay_mean)
# -
X_pred = df_pred[avail_features]
# Invert the log transform: predictions come back in minutes of delay.
y_pred = np.exp(xg_reg.predict(X_pred)) + diff # transform back
df_pred.head()
# Join the predictions back onto the raw test file on the flight identity keys.
test_raw = pd.read_csv('data/flights_test_raw_wk1.csv', index_col=0)
keys = ['fl_date', 'op_unique_carrier', 'tail_num', 'op_carrier_fl_num', 'origin_airport_id', 'dest_airport_id', 'crs_elapsed_time', 'distance']
test_raw[keys] = test_raw[keys].astype('str')
df_pred[keys] = df_pred[keys].astype('str')
test_raw['fl_date'] = pd.to_datetime(test_raw['fl_date'])
# NOTE(review): the line below copies test_raw's dates into df_pred instead of
# parsing df_pred's own fl_date column — correct only if both frames share the
# same row order/index; confirm, or use pd.to_datetime(df_pred['fl_date']).
df_pred['fl_date'] = pd.to_datetime(test_raw['fl_date'])
f = ['fl_date', 'op_unique_carrier', 'tail_num', 'op_carrier_fl_num', 'origin_airport_id', 'dest_airport_id', 'crs_elapsed_time', 'distance', 'predicted_delay']
df_pred['predicted_delay'] = y_pred
test_raw.shape
df_submit = pd.merge(test_raw, df_pred[f], on=keys, how='left')
df_submit.to_csv('data/submission.csv')
# +
# pca_features = [
# # 'op_unique_carrier',
# # 'tail_num'.
# # 'op_carrier_fl_num',
# # 'origin_airport_id',
# # 'dest_airport_id',
# 'crs_elapsed_time',
# 'distance',
# 'share_code',
# # 'origin_city',
# # 'origin_state',
# # 'dest_city',
# # 'dest_state',
# 'fl_month',
# 'fl_weekday',
# 'season',
# 'day_num_of_flights',
# 'num_flights_6hr',
# 'inbound_fl_num',
# 'inbound_fl',
# 'dep_min_of_day',
# 'arr_min_of_day',
# 'dep_hr',
# 'arr_hr',
# 'arr_hr_sin',
# 'arr_hr_cos',
# 'arr_min_sin',
# 'arr_min_cos',
# 'dep_min_sin',
# 'dep_min_cos',
# 'dep_hr_sin',
# 'dep_hr_cos',
# 'fl_mnth_sin',
# 'fl_mnth_cos',
# 'fl_wkday_sin',
# 'fl_wkday_cos',
# 'op_unique_carrier_delayct',
# 'op_unique_carrier_delaymedian',
# 'tail_num_delayct',
# 'tail_num_delaymedian',
# 'op_carrier_fl_num_delayct',
# 'op_carrier_fl_num_delaymedian',
# 'origin_airport_id_delayct',
# 'origin_airport_id_delaymedian',
# 'dest_airport_id_delayct',
# 'dest_airport_id_delaymedian',
# 'origin_city_delayct',
# 'origin_city_delaymedian',
# 'origin_state_delayct',
# 'origin_state_delaymedian',
# 'dest_city_delayct',
# 'dest_city_delaymedian',
# 'dest_state_delayct',
# 'dest_state_delaymedian'
# ]
# +
# df_X = pd.concat([df_train[pca_features], df_test[pca_features]])
# df_train.shape[0]
# +
# X_scaled = scaler.fit_transform(df_X)
# pca = PCA(n_components='mle')
# pca.fit(X_scaled)
# X_pca = pca.transform(X_scaled)
# +
# X_scaled_train = X_pca[:10609, :]
# X_scaled_test = X_pca[10609:, :]
# y_train = target_train_log
# y_test = target_test
# xg_reg = XGBRegressor(objective ='reg:squarederror',
# learning_rate = 0.1,
# max_depth = 6,
# # reg_lambda = 10,
# n_estimators = 300)
# xg_reg.fit(X_scaled_train, y_train)
# # y_pred = xg_reg.predict(X_test)
# y_pred = np.exp(xg_reg.predict(X_scaled_test)) + diff
# +
# r2_score(y_test, y_pred)
# +
# features = [
# # 'op_unique_carrier',
# # 'tail_num'.
# # 'op_carrier_fl_num',
# # 'origin_airport_id',
# # 'dest_airport_id',
# # 'crs_elapsed_time',
# 'distance',
# 'share_code',
# # 'origin_city',
# # 'origin_state',
# # 'dest_city',
# # 'dest_state',
# # 'fl_month',
# # 'fl_weekday',
# # 'season',
# # 'day_num_of_flights',
# # 'num_flights_6hr',
# # 'inbound_fl_num',
# # 'inbound_fl',
# # 'dep_min_of_day',
# # 'arr_min_of_day',
# # 'dep_hr',
# # 'arr_hr',
# # 'arr_hr_sin',
# # 'arr_hr_cos',
# # 'arr_min_sin',
# # 'arr_min_cos',
# 'dep_min_sin',
# # 'dep_min_cos',
# # 'dep_hr_sin',
# # 'dep_hr_cos',
# # 'fl_mnth_sin',
# # 'fl_mnth_cos',
# # 'fl_wkday_sin',
# # 'fl_wkday_cos',
# # 'op_unique_carrier_delayct',
# # 'op_unique_carrier_delaymedian',
# 'tail_num_delayct',
# # 'tail_num_delaymedian',
# 'op_carrier_fl_num_delayct',
# # 'op_carrier_fl_num_delaymedian',
# # 'origin_airport_id_delayct',
# # 'origin_airport_id_delaymedian',
# # 'dest_airport_id_delayct',
# # 'dest_airport_id_delaymedian',
# # 'origin_city_delayct',
# 'origin_city_delaymedian',
# # 'origin_state_delayct',
# 'origin_state_delaymedian',
# 'dest_city_delayct',
# # 'dest_city_delaymedian',
# # 'dest_state_delayct',
# 'dest_state_delaymedian'
# ]
# +
# scores = []
# for f in features:
# X_train = df_train[[f]]
# y_train = target_train_log
# X_test = df_test[[f]]
# y_test = target_test
# xg_reg = XGBRegressor(objective ='reg:squarederror',
# learning_rate = 0.1,
# max_depth = 6,
# # reg_lambda = 10,
# n_estimators = 300)
# xg_reg.fit(X_train, y_train)
# y_pred = np.exp(xg_reg.predict(X_test)) + diff
# # y_pred = xg_reg.predict(X_test)
# scores.append([f, xg_reg.score(X_train, y_train), r2_score(y_test, y_pred)])
# + jupyter={"outputs_hidden": true} tags=[]
# s = pd.DataFrame(scores)
# s[s[2]==s[2].max()]
# -
| src/notebooks/modeling_leaveoneout_target_encodeing_BEFORE_split_ylog.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] tags=[]
# # Assignment 3: Combustor Design
#
#
# + [markdown] tags=[]
# ## Introduction
#
# The global desire to reduce greenhouse gas emissions is the main reason for the interest in the use of hydrogen for power generation.
# Although hydrogen shows to be a promising solution, there are many challenges that need to be solved.
# One of the challenges focuses on the use of hydrogen as a fuel in gas turbines.
#
# In gas turbines hydrogen could replace natural gas as a fuel in the combustor. Unfortunately, this is accompanied with a technical challenge which deals with an important property in premixed combustion: the flame speed. The flame speed of hydrogen is an order of magnitude higher than natural gas due to the highly reactive nature of hydrogen. As a result a hydrogen flame is more prone to flashback than a natural gas flame.
#
# Flame flashback is the undesired upstream propagation of a flame into the premix section of a combustor. Flashback occurs when the flame speed is higher than the velocity of the incoming fresh mixture. This could cause severe equipment damage and turbine shutdown. Adjustments to traditional combustors are required in order to guarantee safe operation when using hydrogen as fuel.
#
# To this end, the students are asked to investigate the use of hydrogen, natural gas and a blend thereof in gas turbines. The first part will focus on the impact of the different fuels on the combustor geometry. Finally, we will have a closer look at the influence of different fuels on the $CO_2$ and $NO_x$ emissions. For simplicity, it is assumed that natural gas consists purely of methane ($CH_4$).
#
#
# ## Tasks
#
# ### Diameter of the combustor
# A gas turbine has a power output of 100 MW. The combustion section consists of 8 can combustors. Each can combustor is, for the sake of simplicty, represented by a tube with a diameter $D$.<br>
# The inlet temperature $T_2$ of the compressor is 293 K and the inlet pressure $p_2$ is 101325 Pa. To prevent damage of the turbine blades a turbine inlet temperature (TIT) of 1800 K is desired. Furthermore, assume that the specific heat of the fluid is constant through the compressor, i.e. specific heat capacity $c_{p,c}$=1.4 and a heat capacity ratio $\gamma_c$=1.4. The polytropic efficiency of the compressor and turbine are 0.90 and 0.85, respectively.
#
# The pressure ratio over the compressor will depend on your studentID:
#
# PR = 10 if (numpy.mod(studentID, 2) + 1) == 1<br>
# PR = 20 if (numpy.mod(studentID, 2) + 1) == 2
#
# Assume the TIT to be equal to the temperature of the flame inside the combustor. The flame temperature depends on the equivalence ratio ($\phi$), the hydrogen volume percentage of the fuel ($H_2\%$) and the combustor inlet temperature and pressure. For now consider the fuel to consist of pure natural gas ($H_2\%=0$). Note that the equivalence ratio is given by:
#
# \begin{align}
# \phi = \frac{\frac{m_{fuel}}{m_{air}}}{(\frac{m_{fuel}}{m_{air}})_{stoich}}
# \end{align}
#
# **1. Calculate the inlet temperature $T_3$ and inlet pressure $p_3$ of the combustor and determine the required equivalence ratio (adjust PART A and PART B and run the code), so that the TIT specification is met.** <br>
#
# Inside the combustor the flow is turbulent. Turbulence causes an increase in the flame speed, so that the turbulent flame speed $S_T \approx 10 S_L$.
#
# **2. With the equivalence ratio determined in the previous question, calculate the total mass flow rate ($\dot{m}_{tot}$) through the gas turbine and the maximal diameter $D$ of a single combustor tube, so that flashback is prevented. Adjust PART A, PART B, PART C and PART D in the code and run it again. Report the steps you have taken. <br>
# Is there also a minimum diameter? If so, no calculation required, discuss what could be the reason for the necessity of a minimal diameter of the combustor tube.**
#
# The combustion of methane is represented by the reaction: $CH_4 + 2 (O_2 + 3.76 N_2) \rightarrow CO_2 + 2 H_2O + 7.52 N_2$ <br>
#
# **3. Use the above reaction equation and the definition of $\phi$ to find the mass flow rate of the fuel $\dot{m}_{fuel}$.** <br>
#
# **4. Calculate the total heat input using $\dot{m}_{fuel}$ and calculate the efficiency of the complete cycle.** <br>
#
# **5. Repeat tasks 1-4 for a fuel consisting of $50\%H_2$/$50\%CH_4$ and $100\%H_2$. Discuss the effect of the addition of hydrogen to the fuel on the combustor geometry and cycle performance.**
#
# ### $CO_2$ and $NO_x$ emissions
#
# **6. A gas turbine manufacturer claims that their gas turbines can be fired with a hydrogen content of 30%. Discuss whether this could be regarded as an achievement (use the top plot in Figure 5).**
#
# **7. Consider an equivalence ratio $\phi=0.5$. Regarding emissions, discuss the advantages and disadvantages of increasing the hydrogen content of the fuel. Adjust PART A and use Figure 5.**
#
# ### Bonus assignment
# For simplicity, it was assumed that natural gas consists of pure methane. In reality, it could be a mix of methane, higher hydrocarbons and nitrogen. <br>An example is Dutch Natural Gas (DNG), which consists of $80\%CH_4$, $5\%C_2H_6$ and $15\%N_2$.
#
# **Repeat tasks 1-4 for a fuel consisting of $50\%H_2$/$50\%DNG$. <br> Hint1: Nitrogen does not participate in the reaction. <br> Hint2: This requires more adjustment of the code than just PARTS A, B, C, D.**
#
# ## Code
# Two properties of importance in this assignment are the laminar flame speed $S_L$ and the adiabatic flame temperature $T_{ad}$ of a mixture. These properties can be determined by solving the equations for continuity, momentum, species and energy in one dimension. Fortunately, we do not need to solve these equations by hand; instead, a chemical kinetics software (Cantera) is used to solve these equations by running a simulation. The simulation is illustrated in the sketch below. Keep in mind that the simulation can take some time to complete.
#
# For more information about Cantera visit: https://cantera.org/. <br>
# For more background information regarding the 1D flame simulation visit: https://cantera.org/science/flames.html
#
# 
#
# + tags=[]
#%% Load required packages
import sys
import cantera as ct
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm
#%% Constants
R_gas_mol = 8314   # universal gas constant [units: J*K^-1*kmol^-1]
R_gas_mass = 287   # specific gas constant of air [units: J*K^-1*kg^-1]
#%% Start
# Power output of turbine
power_output = 100 # units: MW
power_output*=1e6  # convert MW -> W
# Compressor and turbine polytropic efficiencies (1 = ideal)
etap_c = 1
etap_t = 1
# Pressure ratio
PR = 10
# Compressor inlet temperature and pressure
T2 = 293.15 # units: K
p2 = 101325 # units: Pa
# Heat capacity ratio of air at T=293.15 K
gam_c = 1.4
# Compressor stage
# Specific heat capacity (heat capacity per unit mass) of mixture in compressor,
# from the ideal-gas relation cp = R*gamma/(gamma-1)
cp_c = R_gas_mass*gam_c/(gam_c-1)
# cp_c = 1006 # units: J.kg^-1.K^-1
# cv_c = 717 # units: J.kg^-1.K^-1
# Molar mass of species [units: kg*kmol^-1]
M_H = 1.008
M_C = 12.011
M_N = 14.007
M_O = 15.999
M_H2 = M_H*2
M_CH4 = M_C + M_H*4
M_CO2 = M_C + M_O*2  # fixed: CO2 contains two oxygen atoms (was M_O*4, giving ~76 instead of ~44 kg/kmol)
M_O2 = M_O*2
M_N2 = M_N*2
# Define volume fractions of species in air [units: -]
f_O2 = 0.21
f_N2 = 0.79
########## PART A: ADJUST CODE HERE ##########
# Equivalence ratios
phis = [None, None, None] # Set equivalence ratios ranging from 0.4 to 0.8
# Hydrogen percentages
H2_percentages = [None, None, None] # Set hydrogen volume percentages of the fuel ranging from 0 to 100
################# END PART A ##################
# Define colors to make distinction between different mixtures based on hydrogen percentage
colors = cm.rainbow(np.linspace(0, 1, len(H2_percentages)))
#%% Premixed flame object
class mixture_class:
    """Premixed hydrogen/methane-air mixture.

    On construction, stores the unburnt mixture state and transport
    properties. After calling solve_equations(), also stores the laminar
    flame speed, adiabatic flame temperature, burnt mixture properties
    and burnt molar fractions obtained from a Cantera 1D
    freely-propagating flame simulation.
    """
    def __init__(self, phi, H2_percentage, T_u=293.15, p_u=101325):
        """Define the unburnt mixture.

        phi           -- equivalence ratio [-]
        H2_percentage -- hydrogen volume percentage of the fuel (0-100)
        T_u           -- unburnt mixture temperature [units: K]
        p_u           -- unburnt mixture pressure [units: Pa]
        """
        # Color and label for plots (color keyed to the hydrogen percentage)
        self.color = colors[H2_percentages.index(H2_percentage)]
        self.label = str(int(H2_percentage)) + r'$\% H_2$'
        # Temperature and pressure of the unburnt mixture
        self.T_u = T_u # units: K
        self.p_u = p_u # units: Pa
        # Equivalence ratio
        self.phi = phi
        # Hydrogen percentage of fuel
        self.H2_percentage = H2_percentage
        # Methane percentage of fuel (remainder of the blend)
        self.CH4_percentage = 100 - self.H2_percentage
        # Volume fractions of fuel
        self.f_H2 = self.H2_percentage/100
        self.f_CH4 = self.CH4_percentage/100
        # Mass densities of the pure fuel species (ideal gas law)
        rho_H2 = M_H2*self.p_u/(self.T_u*R_gas_mol)
        rho_CH4 = M_CH4*self.p_u/(self.T_u*R_gas_mol)
        # Check that the air and fuel volume fractions each sum to one.
        # (A second, fuel-only check was removed: it was dead code because
        # this combined check already exits on a bad fuel composition.)
        check_air = f_O2 + f_N2
        check_fuel = self.f_H2 + self.f_CH4
        if check_air == 1.0 and round(check_fuel,3) == 1.0:
            pass
        else:
            sys.exit("fuel or air composition is incorrect!")
        # Definition of the mixture
        # 1. Set the reaction mechanism
        self.gas = ct.Solution('gri30.cti')
        # 2. Define the fuel and air composition
        fuel = {'H2':self.f_H2, 'CH4':self.f_CH4}
        air = {'N2':f_N2/f_O2, 'O2':1.0}
        # 3. Set the equivalence ratio
        self.gas.set_equivalence_ratio(phi, fuel, air)
        # 4. Set the transport model (multicomponent diffusion)
        self.gas.transport_model = 'Multi'
        # 5. Set the unburnt mixture temperature and pressure
        self.gas.TP = T_u, p_u
        # Unburnt mixture properties
        self.h_u = self.gas.enthalpy_mass # units: J.kg^-1
        self.cp_u = self.gas.cp_mass # units: J*K^-1*kg^-1
        self.cv_u = self.gas.cv_mass # units: J*K^-1*kg^-1
        self.rho_u = self.gas.density_mass # units: kg.m^-3
        self.rho_u_H2 = rho_H2 # units: kg.m^-3
        self.rho_u_CH4 = rho_CH4 # units: kg.m^-3
        self.mu_u = self.gas.viscosity # units: Pa.s
        self.nu_u = self.mu_u/self.rho_u # units: m^2.s^-1
        self.lambda_u = self.gas.thermal_conductivity # units: W.m^-1.K^-1
        self.alpha_u = self.lambda_u/(self.rho_u*self.cp_u) # units: m^2.s^-1
    def solve_equations(self):
        """Run the 1D freely-propagating flame simulation and store the
        results on the instance. Mutates self.gas to the burnt state.
        NOTE: the simulation can take a while to complete."""
        # Unburnt molar fractions
        self.X_H2 = self.gas["H2"].X[0]
        self.X_CH4 = self.gas["CH4"].X[0]
        self.X_O2 = self.gas["O2"].X[0]
        self.X_N2 = self.gas["N2"].X[0]
        # Set domain size (1D)
        width = 0.05 # units: m
        # Create object for freely-propagating premixed flames
        flame = ct.FreeFlame(self.gas, width=width)
        # Set the criteria used to refine one domain
        flame.set_refine_criteria(ratio=3, slope=0.1, curve=0.1)
        # Solve the equations
        flame.solve(loglevel=0, auto=True)
        # Result 1: Laminar flame speed (inlet velocity of the converged flame)
        self.S_L0 = flame.velocity[0]*100 # units: cm.s^-1
        self.S_T = 10*self.S_L0/100 # units: m.s^-1 Rough estimation of the turbulent flame speed
        # Result 2: Adiabatic flame temperature
        self.T_ad = self.gas.T
        # Burnt mixture properties
        self.h_b = self.gas.enthalpy_mass # units: J.kg^-1
        self.cp_b = self.gas.cp_mass # units: J*K^-1*kg^-1
        self.cv_b = self.gas.cv_mass # units: J*K^-1*kg^-1
        self.rho_b = self.gas.density_mass # units: kg.m^-3
        self.mu_b = self.gas.viscosity # units: Pa.s
        self.nu_b = self.mu_b/self.rho_b # units: m^2.s^-1
        self.lambda_b = self.gas.thermal_conductivity # units: W.m^-1.K^-1
        self.alpha_b = self.lambda_b/(self.rho_b*self.cp_b) # units: m^2.s^-1
        # Burnt mixture molar fractions (CO2 and NOx, used for the emissions plots)
        self.X_CO2 = self.gas["CO2"].X[0]
        self.X_NO = self.gas["NO"].X[0]
        self.X_NO2 = self.gas["NO2"].X[0]
#%% Function to retrieve the LHV of different kind of fuels
def heating_value(fuel):
    """Return the lower heating value (LHV) of *fuel* [units: J.kg^-1].

    Burns a stoichiometric fuel/O2 mixture at constant temperature and
    pressure and takes the enthalpy difference per unit mass of fuel.
    Water is kept as vapour in the products, hence the *lower* heating
    value. (Docstring fixed: the HHV is not computed or returned.)

    fuel -- species name known to the mechanism, e.g. 'H2', 'CH4', 'C2H6'
    """
    T_u = 293.15 # units: K
    p_u = 101325 # units: Pa
    gas1 = ct.Solution('gri30.cti')
    gas1.TP = T_u, p_u
    gas1.set_equivalence_ratio(1.0, fuel, 'O2:1.0')
    h1 = gas1.enthalpy_mass
    Y_fuel = gas1[fuel].Y[0]
    # Complete combustion products as MOLE fractions (renamed from
    # Y_products: these values are set through TPX, i.e. mole fractions)
    X_products = {'CO2': gas1.elemental_mole_fraction('C'),
                  'H2O': 0.5 * gas1.elemental_mole_fraction('H'),
                  'N2': 0.5 * gas1.elemental_mole_fraction('N')}
    # None keeps T and p unchanged; only the composition is replaced
    gas1.TPX = None, None, X_products
    h2 = gas1.enthalpy_mass
    LHV = -(h2 - h1)/Y_fuel
    return LHV
# Lower Heating Values of well-known combustion fuels [units: J.kg^-1]
# (C2H6 is presumably included for the Dutch Natural Gas bonus assignment)
LHV_H2 = heating_value('H2')
LHV_CH4 = heating_value('CH4')
LHV_C2H6 = heating_value('C2H6')
#%% Create list of flame objects for multiple mixtures depending on the equivalence ratio
# and the percentage of hydrogen in the fuel (volume based)
# Initialize list for flame objects
mixtures = []
# Create flame objects and start simulations.
# NOTE: one 1D flame simulation is run per (phi, H2%) combination, so this
# loop can take a while to complete.
for phi in phis:
    for H2_percentage in H2_percentages:
        ########## PART B: ADJUST CODE HERE ##########
        # Compressor stage
        # Temperature after compressor stage
        T3 = None # units: K
        p3 = None # units: Pa
        # Combustor inlet temperature in K and pressure in Pa
        T_u = T3 # units: K
        p_u = p3 # units: Pa
        ################# END PART B ##################
        # Combustor stage
        # Define unburnt mixture that goes into the combustor
        mixture = mixture_class(phi, H2_percentage, T_u, p_u)
        # Solve equations and obtain burnt mixture properties
        mixture.solve_equations()
        # Append the mixture (with unburnt and burnt properties) to list of mixtures
        mixtures.append(mixture)
        # Turbine stage
        # Heat capacity ratio of mixture in turbine (from burnt-gas cp and cv)
        gam_t = mixture.cp_b/mixture.cv_b
        # Turbine inlet temperature (adiabatic flame temperature of the mixture)
        T4 = mixture.T_ad
        ########## PART C: ADJUST CODE HERE ##########
        # Turbine outlet temperature
        T5 = None
        ################# END PART C ##################
        # Progress report, since each simulation is slow
        print('mixture solved: phi=' + str(phi) + ', H2%=' + str(H2_percentage))
#%% Plots A: Laminar flame speed/adiabatic flame temperature vs equivalence ratio
plt.close('all')
# Plot parameters
fontsize = 12
marker = 'o'
markersize = 8
linewidth = 1
linestyle = 'None'  # data points only; fitted trend lines are drawn separately below
# Figure 1: Laminar flame speed vs equivalence ratio
fig1, ax1 = plt.subplots()
ax1.set_xlabel(r'$\phi$ [-]', fontsize=fontsize)
ax1.set_ylabel(r'$S_L$ [cm.s$^{-1}$]', fontsize=fontsize)
ax1.set_xlim(0.3, 1.1)
ax1.set_ylim(0, 250)
ax1.set_title('Laminar flame speed vs. equivalence ratio \n $T_u=$' + str(round(T_u,2)) + ' K, $p_u$=' + str(p_u*1e-5) + ' bar')
ax1.grid()
# Figure 2: Adiabatic flame temperature vs equivalence ratio
fig2, ax2 = plt.subplots()
ax2.set_xlabel(r'$\phi$ [-]', fontsize=fontsize)
ax2.set_ylabel(r'$T_{ad}$ [K]', fontsize=fontsize)
ax2.set_xlim(0.3, 1.1)
ax2.set_ylim(1200, 2800)
ax2.grid()
ax2.set_title('Adiabtic flame temperature vs. equivalence ratio \n $T_u=$' + str(round(T_u,2)) + ' K, $p_u$=' + str(p_u*1e-5) + ' bar')
# Initialize list for laminar flame speeds (one sub-list per H2 percentage)
S_L0_lists = [[] for i in range(len(H2_percentages))]
# Initialize list for adiabatic flame temperatures (one sub-list per H2 percentage)
T_ad_lists = [[] for i in range(len(H2_percentages))]
# Fill Figure 1 and 2; each H2% series is labeled only once (at the first
# phi) to avoid duplicate legend entries
for mixture in mixtures:
    index = H2_percentages.index(mixture.H2_percentage)
    ax1.plot(mixture.phi, mixture.S_L0, ls=linestyle, marker=marker, ms=markersize, c=mixture.color, label=mixture.label if mixture.phi == phis[0] else "")
    ax2.plot(mixture.phi, mixture.T_ad, ls=linestyle, marker=marker, ms=markersize, c=mixture.color, label=mixture.label if mixture.phi == phis[0] else "")
    S_L0_lists[index] = np.append(S_L0_lists[index], mixture.S_L0)
    T_ad_lists[index] = np.append(T_ad_lists[index], mixture.T_ad)
# Plot polynomial fits to show trends for laminar flame speed and adiabatic flame temperature as a function of the equivalence ratio
if len(phis) == 1:
    pass  # a trend fit is meaningless for a single data point
else:
    # Create zipped lists for polynomial fits
    lists_zipped = zip(S_L0_lists, T_ad_lists, colors)
    # Order of polynomial
    # NOTE(review): with 3 phi values a 3rd-order fit is underdetermined by
    # one point (np.polyfit needs poly_order+1 points for an exact fit)
    poly_order = 3
    for (S_L0, T_ad, color) in lists_zipped:
        # Create new array for phi (dense grid for a smooth trend line)
        phis_fit = np.linspace(phis[0], phis[-1])
        # Plot polynomial fit (order poly_order; comment fixed, was "4th order") for laminar flame speed
        coeff_S_L0 = np.polyfit(phis, S_L0, poly_order)
        poly_S_L0 = np.poly1d(coeff_S_L0)
        S_L0_fit = poly_S_L0(phis_fit)
        ax1.plot(phis_fit, S_L0_fit, ls="--", c=color)
        # Plot polynomial fit (order poly_order) for adiabatic flame temperature
        coeff_T_ad = np.polyfit(phis, T_ad, poly_order)
        poly_T_ad = np.poly1d(coeff_T_ad)
        T_ad_fit = poly_T_ad(phis_fit)
        ax2.plot(phis_fit, T_ad_fit, ls="--", c=color)
#% Plots B: Fuel blend properties and emissions
# Assume constant power (or heat input): heat_input = m_H2_dot*LHV_H2 + m_CH4_dot*LHV_CH4 = 1 (constant)
# Plot parameters
x_ticks = np.linspace(0, 100, 11)
y_ticks = x_ticks
bin_width = 5
# Initialize lists
H2_fraction_heat_input, H2_fraction_mass, CH4_fraction_heat_input, CH4_fraction_mass, CO2_fraction, fuel_energy_mass = ([] for i in range(6))
# Densities of hydrogen and methane of unburnt mixture
# NOTE(review): this uses whatever `mixture` was last in the simulation loop
# above, i.e. the densities at the last (phi, H2%) combination's T_u and p_u
rho_H2 = mixture.rho_u_H2
rho_CH4 = mixture.rho_u_CH4
# Reference: Amount of CO2 when H2%=0 (1 mol of CH4 == 1 mol CO2)
Q_CO2_ref = 1 / (rho_CH4*LHV_CH4)
# Hydrogen fraction in the fuel (volume based, swept from 0 to 1 in 5% steps)
H2_fraction_volume = np.linspace(0, 1, 21)
# Mixture calculations
for x in H2_fraction_volume:
    # Fractions of H2 and CH4 by heat input (density*LHV = heat per unit volume)
    H2_part = rho_H2*LHV_H2*x
    CH4_part = rho_CH4*LHV_CH4*(1-x)
    H2_fraction_heat_input_i = H2_part / (H2_part + CH4_part)
    CH4_fraction_heat_input_i = 1 - H2_fraction_heat_input_i
    H2_fraction_heat_input = np.append(H2_fraction_heat_input, H2_fraction_heat_input_i)
    CH4_fraction_heat_input = np.append(CH4_fraction_heat_input, CH4_fraction_heat_input_i)
    # Fraction of CO2 reduction: fuel volume needed for unit heat input,
    # times the CH4 share of that volume, relative to the pure-CH4 reference
    Q_u_i = 1 / (rho_H2*LHV_H2*x + rho_CH4*LHV_CH4*(1-x))
    Q_CH4_i = Q_u_i*(1-x)
    Q_CO2_i = Q_CH4_i
    CO2_fraction_i = Q_CO2_i/Q_CO2_ref
    CO2_fraction = np.append(CO2_fraction, CO2_fraction_i)
    # Fractions of H2 and CH4 by mass
    H2_part = rho_H2*x
    CH4_part = rho_CH4*(1-x)
    H2_fraction_mass_i = H2_part / (H2_part + CH4_part)
    CH4_fraction_mass_i = 1- H2_fraction_mass_i
    H2_fraction_mass = np.append(H2_fraction_mass, H2_fraction_mass_i)
    CH4_fraction_mass = np.append(CH4_fraction_mass, CH4_fraction_mass_i)
    # Fuel energy content (mass-weighted LHV of the blend)
    fuel_energy_mass_i = (H2_fraction_mass_i*LHV_H2 + CH4_fraction_mass_i*LHV_CH4)/1e6 # units: MJ.kg^-1
    fuel_energy_mass = np.append(fuel_energy_mass, fuel_energy_mass_i)
# Convert fractions to percentages
CO2_percentage = 100*CO2_fraction
CO2_reduction_percentage = 100 - CO2_percentage
H2_percentage_volume = H2_fraction_volume*100
CH4_percentage_heat_input = CH4_fraction_heat_input*100
H2_percentage_heat_input = H2_fraction_heat_input*100
H2_percentage_mass = 100*H2_fraction_mass
CH4_percentage_mass = 100*CH4_fraction_mass
# Plots
# Figure 3: heat-input percentage vs volume percentage (twin y-axes)
fig3, ax3 = plt.subplots()
line3_0 = ax3.plot(H2_percentage_volume, CH4_percentage_heat_input, marker=marker, color='tab:blue', label=r'$CH_{4}$')
ax3.set_xticks(x_ticks)
ax3.set_yticks(y_ticks)
ax3.set_xlabel(r'$H_{2}$% (by volume)', fontsize=fontsize)
ax3.set_ylabel(r'$CH_{4}$% (by heat input)', fontsize=fontsize, color='tab:blue')
ax3.set_title('Heat input vs. volume percentage for a methane/hydrogen fuel blend')
ax3.grid()
ax3_1 = ax3.twinx()
line3_1 = ax3_1.plot(H2_percentage_volume, H2_percentage_heat_input, marker=marker, color='tab:orange', label=r'$H_{2}$')
ax3_1.set_ylabel(r'$H_{2}$% (by heat input)', fontsize=fontsize, color='tab:orange')
# Collect line handles from both axes so one legend can show both series
lines3 = line3_0 + line3_1
labels3 = [l.get_label() for l in lines3]
# Figure 4: CO2 reduction vs heat-input percentage
fig4, ax4 = plt.subplots()
ax4.plot(H2_percentage_heat_input, CO2_reduction_percentage, marker=marker)
ax4.set_xticks(x_ticks)
ax4.set_yticks(y_ticks)
ax4.set_xlabel(r'$H_{2}$% (by heat input)', fontsize=fontsize)
ax4.set_ylabel(r'$CO_2$ reduction [%]', fontsize=fontsize)
ax4.set_title(r'$CO_2$ emissions vs. hydrogen/methane fuel blends (heat input %)')
ax4.grid()
# Figure 5: CO2 emissions (top) and NOx emissions (bottom)
fig5, (ax5, ax5_1) = plt.subplots(2)
ax5.plot(H2_percentage_volume, CO2_percentage, marker=marker)
ax5.set_xticks(x_ticks)
ax5.set_yticks(y_ticks)
ax5.set_xlabel(r'$H_{2}$% (by volume)', fontsize=fontsize)
ax5.set_ylabel(r'$CO_2$ emissions [%]', fontsize=fontsize)
ax5.set_title(r'$CO_2$ emissions vs. hydrogen/methane fuel blends (volume %)')
ax5.grid()
# NOx bar chart: only mixtures at the last equivalence ratio are plotted.
# NOTE(review): with more than one phi, i == 0 is the first mixture at
# phis[0], which never passes the phi == phis[-1] filter, so the NO/NO2
# legend labels are never attached — consider labeling the first *plotted*
# bar instead.
for i, mixture in enumerate(mixtures):
    if mixture.phi == phis[-1]:
        NO_percentage_volume = mixture.X_NO*100
        NO2_percentage_volume = mixture.X_NO2*100
        ax5_1.bar(mixture.H2_percentage, NO_percentage_volume, bin_width, color='tab:red', label=r'$NO$' if i == 0 else "")
        ax5_1.bar(mixture.H2_percentage, NO2_percentage_volume, bin_width, bottom=NO_percentage_volume, color='tab:blue', label=r'$NO_2$' if i == 0 else "")
        ax5_1.set_title(r'$NO_x$ emissions for $\phi=$' + str(mixture.phi))
ax5_1.set_xlim(-5, 105)
ax5_1.set_xticks(x_ticks)
ax5_1.set_xlabel(r'$H_{2}$% (by volume)', fontsize=fontsize)
ax5_1.set_ylabel(r'$NO_x$ [%]', fontsize=fontsize)
ax5_1.grid()
# Figure 6: mass percentage vs volume percentage
fig6, ax6 = plt.subplots()
ax6.plot(H2_percentage_volume, H2_percentage_mass, marker=marker, color='tab:blue', label=r'$H_{2}$')
ax6.plot(H2_percentage_volume, CH4_percentage_mass, marker=marker, color='tab:orange', label=r'$CH_{4}$')
ax6.set_xticks(x_ticks)
ax6.set_yticks(y_ticks)
ax6.set_xlabel(r'$H_{2}$% (by volume)', fontsize=fontsize)
ax6.set_ylabel(r'wt.% (by mass)', fontsize=fontsize)
ax6.set_title(r'Weight vs. volume percentage for hydrogen/methane fuel blends')
ax6.grid()
# Figure 7: energy content of the blend vs volume percentage
fig7, ax7 = plt.subplots()
ax7.plot(H2_percentage_volume, fuel_energy_mass, lw=2, marker=marker, color='tab:red')
ax7.set_xticks(x_ticks)
ax7.set_xlabel(r'$H_{2}$% (by volume)', fontsize=fontsize)
ax7.set_ylabel(r'Fuel energy content [MJ.kg$^{-1}$]', fontsize=fontsize)
ax7.set_title(r'Energy content vs. volume percentage for hydrogen/methane fuel blends')
ax7.grid()
# Turn on legends
ax1.legend()
ax2.legend()
ax3.legend(lines3, labels3, loc='center left')
ax5.legend(bbox_to_anchor=(1, 1))
ax5_1.legend(bbox_to_anchor=(1, 1))
ax6.legend(bbox_to_anchor=(1, 1))
# Fix figures layout
fig1.tight_layout()
fig2.tight_layout()
fig3.tight_layout()
fig4.tight_layout()
fig5.tight_layout()
fig6.tight_layout()
fig7.tight_layout()
# Uncomment to save figures as .svg
# fig1.savefig('turbo3_1.svg')
# fig2.savefig('turbo3_2.svg')
# fig3.savefig('turbo3_3.svg')
# fig4.savefig('turbo3_4.svg')
# fig5.savefig('turbo3_5.svg')
# fig6.savefig('turbo3_6.svg')
# fig7.savefig('turbo3_7.svg')
########## PART D: ADJUST CODE HERE ##########
# Combustor sizing and cycle performance for each simulated mixture
for mixture in mixtures:
    # Equivalence ratio of the mixture
    phi = mixture.phi
    # Density of the unburnt mixture (before entering the combustor)
    rho_u = mixture.rho_u # units: kg.m^-3 (comment fixed: was mislabeled kg.s^-1)
    # Volumetric fractions of hydrogen and methane
    f_H2 = mixture.f_H2
    f_CH4 = mixture.f_CH4
    # Specific heat capacity (heat capacity per unit mass) of the burnt gas
    cp_t = mixture.cp_b
    # Total mass flow rate
    m_dot = None # units: kg.s^-1
    # Velocity in the combustor (estimated turbulent flame speed)
    V = mixture.S_T # units: m.s^-1
    # Area and diameter of the combustor
    A = None # units: m^2
    D = None # units: m
    # ratio of mass fuel and mass air at stoichiometric conditions
    m_f_over_m_a_stoich = None
    # Mass flow of the fuel
    m_f_dot = None
    # Heat input or Heat of combustion
    Q = None # units: W
    # thermal cycle efficiency
    eta_cycle = None
################# END PART D ##################
# -
| _build/jupyter_execute/assignment3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="FwYcqB8yWq8D" colab_type="text"
# # Aprende Python analizando partidas de Dota 1. Parte 1/3 Jupyter Notebooks & Google Colab
# > Una manera divertida de aprender Python mientras analizamos la informacion de lo replays de Dota 1
#
# - toc: true
# - badges: true
# - comments: true
# - author: <NAME>
# - image: images/dota1.jpg
# - categories: [Python, Dota, NLP]
# + [markdown] id="CUsIWT_hWq8R" colab_type="text"
# 
#
# En el mes de febrero, en los días de carnaval en Bolivia tuve la suerte de encontrar entre los archivos de mi computadora las repeticiones de dota 1 que había guardado automáticamente en los años que jugamos dota 1 junto con mis amigos.
#
# 
#
# Lo primero que se me vino a la mente al ver estos archivos fue el data que tenian dentro, en especial de Mensajes de Chats que estaban llenos de insultos en su mayoria de las partidas.
#
# Uno de mis intereses es el procesamiento del lenguaje natural o (NLP) en inglés. De esta manera, siento que estos datos pueden usarse para crear algun clasificador de insulto a un nivel básico.
# Estos mensajes de chat no están etiquetados, por lo que creo que se utilizara algun algoritmo de Machine Learning no supervisado.
#
#
#
# 
#
# **Imagen de un mensaje toxico en medio de una partida de Dota 1**
#
#
# El archivo que contiene los replays pesa aproximadamente ~900 MB. Y puede ser descargado de aca:
#
# [Link al archivo.](http://www.mediafire.com/file/4xmjki2xxy3kdgo/replays.zip/file)
#
#
# Como comentario adicional, existe una herramienta de Windows para abrir estos archivos, en general suelen ser utilizados por los moderadores de RGC.
#
#
# ## Procedimiento
#
# Este post esta dividido en tres partes los cuales serán:
#
# * Que son los Jupyter Notebooks y como manejarlos.
# * Introduccion a Python y como extraer los replays.
# * Analisis de los mensajes de Chat y el clasificador de Insultos (Machine Learning)
#
# No es necesario conocimiento previo a Python.
#
# Utilizare `Jupyter Notebook` para escribir este blog como tambien para trabajar con Python.
#
#
# + [markdown] id="dL9go6-9Wq8b" colab_type="text"
# # 1. Que son los Jupyter Notebooks y como manejarlos.
#
#
# Una manera facil de trabajar con Python es utilizando los Jupyter Notebooks de [Google Colab](https://colab.research.google.com/).
#
#
# 
#
# Dentro de un [Google Colab](https://colab.research.google.com/) existen *`celdas` donde podemos `escribir código`* o *`Texto normal` con el formato `Markdown`*.
#
# El texto que estoy escribiendo ahora mismo esta siguiendo el formato `Markdown`. Para saber mas que es `Markdown` pueden ver el siguiente [Link](https://github.com/adam-p/markdown-here/wiki/Markdown-Cheatsheet).
#
# Una caracteristica adicional de las celdas donde se puede `escribir código` es que tambien puedo escribir `comandos mágicos` de Jupyter Notebook que al final vienen siendo comandos de Linux.
#
# ### Comandos magicos:
#
# Utilizare los siguientes`comandos mágicos`:
#
#
#
# **Para descargar un archivo desde internet**
#
# ```unix
#
# # # !wget <LINK DEL ARCHIVO QUE QUIERO DESCARGAR>
#
# ```
#
# **Para listar los archivos que tengo en la carpeta donde tengo abierto este notebook.**
#
# ```unix
# # # !ls -lsh
# ```
#
# **Para descomprimir un archivo**
#
# ```unix
# # # !unzip <nombre del archivo> -d .
# ```
#
# + [markdown] id="ELATbrEK1nmF" colab_type="text"
# **Probando los comandos Mágicos**
#
# A continuacion empezare a descargar el archivo, listar la carpeta para ver si lo descargo y luego descomprimirlo.
#
#
# + [markdown] id="S41AVgN817PJ" colab_type="text"
# ## **Descarga**
#
# Para descargar el archivo necesitamos tener el link de descarga. Ir a
# http://www.mediafire.com/file/4xmjki2xxy3kdgo/replays.zip/file y luego hacer click derecho en `Copy link location` y obtener el link de descarga. Este link le pasamos al comando mágico `!wget`
# + id="kkQ0v9YseiqB" colab_type="code" outputId="1f9f8609-dd6d-4494-ec57-f3dc3ca6de2a" colab={"base_uri": "https://localhost:8080/", "height": 208}
# !wget http://download1639.mediafire.com/iveo702e3bxg/4xmjki2xxy3kdgo/replays.zip
# + [markdown] id="0nrBoodf1_0M" colab_type="text"
# **Listar**
#
# `ls` se refiere a la "listar" y `-lsh` son la *vanderas* o parametros que acepta este comando:
# * `l` listar con detalles
# * `s` mostrar el tamaño de los archivos.
# * `h` mostrar los resultados en formato que un humano pueda entenderlo
# + id="hlU3lK2rfx96" colab_type="code" outputId="6163ac5b-e541-4469-ffa6-6147bdc7930f" colab={"base_uri": "https://localhost:8080/", "height": 69}
# %ls -lsh
# + [markdown] id="Rc9KADAtWq8j" colab_type="text"
# Parece que mi archivo fue descargado correctamente. Voy a descomprimirlo
# + [markdown] id="wwg3UO3a7B2h" colab_type="text"
# **Descomprimir**
#
# `-d` se refiere al "destino" donde se va a descomprimir y el punto **.** se refiere a `a esta misma carpeta donde estoy actualmente`
# + id="s7gRdl_Te_mf" colab_type="code" outputId="f7499b7c-0949-4bb6-a15c-c19e9d72fe42" colab={"base_uri": "https://localhost:8080/", "height": 121}
# !unzip replays.zip -d .
# + [markdown] id="3iGd2f-f7qMm" colab_type="text"
# Si listo la carpeta decomprimida `/replays` voy a ver que tiene dentro.
# + id="zbPYNpX9icTW" colab_type="code" outputId="37a3aa01-43b4-477b-bce2-b3a8fa24ca2a" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !ls replays/
# + [markdown] id="OpoEbekb70Dk" colab_type="text"
# Parece que internamente tiene mas archivos comprimidos. Voy a utilizar `-lsh` para ver en más detalle estos archivos.
# + id="b_gXTmUAinca" colab_type="code" outputId="8c459213-b740-4e94-b52c-4d4dacb2574c" colab={"base_uri": "https://localhost:8080/", "height": 104}
# !ls -lsh replays/
# + [markdown] id="tft_8Jw3i8vJ" colab_type="text"
# Parece que tenemos dentro otros archivos `.zip` y uno `.tar.gz`.
#
# Para practicar un poco más vamos a descomprimir el ` Multiplayer16.zip` de 275 MB y el otro `Multiplayer.tar.gz` de 510MB.
#
# Para descomprimir el `Multiplayer.tar.gz` utilizaremos en siguiente `comando mágico`.
#
# ```unix
#
# # # !tar xvzf <MI ARCHIVO TAR QUE QUIERO DESCOMPRIMIR>.tar.gz -C <Lugar donde quiero que se descomprima/>
#
# ```
# + [markdown] id="j9p-83li8PpG" colab_type="text"
# **Descomprimir el `.tar.gz`**
#
# Los parametros `xvzf` que toma el comando mágico `!tar` son por:
#
# * `x` extraer los archivos.
# * `v` *verbose* que se refiere a "mostrar cada archivo que se esta extrayendo"
# * `z` esta es muy importante y dice "descomprimir desde `gzip`"
# * `f` esto le dice a `tar` " cual es el archivo que vamos a descomprimir"
#
# Hay un parametro opcional que vamos a utilzar `-C`.
#
# * `C` , lugar donde quieres que se descomprima.
# + id="4y1m6qBajyI8" colab_type="code" outputId="8fed90cb-2490-4767-e984-e703f4675b8e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#collapse_show
# !tar xvzf replays/Multiplayer.tar.gz -C replays/
# + [markdown] id="YqzmDeNrkTZQ" colab_type="text"
# Vamos a ver que es lo que se descomprimio.
# + id="-UmEqE0jkRHH" colab_type="code" outputId="47f5ab69-6bfb-4673-de5d-b463c1c00da7" colab={"base_uri": "https://localhost:8080/", "height": 52}
# %ls replays/
# + id="ohPPJAS-kdLP" colab_type="code" outputId="2f4bef46-8a01-495b-b0ce-293c4923f32b" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#collapse
# %ls replays/Multiplayer
# + [markdown] id="GRROGYCtlJU2" colab_type="text"
# Perfecto, ahora vamos a extraer el archivo `replays/Multiplayer16.zip` utilizando el mismo procedimiento anterior para el `replays.zip`, con la diferencia de que cambios el archivo que queremos descomprimir y donde lo vamos a descomprimir `-d replays/`
# + id="QAzVlvBO3m4S" colab_type="code" outputId="348f814b-b7f2-4242-8658-890d125267a3" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#collapse
# !unzip replays/Multiplayer16.zip -d replays/
# + [markdown] id="7rtkO4X0AaLj" colab_type="text"
# Voy a revisar el peso de la carpeta `replays/Multiplayer` con el comando
#
# ```unix
# # # !du -lsh replays/Multiplayer
# ```
#
# + id="ZWDYcweT_yxq" colab_type="code" outputId="598b1308-4b22-49e0-baed-424dc1e617fd" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !du -lsh replays/Multiplayer
# + [markdown] id="8NqpscgyAu4s" colab_type="text"
# Descomprimiendo el ultimo archivo `Multiplayer17.zip`
# + id="pUV4n-_GA7aV" colab_type="code" outputId="5eb7926c-b774-4962-8ce7-771f3282120e" colab={"base_uri": "https://localhost:8080/", "height": 1000}
#collapse
# !unzip replays/Multiplayer17.zip -d replays/
# + [markdown] id="gYfzDhiIBMtn" colab_type="text"
# Revisando el tamaño final de la carpeta `replays/Multiplayer`
# + id="dBRu6BoqBFwm" colab_type="code" outputId="15ff29e4-905e-40d0-f3f8-8b2a29c46def" colab={"base_uri": "https://localhost:8080/", "height": 34}
# !du -lsh replays/Multiplayer
# + [markdown] id="Pf3b8OY1BRyr" colab_type="text"
# Estos archivos dentro de la carpeta `replays/Multiplayer` seran con los que vamos a trabajar.
#
# A continuacion empezamos con Python.
# + [markdown] id="r50dnVRcBfes" colab_type="text"
# # 2. Intro a Python y como extraer los replays.
#
# 
#
# Comenzaremos con Python y tomaremos como ejemplo práctico *leer los archivos dentro de la carpeta `replays/Multiplayer` con Python*
#
# Para esto necesitamos entender los conceptos de:
#
# * `String`
# * `Variables` y `print()`
# * `Paquete de Python`
# * `import`
# * `Lista`
#
#
# ### String
#
# Un `String` es una cadena de texto cualquiera. Estos `Strings` en Python se los crea utilizando el "Double Quote" o "Comillas".
#
# ¿Donde se los utiliza?
#
# Un ejemplo claro de como utilizar un `String` es la ruta o direccion donde estan nuestros archivos y en Python seria algo como
#
# ```python
#
# "replays/Multiplayer"
#
# ```
#
# De esta manera hemos creado un `String` utilizando las "comillas".
#
# Un `String` no nos sirve de mucho. Para que un `String` tenga más utilidad necesitamos asignar este `String` a una `Variable`.
#
# ## Variables y `print()`
#
# Una `Variable` puede tener el valor de nuestro `String`, como dice su nombre, esta `Variable` puede cambiar o variar su valor.
#
# *¿Como creamos una variable?*
#
# Para crear una variable es como en las matematicas, primero escogemos un nombre y luego con el simbolo de igual `=` asigmos un "valor" a esta variable, en nuestro caso le vamos a asignar un `String`.
#
# ```python
#
# mi_carpeta_de_replays = "replays/Multiplayer"
#
# ```
#
# De esta manera he utilizado la variable `mi_carpeta_de_replays` para asignarle el valor de un `String` que es `"replays/Multiplayer"`.
#
#
# Noten que he utilizado más de una palabra para crear el nombre de mi variable.
#
# `mi`, `carpeta`, `de`, `replays` unidos por una barra baja `_`.
#
# En Python y en general en otros lenguajes se suelen crear nombres de variables con mas de dos palabras para tener bien entendido que es lo que una `variable` representa. En Python se suele unir estas palabras con una `barra baja _`.
#
# A continuacion voy a crear una `celda` que corra el codigo de crear esta variable `mi_carpeta_de_replays`.
#
#
# + id="FPxu9zYmF5Yk" colab_type="code" colab={}
# Create a variable holding the string path to the folder with my replays
mi_carpeta_de_replays = "replays/Multiplayer"
# + [markdown] id="X4l9680tGFAE" colab_type="text"
# Funciono, he creado una variable que aloje la ruta de donde estan ubicados mis replays. Pero ahora.
#
# ***¿como verifico si esta variable funciona?***
#
# ### `print()`
# Aqui es donde entra una `funcion propia` de Python llamada `print()`.
#
# Lo llamo `funcion` porque representa el concepto de transformacion que tiene una funcion matematica, es decir...
#
#
# `y = f(x)` , `f` seria el `print` , los parentesis `( )`la manera en la que esta funcion acepta paremetros `x`. Y finalmente `y` es el resultado de la transformacion.
#
# ***¿Pero donde esta `x` en nuestro caso?***
#
# `x` son nuestras `Variables`.
#
#
# ***¿Que variables tenemos?***
#
# La unica variable que tenemos actualmente es `mi_carpeta_de_replays`, entonces probemos `print()` con esta variable `mi_carpeta_de_replays`.
# + id="oPb1CRJtHYV4" colab_type="code" outputId="a8ead3d4-3f21-4885-a704-c6b28e63ce99" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Using print() to show the value assigned to the variable
print(mi_carpeta_de_replays)
# + [markdown] id="YX870FenHbhz" colab_type="text"
# Entonces `print(mi_carpeta_de_replays)` hace que te muestre el valor que tiene asignado una variable, en nuestro caso, el dato que tiene asignado `mi_carpeta_de_replays` es `"replays/Multiplayer"` un `String`.
#
# En conclusión, con `print()` podemos ver qué valores tienen asignados nuestras variables.
#
# `print()` es `un método` que Python ya trae integrado. Otros lenguajes de programacion tambien tiene su equivalente de `print()`, en `javascript` seria algo como:
#
# ```javascript
#
# console.log()
#
# ```
# + [markdown] id="FpkOf7LIIpIr" colab_type="text"
# ## Paquete de Python
#
# El concepto de `paquete de python` puede entenderse como herramientas construidas/programadas por alguien mas para solucionar un problema.
#
# Un problema que tenemos ahora mismo es como leer los archivos que hay dentro de la `Variable` `mi_carpeta_de_replays` que representa una carpeta fisica en el disco duro.
#
# Para esto utilizaremos el paquete `glob` que resuelve este problema de manera muy sencilla. Para utilizar `glob` necesitamos `importarlo` dentro de este Jupyter Notebook y ahí es donde entra el siguiente concepto.
#
# ## `import`
#
# `import` es una palabra reservada por Python para llamar o cargar paquetes en la memoria del Notebook. Al ser una palabra reservada, no debe utilizarse como nombre de alguna variable por parte de nosotros.
#
# La manera en la que `import` funciona es:
#
# ```python
# import <el nombre del paquete>
# ```
#
# En nuestro caso *el nombre del paquete es glob* y deberia llamarse como
#
# ```python
# import glob
# ```
# Probemos el `import` en codigo.
#
#
# + id="z2an-gTSLWBn" colab_type="code" colab={}
# Voy a importar glob dentro del Notebook
import glob
# + [markdown] id="W85HRv3kLauE" colab_type="text"
# Parece que funciono, ¿que pasa si hago `print(glob)`?, probemos en una celda de codigo nuevamente.
# + id="XnuKYlqdLjUV" colab_type="code" outputId="b443d46c-29f4-4abe-aec9-d622ccc01994" colab={"base_uri": "https://localhost:8080/", "height": 34}
print(glob)
# + [markdown] id="c4VYJNtoLmE2" colab_type="text"
# Correctamente `print()` nos dice que este `glob` es un `modulo` y esta ubicado en `'/usr/lib/python3.6/glob.py'`
# + [markdown] id="JIK9VhWNL3Id" colab_type="text"
# Ahora...
#
# ***¿Como utilizamos `glob`?.***
#
# Para utilizar glob tenemos que entender el concepto de `suma de Strings`, es decir:
#
# ```python
#
# "esto es un String" + " ,Esto es Otro String" = "esto es un String ,Esto es Otro String"
# ```
#
# dos o mas `Strings` se pueden sumar para crear un único `String`.
#
#
# ***Esto es importante para utilizar `glob`?***.
#
# Esto lo veremos utilizando `glob` a continuación.
#
# + id="V6fY-zQEMI2u" colab_type="code" outputId="05a58180-9754-49c5-dc13-57deaec11c28" colab={"base_uri": "https://localhost:8080/", "height": 72}
## Using glob.
# Build the pattern glob needs by concatenating (summing) Strings
todo_lo_que_este_dentro_de_la_carpet = mi_carpeta_de_replays + "/*"
# Show the result of the String concatenation
print(todo_lo_que_este_dentro_de_la_carpet)
# glob finds "all" the files inside the folder described by the pattern in "todo_lo_que_este_dentro_de_la_carpet"
mis_replays = glob.glob(todo_lo_que_este_dentro_de_la_carpet)
# Show what glob found.
print(mis_replays)
# + [markdown] id="P5uUeSs4NkRD" colab_type="text"
# Noten que `glob` tiene un **punto** . despues de la palabra glob...
#
# ```python
#
# glob.glob
#
# ```
#
# esto es porque `glob` tiene un **método** que hace la operacion de listar los archivos, este **método** que se llama tambien `.glob` acepta como **parametro** un String, parecido al `print()`, con la diferencia de que
#
# `glob.glob(....string....)`
#
# nos devuelve **todo** **_*_** (noten el asterisco) lo que este dentro de de esa carpeta `/` en lugar de "mostrar" como lo hace `print()`. El asterisco **_*_** le dice al **método** `.glob(....)` "todo lo que este dentro"
# + [markdown] id="63FwbO-kL8PP" colab_type="text"
# El resultado de `glob.glob(todo_lo_que_este_dentro_de_la_carpet)` es:
# ```
# ['replays/Multiplayer/Replay_2013_12_27_2004.w3g', 'replay.......
# ```
# que se muestra con el `print(mis_replays)` , es una nuevo tipo de Objecto llamado "Lista"
#
#
# + [markdown] id="GF8rAPTkO_K2" colab_type="text"
# ### Listas
#
#
| _notebooks/2020-03-12-python-1-3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# 2.1: Built-in atomic data types in Python
# +
# 2.1.1: Numbers - floats and integers
# -
# 2.1.1.1: Declare a float
x = 55.5
print(x)
print(type(x))
# 2.1.1.2: Convert float to integer (int() truncates toward zero, it does not round)
x_int = int(x)
print(x_int)
print(type(x_int))
# 2.1.1.3: Convert integer to float
x_float = float(x_int)
print(x_float)
print(type(x_float))
# 2.1.1.4: Basic arithmetic for numbers
x = 3
y = 4
z = 5
result_lhs = x**2 + y**2 # power operator is not ^ (in Python ^ is bitwise XOR)
result_rhs = z**2
print(result_lhs)
print(result_rhs)
# 2.1.1.5: Use of the math module to calculate a square root
from math import sqrt
x = 4
print(sqrt(x))
# 2.1.1.6: Use of the math module to calculate distributions
# Verify the 68/95/99.7 rule https://en.wikipedia.org/wiki/68%E2%80%9395%E2%80%9399.7_rule
# The standard normal CDF can be written as (1 + erf(x / sqrt(2))) / 2.
from math import erf
cdf_0 = (1 + erf(0 / sqrt(2))) / 2
cdf_1 = (1 + erf(1 / sqrt(2))) / 2
cdf_2 = (1 + erf(2 / sqrt(2))) / 2
cdf_3 = (1 + erf(3 / sqrt(2))) / 2
# Mass within +/- k standard deviations; doubled because the distribution is symmetric
one_std = 2 * (cdf_1 - cdf_0)
two_std = 2 * (cdf_2 - cdf_0)
three_std = 2 * (cdf_3 - cdf_0)
print(one_std)
print(two_std)
print(three_std)
# 2.1.1.7: Number formatting
x = 3.14159
y = 1000000
print('{:.3f}'.format(x)) # print to three decimal places. Colon designates as format string
print('${:,.2f}'.format(y)) # print as financial i.e. dollar sign, 2 decimals and comma thousands separator
# +
# 2.1.2: Strings
# -
# 2.1.2.1: Declare a string
x = "hi"
print(x)
print(type(x))
# 2.1.2.2: Convert to a string
x = str(1.9)
y = str(4)
print(x)
print(type(x))
print(y)
print(type(y))
# 2.1.2.3: String formatting
x = "A special right triangle has sides {first_leg}, {second_leg} and {hypotenuse}".format(
    first_leg=3, second_leg=4, hypotenuse=5)
y = "A special right triangle has sides {}, {} and {}".format(3, 4, 5) # abbreviated style
print(x)
print(y)
# 2.1.2.4: Using index notation to return substrings - similar to Excel's MID formula
x = "123456789"
print(x[0:3]) # Indexing starts at 0. Second index is one more than the last element of the substring.
print(x[5:6]) # A substring can start in the middle of the string
print(x[-3:]) # Use a negative in the first index to start from the right
print(x[0:-4]) # Use a negative in the second index to eliminate the rightmost elements
# +
# 2.1.2.5: String split
# +
# 2.1.3: Dates
# -
# 2.1.3.1: Declare a date
from datetime import date
todays_date = date.today()
new_years_2040 = date(2040, 1, 1)
print(todays_date)
print(new_years_2040)
print(todays_date.year)
print(todays_date.month)
print(todays_date.day)
print(new_years_2040.weekday()) # Monday is 0 and Sunday is 6
# 2.1.3.2: Find the days between dates
date_difference = new_years_2040 - todays_date # subtracting two dates yields a timedelta
print(type(date_difference))
print(date_difference.days)
# Whole months between the dates = 12 * year difference PLUS the month difference.
# (The original subtracted the month term, which understated the gap by up to 22 months.)
month_difference = (new_years_2040.year - todays_date.year)*12 + (new_years_2040.month - todays_date.month)
print(month_difference)
# 2.1.3.3: Add/subtract days, weeks, years
from datetime import timedelta
today_plus_100_days = timedelta(days=100) + todays_date # Can also use weeks instead of days
print(today_plus_100_days)
print(todays_date)
years_to_add = 2
# NOTE(review): constructing the date directly raises ValueError if run on Feb 29
# (the target year has no Feb 29) -- acceptable for a tutorial, but worth knowing
print(date(todays_date.year + years_to_add, todays_date.month, todays_date.day)) # Add two years
# 2.1.3.4: Format dates
print(date.strftime(todays_date, '%Y')) # %Y is long year
print(date.strftime(todays_date, '%y')) # %y is short year
print(date.strftime(todays_date, '%A')) # %A is weekday
print(date.strftime(todays_date, '%B')) # %B is month
print(date.strftime(todays_date, '%A, %B %d %Y')) # Full description
# +
# 2.1.4: Booleans
# -
# 2.1.4.1: Test equality
boolean_result = 2 == 2 # double equal sign is equality (same as in R)
print(boolean_result)
print(not boolean_result)
print(boolean_result and True) # other logical statements are the same as english
print(boolean_result and False)
print(boolean_result or False)
print(type(boolean_result))
# 2.1.4.2: None type
x = None # Similar to NULL in R or SQL
print(x)
print(type(x))
| Chapter_2_Core_Python/Lesson_1_Built-in_Atomic_Data_Types.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# This is the collection of codes that read food atlas datasets and CDC health indicator datasets from Github repository, integrate datasets and cleaning data
# +
#merge food atlas datasets into one
import pandas as pd
Overall_folder='C:/Users/cathy/Capstone_project_1/'
dfs=list()
url_folder='https://raw.githubusercontent.com/cathyxinxyz/Capstone_Project_1/master/Datasets/Food_atlas/'
filenames=['ACCESS','ASSISTANCE','HEALTH','INSECURITY','LOCAL','PRICES_TAXES','RESTAURANTS','SOCIOECONOMIC','STORES']
for i,filename in enumerate(filenames):
    filepath=url_folder+filename+".csv"
    d=pd.read_csv(filepath,index_col='FIPS',encoding="ISO-8859-1")
    #append each dataset to the list; for all but the first, drop the redundant
    #'State' and 'County' columns so they appear only once after the merge
    if i!=0:
        dfs.append(d.drop(['State', 'County'], axis=1))
    else:
        dfs.append(d)
#column-wise outer merge on the shared FIPS county index
df_merge=pd.concat(dfs, join='outer', axis=1)
# -
print (df_merge.head(5))
# Check columns for missing values
df_merge.describe()
number_null_values_percol=df_merge.isnull().sum(axis=0)
#columns where more than 10% of the rows are missing (the 0.1 factor below)
cols_with_over_10_percent_null_values=number_null_values_percol[number_null_values_percol>0.1*df_merge.shape[0]]
print (cols_with_over_10_percent_null_values.index)
#drop these columns first
df_merge=df_merge.drop(list(cols_with_over_10_percent_null_values.index), axis=1)
df_merge.shape
#check number of remaining columns
print (df_merge.columns)
# categorizes columns into three groups: category data ('State' and 'County'), count data, percent data, # per 1000 pop, and percent change
#
# columns to keep: category data ('State' and 'County'), percent data, # per 1000 pop, and percent change; remove count data because it is not adjusted by population size
#
# Each column name is highly abstract and unreadable, need to extract info from the variable information provided by Food_atlas
url='https://raw.githubusercontent.com/cathyxinxyz/Capstone_Project_1/master/Datasets/Food_atlas/variable_info.csv'
var_info_df=pd.read_csv(url,encoding="ISO-8859-1", index_col='Variable Code')
# further filter variables based on the following principles:
# i. keep variables that are adjusted by population size: '% change', 'Percent', '# per 1,000 pop','Percentage points';
# ii. keep variables that are mostly valuable for analysis
# iii. keep variables where values are valid: e.g. no negative values for variables with units as 'Percent' or '# per 1,000 pop'.
#
#units actually kept below: 'Percent', '# per 1,000 pop', 'Dollars'
#var_info_df['Units'].isin(['Percent', '# per 1,000 pop','Dollars'])
var_info_df_subset=var_info_df[var_info_df['Units'].isin(['Percent', '# per 1,000 pop','Dollars'])]
var_subset=list(var_info_df_subset.index)
#'State' and 'County' are kept as identifying (category) columns
var_subset.extend(['State', 'County'])
#print (var_subset)
df_subset=df_merge.loc[:, var_subset]
#print (df_merge.shape)
print (df_subset.shape)
# +
# check whether each column has valid values:
#   'Percent'          -> must lie in [0, 100]
#   '# per 1,000 pop'  -> must lie in [0, 1000]
#   'Dollars'          -> must be non-negative
# Invalid values are replaced with np.nan.
# Assignment goes through df.loc[mask, col] rather than the original chained
# indexing df[col][mask], which raises SettingWithCopyWarning and may silently
# fail to write back to df_subset.
import numpy as np
for c in df_subset.columns:
    if c in var_info_df.index:
        unit = var_info_df.loc[c, 'Units']
        if unit == 'Percent':
            df_subset.loc[(df_subset[c] < 0) | (df_subset[c] > 100), c] = np.nan
        elif unit == '# per 1,000 pop':
            df_subset.loc[(df_subset[c] < 0) | (df_subset[c] > 1000), c] = np.nan
        elif unit == 'Dollars':
            df_subset.loc[df_subset[c] < 0, c] = np.nan
# +
# Group the per-year columns of each measure together: the same measure shares
# its name except for the trailing two digits (the survey year), and is grouped
# here by its (Category Name, Sub_subcategory Name) description.
from collections import defaultdict  # was missing in the original cell -> NameError
var_grouped_by_measures=defaultdict(list)
measures=list()
for var in df_subset.columns:
    if var not in ['State','County']:
        var_grouped_by_measures[(var_info_df.loc[var]['Category Name'],var_info_df.loc[var]['Sub_subcategory Name'])].append(var)
# -
n=1
var_info=list()
vars_to_keep=list()
# Average the per-year columns of each grouped measure into a single 'var<n>'
# column on df_subset, and record that column's descriptive metadata.
for measures in var_grouped_by_measures.values():
    var_name='var'+str(n)
    df_subset[var_name]=sum([df_subset[m] for m in measures])/len(measures)
    var_info.append([var_name, var_info_df.loc[measures[0]]['Category Name'],
                     var_info_df.loc[measures[0]]['Category Code'],
                     var_info_df.loc[measures[0]]['Subcategory Name'],
                     var_info_df.loc[measures[0]]['Sub_subcategory Name'],
                     var_info_df.loc[measures[0]]['Units']])
    vars_to_keep.append('var'+str(n))
    n+=1
df_subset[['PCT_LACCESS_POP10','PCT_LACCESS_POP15','var1']].head(5)
var_name_info_df=pd.DataFrame(np.array(var_info))
var_name_info_df.columns=['var_name', 'Category Name','Category Code', 'Subcategory Name', 'Sub_subcategory Name','Units']
# NOTE(review): set_index() returns a new frame that is discarded here; add
# inplace=True or assign the result if the index change was intended
var_name_info_df.set_index('var_name')
var_name_info_df.to_csv('C:/Users/cathy/Capstone_project_1/Datasets/Food_atlas/Var_name_info.csv')
#further truncate less relevant variables: var25,26,42,43,45-57
var_id_to_prune=[25,26,42,43]
var_id_to_prune.extend(range(45,58))
vars_subset=list(set(vars_to_keep).difference(['var'+str(i) for i in var_id_to_prune]))
df_subset=df_subset[vars_subset]
df_subset.shape
df_subset.to_csv(Overall_folder+'Datasets/food_environment.csv')
# Integrate CDC Datasets together
import pandas as pd
dfs=list()
sub_folder=Overall_folder+'/Datasets/CDC/'
filenames=['Diabetes_prevalence',
           'Obesity_prevalence',
           'Physical_inactive_prevalence']
for filename in filenames:
    filepath=sub_folder+filename+".csv"
    df=pd.read_csv(filepath,index_col='FIPS')
    # suffix the year columns with their source so the merged frame keeps the
    # three indicators apart: _db diabetes, _ob obesity, _phy physical inactivity
    if 'Diabetes' in filename:
        df.columns=df.columns.astype(str)+'_db'
    elif 'Obesity' in filename:
        df.columns=df.columns.astype(str)+'_ob'
    elif 'Physical' in filename:
        df.columns=df.columns.astype(str)+'_phy'
    dfs.append(df)
#merge datasets column-wise on the shared FIPS index
CDC_merge=pd.concat(dfs, join='outer', axis=1)
CDC_merge.info()
# Find the non-numeric entries in CDC_merge.
# The original counted *numeric* entries per column (inverted logic, so it
# printed for nearly every column) and recomputed the full applymap mask once
# per column; here the mask is built once and inverted.
is_numeric = CDC_merge.applymap(lambda x: isinstance(x, (int, float)))
for c in CDC_merge.columns:
    num_non_numeric = len(CDC_merge) - is_numeric[c].sum()
    if num_non_numeric > 0:
        print(c, num_non_numeric, CDC_merge[pd.to_numeric(CDC_merge[c], errors='coerce').isnull()])
#It turns out that some entries are 'No Data' or NaN, so I replace the 'No Data' with NaN values
CDC_merge=CDC_merge.replace('No Data', np.nan)
CDC_merge=CDC_merge.astype(float)
# Choose the latest prevalence of diabetes and obesity, plus the three most
# recent years of physical inactivity (averaged below), to merge with the
# food-environment data.
# .copy() makes CDC_subset an independent frame, so the assignments below do
# not trigger SettingWithCopyWarning on a view of CDC_merge.
CDC_subset=CDC_merge[['2013_db','2013_ob','2011_phy','2012_phy','2013_phy']].copy()
CDC_subset['prevalence of physical inactivity']=(CDC_subset['2011_phy']+CDC_subset['2012_phy']+CDC_subset['2013_phy'])/3
CDC_subset.head(5)
CDC_subset.rename(columns={'2013_db': 'prevalence of diabetes', '2013_ob': 'prevalence of obesity'}, inplace=True)
CDC_subset[['prevalence of diabetes', 'prevalence of obesity', 'prevalence of physical inactivity']].to_csv(Overall_folder+'Datasets/Db_ob_phy.csv')
# Integrating geography dataset (USDA rural-urban continuum codes, keyed by FIPS)
df=pd.read_excel(Overall_folder+'Datasets/geography/ruralurbancodes2013.xls')
df.head(5)
df=df.set_index('FIPS')
df.to_csv(Overall_folder+'Datasets/rural_urban_codes.csv')
df[['RUCC_2013']].to_csv(Overall_folder+'Datasets/RUCC_codes.csv')
# clean risk factors datasets (CHSI demographics + risk-factor files)
# +
filenames=['DEMOGRAPHICS','RISKFACTORSANDACCESSTOCARE']
dfs=list()
for i,filename in enumerate(filenames):
    filepath=Overall_folder+'Datasets/CHSI_dataset/'+filename+".csv"
    d=pd.read_csv(filepath)
    #append datasets to the list (State/County columns are kept here)
    dfs.append(d)
# -
print (dfs[0].head(10))
print (dfs[1].head(10))
# CHSI encodes missing data as negative sentinel values; replace them with NaN.
# NOTE(review): this elementwise comparison assumes the compared columns are
# numeric -- confirm, as mixed-type columns can raise in newer pandas.
dfs[1][dfs[1]<0]=np.nan
dfs[1].info()
#combine State FIPS code and county FIPS code to be FIPS code
#state FIPS code is interpreted as integer values so the zero before some single digits are ignored
# similarly, county FIPS code is also interpreted as integers
# to combine both as FIPS code, we first need to transform both codes to string and fill in the zeros
# to make all state FIPS codes two digits and all county FIPS codes three digits
dfs[0]['FIPS']=dfs[0]['State_FIPS_Code'].apply((lambda x:('0'+str(x))[-2:]))+dfs[0]['County_FIPS_Code'].apply((lambda x:('00'+str(x))[-3:]))
dfs[1]['FIPS']=dfs[1]['State_FIPS_Code'].apply((lambda x:('0'+str(x))[-2:]))+dfs[1]['County_FIPS_Code'].apply((lambda x:('00'+str(x))[-3:]))
#I need the population size info from demographic file
dfs[0]["Population_Size"].describe()
#I need the number of uninsured population info from risk factors file
dfs[1]['Uninsured'].describe()
sum(dfs[1]['Uninsured'].isnull())
#there are three missing values, which is a pretty small number compared to over 3000 counties
dfs[0]=dfs[0].set_index('FIPS')
dfs[1]=dfs[1].set_index('FIPS')
df_combined=pd.concat([dfs[0][['Population_Size']], dfs[1][['Uninsured']]], axis=1, join='inner')
df_combined
df_combined['frac_uninsured']=df_combined['Uninsured']/df_combined['Population_Size']
df_combined
df_combined[['frac_uninsured']].to_csv(Overall_folder+'Datasets/Uninsured.csv')
# Integrate all datasets
# +
filenames=['food_environment', 'Db_ob_phy', 'Uninsured', 'RUCC_codes']
dfs=list()
for filename in filenames:
    df=pd.read_csv(Overall_folder+'Datasets/'+filename+'.csv', index_col='FIPS')
    dfs.append(df)
# inner join keeps only the counties present in every intermediate dataset
df_merge=pd.concat(dfs, axis=1, join='inner')
# -
df_merge.info()
df_merge.to_csv(Overall_folder+'Datasets/combined.csv')
# combine state, county, fips code file into one for map
df=pd.read_csv(Overall_folder+'Datasets/Food_atlas/Supplemental_data_county.csv',encoding="ISO-8859-1", index_col='FIPS')
df.info()
# Vectorized lowercase: equivalent to apply(lambda x: x.lower()) for string
# cells, but idiomatic, faster, and NaN-safe (apply would raise on NaN).
df['State']=df['State'].str.lower()
df['County']=df['County'].str.lower()
df[['State', 'County']].to_csv(Overall_folder+'Datasets/state_county_name.csv')
| Codes/.ipynb_checkpoints/Data Integration-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="32c83f2a-74d1-4407-a5d8-5c9b7d1289ca" _uuid="245e48b54470656b3e992b7f1b14daa3735f51a3"
import numpy as np
import pandas as pd
# %matplotlib inline
pd.set_option('display.max_rows', 100)
df = pd.read_csv("../input/cwurData.csv")
df
# + _cell_guid="b2e1ea0e-e0db-415b-829f-3370f33e95a0" _uuid="770c8adf3dbac48be0ebe5d24aa94e61703f90c6"
# Table rows about Estonian universities
df[df["country"] == "Estonia"]
# + _cell_guid="e33c7ad2-1286-4f0f-93bb-8a9b4beb56cc" _uuid="7ec08e9fab421be1b7f440585689fb56deb212b0"
# Mean quality-of-education indicator grouped by country
df.groupby(["country"])["quality_of_education"].mean()
# the line below gives the same table in a different layout; remove the "#" to run it
#df.groupby("country").aggregate({"quality_of_education": ["mean"]})
# + _cell_guid="e3980bb0-807f-4d09-b13b-8415d26fb581" _uuid="6bbcb2381863fa4372c94884b46af84c051d5871"
# Countries by mean quality-of-education score, in descending order
a = df.groupby(["country"])["quality_of_education"].mean()
a.sort_values(ascending=False)
# + _cell_guid="6b01aec2-d4ad-4b97-ab19-784bdbf3146e" _uuid="571195d5ad0ec90885811ec5d7804d9663080208"
# How many times each country's universities appear in the table
koolid = df.groupby(["country"])["year"].count()
koolid.sort_values(ascending=False)
# + _cell_guid="74283431-4763-4e2c-82a8-3f4723406061" _uuid="9bd635fdacce9a0bf0c9b4165a391ef16918426d"
# How many times each country's universities appear, restricted to the 2015 results
a2015 = df[df["year"] == 2015]
koolid2015 = a2015.groupby(["country"])[("year")].count()
koolid2015.sort_values(ascending=False)
| downloaded_kernels/university_rankings/kernel_184.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="IryROr9SmZTe" colab_type="code" outputId="f4e015b6-f2bf-496d-81de-64e6efc02d03" colab={"base_uri": "https://localhost:8080/", "height": 122}
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] id="ZfjLcphiY20p" colab_type="text"
# # Getting the dataset and installing necessary libraries
# Dataset link : https://data.vision.ee.ethz.ch/cvl/gfanelli/head_pose/head_forest.html#db
# + id="Kw8pJITNfrIl" colab_type="code" outputId="3ec56c42-add2-4293-9f36-6ac196214441" colab={"base_uri": "https://localhost:8080/", "height": 272}
# !wget http://data.vision.ee.ethz.ch/cvl/gfanelli/kinect_head_pose_db.tgz
# + [markdown] id="hC4afLsOaE0-" colab_type="text"
# Extracting the .tgz file
# + id="jnBw54DqfvMV" colab_type="code" colab={}
# !tar -xf '/content/kinect_head_pose_db.tgz'
# + id="d5JmXRWwfDjN" colab_type="code" colab={}
import os
import cv2
from math import atan2, asin
import numpy as np
import pandas as pd
import dlib
import math
import matplotlib.pyplot as plt
from tqdm import tqdm_notebook as tqdm
from torch.utils.data import DataLoader, Dataset, sampler
from torch.optim.lr_scheduler import ReduceLROnPlateau
from albumentations.pytorch import ToTensor
from albumentations import (HorizontalFlip, ShiftScaleRotate, Normalize, Resize, Compose, GaussNoise)
import torch
from torchvision import transforms
import torch.nn as nn
from torch.nn import functional as F
import torch.optim as optim
import torch.backends.cudnn as cudnn
import torchvision.models as models
import time
# + id="PNjjv_XuZIPN" colab_type="code" outputId="880e2400-5c64-4102-ab53-6a662852917d" colab={"base_uri": "https://localhost:8080/", "height": 476}
# !wget http://dlib.net/files/mmod_human_face_detector.dat.bz2
!7z x "mmod_human_face_detector.dat.bz2"
detector = dlib.cnn_face_detection_model_v1("mmod_human_face_detector.dat")
# + [markdown] id="yzmazpEIZIqP" colab_type="text"
# # Useful functions
# + [markdown] id="IowtBOkKcJGs" colab_type="text"
# Function to get the path
# + id="pk0MxqldjQYH" colab_type="code" colab={}
def PathList(path):
    """Collect the paths of every .png file beneath *path* (walked bottom-up)."""
    matches = []
    for root, _dirs, names in os.walk(path, topdown=False):
        matches.extend(root + '/' + name for name in names if name[-3:] == 'png')
    return matches
# + [markdown] id="BMEeRsYkZe34" colab_type="text"
# Here ImagesPath is a list that contains all the .png files from the dataset
# + id="asB6nok3Zdx6" colab_type="code" colab={}
ImagesPath = PathList("hpdb/")
# + [markdown] id="Btbc9Ol-Zni2" colab_type="text"
# This function is used to convert the transformation matrix from
# all the .txt files to Yaw, Pitch and Roll
# + id="EHZrxlFBZWYK" colab_type="code" colab={}
def getAngles(path):
    """Parse the 3x3 rotation matrix in a BIWI pose .txt file.

    Reads the first three whitespace-separated rows of *path* and returns the
    head-pose Euler angles (yaw, pitch, roll) in degrees.
    """
    with open(path, "r") as fh:
        rows = fh.read().split("\n")
    rot = np.asarray([row.split(" ")[:3] for row in rows[:3]]).astype(float)
    rad2deg = 180 / math.pi
    roll = atan2(rot[2][1], rot[2][2]) * rad2deg
    pitch = asin(rot[2][0]) * rad2deg
    yaw = -atan2(rot[1][0], rot[0][0]) * rad2deg
    return yaw, pitch, roll
# + id="1Ta4UV4Yaiux" colab_type="code" colab={}
# !mkdir cropped_Images
# + id="ozn0FvGV6Yqo" colab_type="code" colab={}
# Create one output sub-directory per person (01-24) for the cropped faces.
# os.makedirs(..., exist_ok=True) also creates the parent 'cropped_Images'
# folder if it is missing and avoids the check-then-create race of the
# original os.path.exists + os.mkdir pair.
for i in range(1,25):
    path = "cropped_Images/"+"{:02d}".format(i)
    os.makedirs(path, exist_ok=True)
# + colab_type="code" id="VCV9ZE-LIkwk" colab={}
a = np.load("/content/drive/My Drive/CropSizeList.npy")
# + id="-VLRSx4ephBz" colab_type="code" colab={}
def SaveImages(CropSizeList, ImagesPath):
    """Crop every RGB frame with its person-specific box and write it under cropped_Images/.

    CropSizeList: per-person (top, bottom, left, right) crop boxes, indexed by person id - 1.
    ImagesPath: list of 'hpdb/<person>/<frame>_rgb.png' paths; characters 5-6 of each
    path hold the zero-padded person number.
    """
    for i in tqdm(range(len(ImagesPath))):
        PersonNumber = int(ImagesPath[i][5:7])  # person id parsed from the path
        top, bottom, left, right = CropSizeList[PersonNumber - 1]
        img = cv2.imread(ImagesPath[i])[top:bottom, left:right]
        # mirror the hpdb/ directory layout inside cropped_Images/
        cv2.imwrite("cropped_Images/" + ImagesPath[i][5:], img)
# + id="xOhPTnQwq-CF" colab_type="code" outputId="949792d2-1287-4b27-b8ab-e062a6250306" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["ce5e77170e45441a9ecc2aa9db5d8dda", "2edf041302ce4b3db4777e11eaac0bc3", "8466e28e14d74cf9ac266ea2e4129668", "104ee4415f19460c8d119f1b8f4d678d", "0e6f64b7a9ed4caa8ae0dc0ae496b0cb", "2d2ee7bf8e93429e86ef8e0a96eb2f43", "7ef66550b9914bed990a32c20c1b390c", "7e01aa1d092a4dc7945a49ba23553291"]}
SaveImages(a, ImagesPath)
# + id="44clDxZwrWkm" colab_type="code" colab={}
CroppedImagesPath = PathList("cropped_Images/")
# + [markdown] id="j-HK9CCcaNJb" colab_type="text"
# This function is used to crop out unnecessary background from the .png files which will later be used for the face extraction
# + id="vzH2JcdvLz0I" colab_type="code" colab={}
# def crop():
# j = 1
# k = 1
# rows = 24
# columns = 2
# fig=plt.figure(figsize=(20, 100))
# for i in tqdm(range(1, rows*columns + 1)):
# if i%2 == 0:
# img = plt.imread("hpdb/" + "{:02d}".format(j) + "/frame_00005_rgb.png")
# j += 1
# else:
# img = plt.imread("hpdb/" + "{:02d}".format(k) + "/frame_00005_rgb.png")[65:-55,200:540]
# k += 1
# fig.add_subplot(rows, columns, i)
# plt.imshow(img,cmap = 'gray')
# plt.show()
# + id="W2GAoP8SiDSG" colab_type="code" colab={}
# crop()
# + id="ursQx83NKNNN" colab_type="code" colab={}
# # !rm -r cropped_Images
# + [markdown] id="POPhqLQ0cV7j" colab_type="text"
# Making a seperate directory to store the faces
# + [markdown] id="r5oixSc8cak3" colab_type="text"
# This function will get the faces and save them in the directory created above
# + id="ewwWC3kGJcKl" colab_type="code" colab={}
# def ExtractAndSaveFaces(detector,ImagesPath):
# jpg_crop_path = []
# count = 0
# count_fail = 0
# for i in tqdm(range(len(ImagesPath))):
# image = cv2.imread(ImagesPath[i])[100:-75,200:540]
# dets = detector(image, 0)
# if len(dets) == 1:
# crop_img = image[dets[0].rect.top()-30:dets[0].rect.bottom()+30,dets[0].rect.left()-30:dets[0].rect.right()+30] # cropping the image with 15 extra pixels on all side
# if crop_img.size == 0:
# count_fail+=1
# continue
# cv2.imwrite("cropped_Images/" + ImagesPath[i][5:] , crop_img) # writing the image to directory
# jpg_crop_path.append("cropped_Images/" + ImagesPath[i][5:])
# count = count+1
# print(str(count) + " image written")
# print(str(count_fail) + " image failed")
# return jpg_crop_path
# + [markdown] id="zRkF-fmgpt0K" colab_type="text"
# Getting the path of final images that will be fed to the model
# + id="Grqp7QbGP7jf" colab_type="code" colab={}
# CroppedImagesPath = ExtractAndSaveFaces(detector,ImagesPath)
# + [markdown] id="Qz6_ujSOcp0u" colab_type="text"
# This function will return a dataframe which contains the image id along with its yaw pitch and roll
# + id="haLaP3HiqTld" colab_type="code" colab={}
def getDataframe(CroppedImagesPath):
    """Split cropped-image paths into train/val/test frames with pose angles.

    Persons 01-16 go to train, 17-20 to val, 21-24 to test. Each row holds the
    person id, frame id, and the (pitch, yaw, roll) parsed from the matching
    hpdb/<person>/<frame>_pose.txt file.

    Row-wise DataFrame.append is deprecated (removed in pandas 2.0) and
    quadratic; rows are accumulated in plain lists and converted once.
    """
    cols = ['Person','image','pitch','yaw','roll']
    train_rows, val_rows, test_rows = [], [], []
    for i in tqdm(range(len(CroppedImagesPath))):
        yaw, pitch, roll = getAngles("hpdb/"+CroppedImagesPath[i][15:30]+"pose.txt")
        person = int(CroppedImagesPath[i][15:17])  # zero-padded person id in the path
        record = {'Person':CroppedImagesPath[i][15:17],'image':CroppedImagesPath[i][18:29],'pitch':pitch,'yaw':yaw,'roll':roll}
        if person < 17:
            train_rows.append(record)
        elif person <= 20:
            val_rows.append(record)
        else:
            test_rows.append(record)
    df_train = pd.DataFrame(train_rows, columns=cols)
    df_val = pd.DataFrame(val_rows, columns=cols)
    df_test = pd.DataFrame(test_rows, columns=cols)
    return df_train, df_val, df_test
# + [markdown] id="AqjPRxQg7V4h" colab_type="text"
# # Dataset Class
# + id="9oMnTqGGiZ4W" colab_type="code" colab={}
class BIWIDataset(Dataset):
    """Dataset of cropped BIWI head-pose faces with (pitch, yaw, roll) targets.

    df: frame with columns ['Person','image','pitch','yaw','roll'] for one phase.
    path: list of cropped-image paths; characters 15-16 of each path encode the
    person id (01-16 train, 17-20 val).
    phase: 'train' or 'val'.
    """
    def __init__(self, df, path, phase):
        self.df = df
        self.phase = phase
        self.transform = ToTensor()  # NOTE(review): created but never applied in __getitem__
        self.path = path
        # split the path list by person id: 01-16 -> train, 17-20 -> val
        self.train = [path[i] for i in range(len(path)) if int(path[i][15:17]) <= 16]
        self.val = [path[i] for i in range(len(path)) if int(path[i][15:17]) > 16 and int(path[i][15:17]) <= 20]
        self.fnames = self.df.index.tolist()
    def __getitem__(self, idx):
        # NOTE(review): this assumes the order of self.train/self.val matches the
        # row order of self.df -- confirm, otherwise images and angles misalign
        if(self.phase == 'train'):
            Image = cv2.imread(self.train[idx])
            Angle = np.array(self.df.iloc[idx,2:])  # pitch, yaw, roll columns
        elif(self.phase == 'val'):
            Image = cv2.imread(self.val[idx])
            Angle = np.array(self.df.iloc[idx,2:])
        Angle = Angle.astype('float64')
        Image = cv2.resize(Image,(120,120))  # network input resolution
        Image = Image.transpose(2,0,1) # Making Channel First
        Image = torch.from_numpy(Image).type(torch.FloatTensor)
        Angle = torch.from_numpy(Angle).type(torch.FloatTensor)
        return Image, Angle
    def __len__(self):
        # length follows the dataframe supplied for this phase
        return len(self.fnames)
# + id="Bt2YH2U8uB7v" colab_type="code" outputId="2fd70951-6869-450b-d6a2-3f94a1f1e125" colab={"base_uri": "https://localhost:8080/", "height": 66, "referenced_widgets": ["7f367f484cfd4b808fb0159b9601a9c1", "<KEY>", "7cf1f98f96ed406fa3b6e5825de40f95", "<KEY>", "8f6deb5db82e4a82ac05d45e21ecef07", "d2a1c5de653c4d3faf1801f784aef6dc", "a6ea9ffbd9eb41ec97bb31c402ec5316", "692ec0aa411848b1a2ccda747fcd3e9b"]}
df_train, df_val, df_test = getDataframe(CroppedImagesPath)
# + [markdown] id="WmlOBO6f7d1i" colab_type="text"
# This function provides the training and validation dataloader accordingly
# + id="tJo17moB8K8F" colab_type="code" colab={}
def provider(path, phase, df_train, df_val, batch_size=8, num_workers=0):
    """Build a shuffled, pinned-memory DataLoader for the requested phase."""
    if phase == "train":
        frame = df_train
    else:
        frame = df_val
    dataset = BIWIDataset(frame, path, phase)
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=True,
        shuffle=True,
    )
    return loader
# + [markdown] id="ZSicllGN7lnd" colab_type="text"
# # Trainer Class
# + id="yshhTEvEiarj" colab_type="code" colab={}
class Trainer(object):
    '''Runs training and validation of the head-pose model.

    Trains with L1 loss on (pitch, yaw, roll), accumulates gradients to an
    effective batch of 32, reduces the LR on a validation-loss plateau, and
    checkpoints the best model to ./model.pth.
    '''
    def __init__(self, model, path, df_train, df_val):
        self.num_workers = 0
        self.batch_size = {"train": 16, "val": 4}
        # step the optimizer every `accumulation_steps` minibatches -> effective batch 32
        self.accumulation_steps = 32 // self.batch_size['train']
        self.lr = 5e-5
        self.num_epochs = 25
        self.best_loss = float("inf")
        self.phases = ["train", "val"]
        self.device = torch.device("cuda:0")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
        self.net = model
        self.df_train = df_train
        self.df_val = df_val
        self.criterion = torch.nn.L1Loss()  # mean absolute error over the 3 angles
        self.optimizer = optim.Adam(self.net.parameters(), lr=self.lr)
        self.scheduler = ReduceLROnPlateau(self.optimizer, mode="min", patience=3, verbose=True)
        self.net = self.net.to(self.device)
        cudnn.benchmark = True
        # one DataLoader per phase, built by the module-level provider()
        self.dataloaders = {
            phase: provider(
                path = path,
                phase=phase,
                df_train = df_train,
                df_val = df_val,
                batch_size=self.batch_size[phase],
                num_workers=self.num_workers
            )
            for phase in self.phases
        }
        self.losses = {phase: [] for phase in self.phases}
    def forward(self, images, targets):
        # move a batch to the GPU, run the net, and compute the L1 loss
        images = images.to(self.device)
        target = targets.to(self.device)
        outputs = self.net(images)
        loss = self.criterion(outputs, target)
        return loss, outputs
    def iterate(self, epoch, phase):
        # one full pass over the dataloader of `phase`; returns the mean epoch loss
        start = time.strftime("%H:%M:%S")
        print(f"Starting epoch: {epoch} | phase: {phase} | ⏰: {start}")
        batch_size = self.batch_size[phase]
        self.net.train(phase == "train")  # train(False) puts the net in eval mode for val
        dataloader = self.dataloaders[phase]
        running_loss = 0.0
        total_batches = len(dataloader)
        tk0 = tqdm(dataloader, total=total_batches)
        self.optimizer.zero_grad()
        for itr, batch in enumerate(tk0): # replace `dataloader` with `tk0` for tqdm
            images, targets = batch
            loss, outputs = self.forward(images, targets)
            # scale so the accumulated gradient averages to the true batch loss
            loss = loss / self.accumulation_steps
            if phase == "train":
                loss.backward()
                if (itr + 1 ) % self.accumulation_steps == 0:
                    self.optimizer.step()
                    self.optimizer.zero_grad()
            running_loss += loss.item()
            outputs = outputs.detach().cpu()
        epoch_loss = (running_loss * self.accumulation_steps) / total_batches
        torch.cuda.empty_cache()
        print(f'loss:{epoch_loss}')
        return epoch_loss
    def train_end(self):
        # persist the per-epoch loss history to CSV
        train_loss = self.losses["train"]
        val_loss = self.losses["val"]
        df_data=np.array([train_loss,val_loss]).T
        df = pd.DataFrame(df_data,columns = ['train_loss','val_loss'])
        df.to_csv("Training_log.csv")
    def start(self):
        # main loop: train, validate under no_grad, schedule LR, checkpoint best model
        for epoch in range(self.num_epochs):
            train_loss = self.iterate(epoch, "train")
            self.losses["train"].append(train_loss)
            state = {
                "epoch": epoch,
                "best_loss": self.best_loss,
                "state_dict": self.net.state_dict(),
                "optimizer": self.optimizer.state_dict(),
            }
            with torch.no_grad():
                val_loss = self.iterate(epoch, "val")
                self.losses["val"].append(val_loss)
            self.scheduler.step(val_loss)
            if val_loss < self.best_loss:
                print("******** New optimal found, saving state ********")
                state["best_loss"] = self.best_loss = val_loss
                torch.save(state, "./model.pth")
            print()
        self.train_end()
# + [markdown] id="p-6-9Ic87wRh" colab_type="text"
# #Getting the model and Training
# + id="YAvySwnnbBtL" colab_type="code" outputId="c12befbf-8b08-4808-80bf-35889abf2933" colab={"base_uri": "https://localhost:8080/", "height": 224}
# !pip install efficientnet_pytorch
# + id="QR3KC3QxnvrB" colab_type="code" colab={}
from efficientnet_pytorch import EfficientNet
# + id="vLz1_p1AnvuF" colab_type="code" outputId="5d26c6e2-bbb7-40b6-a235-acc3c25890f1" colab={"base_uri": "https://localhost:8080/", "height": 68}
# Load an EfficientNet-B3 backbone with ImageNet-pretrained weights.
model = EfficientNet.from_pretrained('efficientnet-b3')
# + id="9_nkhKX9tjs5" colab_type="code" colab={}
# model = models.resnet50(pretrained = True)
# + id="QzHzcXXAJBiP" colab_type="code" colab={}
# Replace the final fully-connected head so the network outputs 3 values
# (presumably yaw/pitch/roll angles for head-pose regression -- TODO confirm).
num_ftrs = model._fc.in_features
model._fc = nn.Linear(num_ftrs, 3)
# + id="WeHEI7Qziatm" colab_type="code" outputId="eb7a1cb1-05b2-4899-dcca-409677a51593" colab={"base_uri": "https://localhost:8080/", "height": 1000, "referenced_widgets": ["152be1d7340841fcaa93a9ad204776fe", "edfdf4be29b5415d9677aceb62071ade", "<KEY>", "3de439991c064eb8ae7ee392a0867fb5", "7915c14aadde4eb2bdc8402e9a427c8f", "171b014ef0fe41ccaf8f9f80ec088c11", "<KEY>", "eade662aaedb4e579fd4322f1e2432d9", "a71f11e427fc401899361cc0ea4a874d", "d5eee4a368b5434ca6410b5bb1327689", "<KEY>", "85338b0915824d7a9bc0c73a3720088b", "7e35fe7bc6c042fe98b0b99ea5617880", "dd2e858ec65d4c12b0b525e0231b19ac", "db69af0145a442e8baf239f7940deceb", "d74ef9f3a2084ee48e1cd735ccbe9256", "60300449d56043b0b22fed21afba50c7", "3e5c44956abb4413a5e93e80d89881d6", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "b1b37977e06d4bb894c15c3e72948c58", "76e5a5c12432485ea07b07e642c7f61f", "<KEY>", "0756731d90d04c79a96872543d37cacb", "cf39ebeceb76420096dab16a7dad44b5", "<KEY>", "4895403a981b468a8cc5574935a38279", "<KEY>", "287826880f08441f81407a3efdf0e292", "42ee9f3e7a0e473f9eb0b023c102e5f1", "<KEY>", "<KEY>", "728c6e1bd0df4daba1ded2c62322ecba", "<KEY>", "477b07287938482eb306123212404d2d", "<KEY>", "c55864fa74fd4ed8a9bd764d06864c4c", "21fa4995019a483387010f173ab77773", "<KEY>", "<KEY>", "<KEY>", "a58464ce328148d1a02f45a31cbb9b68", "<KEY>", "<KEY>", "a1c0e5df767843cda2e116519186a81f", "2e6d6d5906a7451abe896a4a858031d3", "<KEY>", "<KEY>", "<KEY>", "700da18e62b342ed8c91658150749afa", "1fd63d551f544feeb3ecb9f49e8e96d2", "2ba5d15810434fa4b58122add4bb61f6", "3f2632d3040f47de81e171f5b8a49064", "a6799fc3ec6f461c91aa7cbb63a330ca", "<KEY>", "f04a18e4d3754adfa7060e0ce41cb3b5", "90466ca6d55348b684dcc2f7966e1e08", "<KEY>", "<KEY>", "367e531904a4403794bbe209b0d0b916", "5756a95208524fd9b137a8d6f81da564", "<KEY>", "6a90a16af2ba40818de3bf722a81be47", "<KEY>", "b30a0978ca474563b5154668e848eafc", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "7fe3adee5fd5481babb8cbe0d7cfdafe", "<KEY>", "fa86c7e477d04699b4513a548220ff83", "<KEY>", "<KEY>", 
"02ec8af643ca43a19f8678475e9c9e48", "b81262d1eb914476831977b64e1df8d0", "<KEY>", "<KEY>", "ee2369ea3b6a473797c8219553d9b067", "a0ccaf9b74404a899078a446789d792b", "dcc70768596e4d0f9c61561ee77cd813", "fcbcab29a1594edd9453f46ece4f1d4f", "d7fab85bc9ef452091dcbb9e4cca1db6", "<KEY>", "<KEY>", "4074ac8ee33d4f38815f3c955a96ee8d", "<KEY>", "5b3aa3b874ac4a73b9a4da1cb6103026", "<KEY>", "<KEY>", "<KEY>", "696ee5b7d9504dc187caae68e657ac5e", "<KEY>", "<KEY>", "5dfda91dbb524859af84d66d715296fa", "ee61d823f48e40418fdb299d1a519930", "<KEY>", "<KEY>", "<KEY>", "fd914f9023ef49939b6478f901488499", "<KEY>", "<KEY>", "<KEY>", "cdde3d540cf847739a2d2588305c2921", "<KEY>", "<KEY>", "<KEY>", "033f02673a9e477688619c5690ddec94", "<KEY>", "0e1be812903e4c0ebc419a77387db4fe", "<KEY>", "175c4239810a41468f443a465faa5249", "798abe6ca77148b1ad1bf1b9e828400d", "<KEY>", "28a035577e52438b853a42bfeedb11e4", "6ac355a65e6e4d7eb16287ef419ac22f", "<KEY>", "cdf24ad639784d42a5ad0fb6dc965433", "<KEY>", "6e8d941f95284280acde3fdd7337aa84", "b8536082d29d4de59ff7ff655d67fbd4", "11cde82f8d3647618604b01e80d096a6", "<KEY>", "68ccd502e2a94cefa3273433e9f07b0c", "15d9650f02964ba0be86391b265be9f0", "<KEY>", "<KEY>", "62d7df372a0645d3817199c87a1a884f", "27aa5d0f9a4c4b2ba451662c91aa2cac", "7055a127207942c7b175ec4cae1fdce7", "<KEY>", "af4c42076d92400a8f03eb2e50c47d41", "61561c9c590c4ea3ad909429417d2779", "<KEY>", "<KEY>", "029595df469c432ebd1a1cf3ece0fec9", "<KEY>", "<KEY>", "8defd36be5e84469877aa3abdee04727", "1ed325498acc4ecf8f3fe051f9808b5a", "<KEY>", "<KEY>", "70807dced0dc401eb108ffd84f3d7879", "5e0a7204750c45c3a23c0d1805403c73", "99e9a99f8f6c453c96c78e85505390ba", "e112aa2266b644519f8bed0e3b462402", "a4f827ecc8eb4cf682e2f45135ee8b76", "<KEY>", "<KEY>", "<KEY>", "0ab1174d36e94018a199ba6bd46a8c57", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "afc48e951a084857ae862ef3f974a7d2", "<KEY>", "<KEY>", "89a32ac8cbce4e4b8505a4588d898b06", "<KEY>", "75d7b4b5b85d4b288e2aa8ffaf4940fa", "<KEY>", 
"19b5bae7de8b4d40adaf2da085745880", "f71515b8f88a44baa607d563a8f8f980", "94d6b4a52e9549cf8f67f96473e0cde0", "<KEY>", "4f39cbfd1d03421fb7f5458efafe6fa8", "fc5ef7ebd6fa4905a6506d235ca88f0a", "9e59fbfaddbe462da427ef208c592c95", "<KEY>", "a31340a694134a5dbc265e394369de61", "947fef841e354d2eae2bbc1ac993f1b9", "<KEY>", "b3c52ea5302b476e9b3cdf71f2dc8af9", "<KEY>", "899a29106a4d49d1b06c13e78ba1f66a", "<KEY>", "<KEY>", "87a3036894c94f3397f5d730e932f3ff", "fb334664f5314b34ace926ecce622d10", "57db44bd24de45f8a0e9a63fdf2c311a", "<KEY>", "5d6f494652b441a18b7074dc896e8ea1", "<KEY>", "de0f38221cd1439d87d1e4beef4ee195", "<KEY>", "7f7ea6f724894fbb94053a1c7018d215", "<KEY>", "729ba53713914d9fa9da7398a61776c3", "<KEY>", "0c84714eca0742bfad8b862d46736e41", "c5853ec3d2614803a2dc5ce89e3e717b", "<KEY>", "ca7184fb9e4b4ea39445b4400e86e9b2", "<KEY>", "9abac9a7c2394b5ab475ac2d66095af5", "<KEY>", "ec5f017a5caa4dc988200d1628e0f088", "83ca67ad19e24e5b90915a0187758618", "<KEY>", "aea1638355a540fa9e9e5766f4f1ed4e", "d42f5a6791a548778da79caa86a0da2b", "<KEY>", "943ae2e11d104e6db461ee54f6b51ff5", "<KEY>", "dc1e1d339ce44ffd9937e7cf911db25b", "522c3cc0259343f994c1ec9c42ca7ce6", "<KEY>", "5cc80be92dd24ce2b3e45511b522d72d", "dfff52e4c6504b9f9fca2bef4004daca", "96794f1e203d4350baf1e2d76a0d68a8", "20982500aea644689c02fe8f1ca8e31a", "f250225fa4b047dda3d8f9b9a014aa13", "5593196a691a4ebdb8a48380b19abe26", "4dcd71f29eb544279a4edd30fe418afd", "63b19b9596e7465098f5c497f1493b3e", "c9ed25b79d7144d38de69e215d545931", "77545950ff62453fa16a84d23fb6eaac", "9be1653765814db48d428ff85aaa6580", "b07b4931e6fd492d9c47fa50b4ec05f9", "919a2b5c9574466687dea2f41cde7ac6", "<KEY>", "b569a87952d44fd9b77098031073d9c5", "289c5ad3e4ea41b2b8e7ced5cf00ae90", "58f57599d7a643b78955acfe0ba5656b", "<KEY>", "ac30ce9211a940a9b0b31f7782f9126a", "f063051056c74ec5aa7e8e12218d217d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "0542f61d2b1a474f8440f486bbd017e8", "15341d1596dd4907ac9e2a4e6b789add", 
"9f853d518051485b92a8cd5028f33eae", "<KEY>", "46d15d9b5c5c428f96fafb25ced35045", "2e3c4555a5854c5a9738b675da609a08", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "2d76e15721ea4051b8420bd77437a630", "5c6a1385fbd049048fe0f31be6da8aeb", "<KEY>", "d22232ececc44a74afa91969a3bb7c91", "<KEY>", "97ba00500d5644aabe58c64fac2d2f14", "<KEY>", "be16c818881a4c98aba307a789613c43", "13820f25f5374a75a2c0b42ec3263256", "<KEY>", "10d2dba8ba1f4a77bc42268a65bd1f12", "d5ea79f7db7748d4a0657d517b952127", "e0258fe1511d411b9fa9ff1be286a182", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "47f631dbbe9e4a18bfe46c7565cfda81", "f35e084aca934f49829532135f6efdbf", "6252da5bb9e047bf956c251e9add11ff", "641bbb22386742e78097acf391935224", "<KEY>", "ce946ed3afaa4fafab1221e24a93592d", "e64613eadb534a16a6548484da786080", "<KEY>", "946eb67088bd483ca20021d975f6616e", "dd847b1bd581420ab330a6f6cedecb07", "e433e88507034e15959a99a7776b7dee", "<KEY>", "f071b56d46024234a1562e595c5e389f", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "e86e4d7e6a5f4befbeefe3007106dc65", "<KEY>", "<KEY>", "ff2fc86354934509902e787b85f6e69a", "812b22e2fc404a64b06eeae1fbae6dbe", "eade11cdfe6e4471820e93b66f9f28d5", "a7ea6c68f27b44eeb7615927316e9c97", "c176e5f0006e4f80929e0dc7b81e28ca", "<KEY>", "5e4aea1d4e524ca78084efc50a69d599", "e3f6e11700804c849a8da1ee667605c4", "97fcea04893a47e38f1e7ac561895344", "<KEY>", "6d5f8463a14746e4882ead459ac7400f", "<KEY>", "<KEY>", "11ec5980a75d4bd6925864edd94768a5", "099c62376f2b41a59b8eab501065780a", "<KEY>", "68fbe7942eb743ba9f98f74750b8c153", "2c356bb1852d402e82a76b8e42246236", "4c66f9c13557496e8af5ff08efd1797b", "<KEY>", "<KEY>", "<KEY>", "186bd5eb81d5402e8d43a18b38f5ac7c", "7ff39d28f46748349ef2de452c96e6ac", "d8d2c648d348474693f8474306e6deb6", "4b3883c6c3844e968921b160a51f1302", "a880e58357c14e96993dc27da4866d3e", "fd49e70126894ce8ba6264f51dbe9244", "<KEY>", "1f4ade7d395e4a5181ad8aaf5aafa47c", "<KEY>", "<KEY>", "9de1eef4b6844e4c89c43c02da6ccac9", "<KEY>", "<KEY>", "<KEY>", "7e7a476eb21d4bfe876a95ca0cb55acc", 
"<KEY>", "020a29ce13834f529c0ff4413ad9def9", "<KEY>", "<KEY>", "<KEY>", "802683ba720b4642b6d7b512e8f3d886", "<KEY>", "e2214e5536f8436bbd5edbacfeefc4a8", "<KEY>", "01ca5d0c1cb2464b90c7a7850d9a04cc", "<KEY>", "<KEY>", "476b396842b142d8974055a406f0c0ed", "948cb7e07c3748409ff476f26dfee4b1", "ecc4726afe9645cdb0e8084c3bd8ec7b", "646b61a1e93948e69f9a54fa04d15e5e", "f0c4c7a953984ddbb50ea7e65c8368ed", "<KEY>", "<KEY>", "b87cc9a34d164e67929ef312b0ff2d7e", "5570bb1b2d1e4be7b1d290fad9f02129", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "089532a748d442278c07dbc20e9a3b19", "b946ef764db74382ac433715a83cf3d1", "fb16db6bec1d44269c1331e1f00dddee", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "ce875cf2a73a467186eb57e1b6a61b39", "f108691971ca4c678ae04b5dd72a49ca", "<KEY>", "cedf696fe6c24ec59bf83e7e157a52d1", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "9806e9c55ede4689a57f2581799c0e8c", "<KEY>", "<KEY>", "8febc215ca1342b29a5d31c8acb15f1a", "953f01194a2d4f89a554c18647adfc68", "869fdfc02cee4e0bb70f5557f450a332", "<KEY>", "6d5dd57121514481907f4ebede5402a9", "<KEY>", "1d2b62c4b7b94290840e23a527c007e9", "b59d40b604d0490d82b6e29c01e7327d", "4dfe83d394134615adba5fbe070ddc64", "<KEY>", "<KEY>", "09def4a5570f459ca2aae3b54678a636", "<KEY>", "c56fa802d172409eba530f3118df9c72", "<KEY>", "<KEY>", "de12c50fe7ab4087a681e4c938c784ad", "<KEY>", "<KEY>", "<KEY>", "9554ef08f878473899b95f23ada5c925", "<KEY>", "<KEY>", "40a319dcdcc14918a69f34dad7306678", "<KEY>", "<KEY>", "6767d1004f8f456bbd363a4dfa4bfe26", "b80ed8d0687a4cd2ac8da0d0f9f6dda2", "2ee5c780813c4f71ba975eb986ef24fc", "fa58a56d523241eba7945eaf946584bc", "810158461f0e4a7dbef5b87d82fd0405"]}
# Train the modified EfficientNet on the cropped-image train/val splits.
# NOTE(review): `CroppedImagesPath`, `df_train` and `df_val` come from
# earlier cells -- confirm they are in scope when this cell runs.
model_trainer = Trainer(model, CroppedImagesPath, df_train, df_val)
model_trainer.start()
# + id="PKOpfAxxnvxL" colab_type="code" outputId="492e00f9-09e4-45b1-e020-26979a5b5b64" colab={"base_uri": "https://localhost:8080/", "height": 1000}
# Switch to inference mode (disables dropout, freezes batch-norm statistics).
model.eval()
# + id="VylGPyuNBuHA" colab_type="code" colab={}
| HeadPoseEstimation_V4.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Make a theta-square plot
#
# This is a basic example to analyze some events and make a $\Theta^2$ plot
# %matplotlib inline
# +
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from astropy.coordinates import SkyCoord, AltAz
import matplotlib.pyplot as plt
import numpy as np
from ctapipe.io import event_source
from ctapipe.visualization import CameraDisplay
from ctapipe.instrument import CameraGeometry
from ctapipe.calib import CameraCalibrator
from ctapipe.reco import HillasReconstructor
from ctapipe.image import hillas_parameters, tailcuts_clean
from ctapipe.utils import datasets
# -
# Get source events in MC dataset. Here we stop at 10 events, just to make this
# example run fast, but for real use, one would need more statistics.
filename = datasets.get_dataset_path("gamma_test_large.simtel.gz")
source = event_source(filename, allowed_tels={1, 2, 3, 4}, max_events=10)
reco = HillasReconstructor()
calib = CameraCalibrator(subarray=source.subarray)
horizon_frame = AltAz()
off_angles = []
for event in source:
    # calibrating the event
    calib(event)
    hillas_params = {}
    # pointing direction of the telescopes
    telescope_pointings = {}
    subarray = event.inst.subarray
    # get hillas params for each event in different telescopes
    for tel_id in event.dl0.tels_with_data:
        # telescope pointing direction
        telescope_pointings[tel_id] = SkyCoord(
            alt=event.mc.tel[tel_id].altitude_raw * u.rad,
            az=event.mc.tel[tel_id].azimuth_raw * u.rad,
            frame=horizon_frame
        )
        # Camera geometry required for Hillas parametrization
        camgeom = subarray.tel[tel_id].camera.geometry
        image = event.dl1.tel[tel_id].image
        # BUG FIX: copy before cleaning. The original `cleaned_image = image`
        # only aliased the array, so zeroing rejected pixels below silently
        # mutated the calibrated image as well.
        cleaned_image = image.copy()
        # create a clean mask of pixels above the threshold
        cleanmask = tailcuts_clean(
            camgeom, image, picture_thresh=10, boundary_thresh=5
        )
        # set all rejected pixels to zero
        cleaned_image[~cleanmask] = 0
        # Calculate Hillas parameters; this fails for fully-cleaned images.
        try:
            hillas = hillas_parameters(camgeom, cleaned_image)
        except Exception as e:
            # BUG FIX: skip this telescope on failure. Previously the code
            # fell through, so `hillas` was either undefined (NameError on
            # the first failure) or stale from the previous telescope.
            print(e)
            continue
        if hillas.width.value > 0:
            hillas_params[tel_id] = hillas
    # Stereo reconstruction needs at least two telescopes with valid params.
    if len(hillas_params) < 2:
        continue
    array_pointing = SkyCoord(
        az=event.mcheader.run_array_direction[0],
        alt=event.mcheader.run_array_direction[1],
        frame=horizon_frame
    )
    reco_result = reco.predict(hillas_params, event.inst, array_pointing, telescope_pointings)
    # get angular offset between reconstructed shower direction and MC
    # generated shower direction
    off_angle = angular_separation(event.mc.az, event.mc.alt, reco_result.az, reco_result.alt)
    # Appending all estimated off angles
    off_angles.append(off_angle.to(u.deg).value)
# calculate theta square for angles which are not nan
off_angles = np.array(off_angles)
thetasquare = off_angles[np.isfinite(off_angles)]**2
# ## Plot the results
plt.hist(thetasquare, bins=10, range=[0,0.4])
plt.xlabel(r'$\theta^2$ (deg)')
plt.ylabel("# of events")
plt.show()
# again, this plot is not beautiful since we have such low stats
| docs/tutorials/theta_square.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
#read car-parts data
df_car = pd.read_csv('../data/out.csv')
df_car = df_car.drop(df_car.columns[0], axis=1)  # drop the unnamed index column
#read dent prediction data
df_dent = pd.read_csv('dent_data_out.csv')
df_car = df_car.rename(columns={"name": "image_name"})
df_car.head()
df_dent.head()
# Align car-part info with dent predictions row-by-row.
# NOTE(review): assumes both CSVs list images in the same order -- confirm.
df = pd.concat([df_car, df_dent], axis=1, sort=False)
df.head(10)
# Per-image detection metrics from true/false positive and false-negative counts.
df['precision'] = df['tp'] / (df['tp'] + df['fp'])
df['recall'] = df['tp'] / (df['tp'] + df['fn'])
df['f1-score'] = 2 * (df['precision'] * df['recall']) / (df['precision'] + df['recall'])
df = df.fillna(0)  # images with zero detections yield 0/0 -> NaN; treat as 0
# Average metrics grouped by the number of car parts in the image.
# Select the metric columns before .mean() so non-numeric columns
# (e.g. image_name) cannot break the aggregation.
result_num_parts = df.groupby('num')[['precision', 'recall', 'f1-score']].mean()
result_num_parts['num_parts'] = result_num_parts.index
result_num_parts
# +
fig = plt.figure(figsize=(10, 5))
# BUG FIX: `num_hist` was displayed before it was assigned (NameError);
# it is now defined before its first use.
num_hist = df['num'].value_counts().sort_index()
plt.bar(num_hist.index.tolist(), num_hist.values.tolist())
plt.xlabel("Number of car-parts")
plt.ylabel("Counts")
plt.title("Number of car-parts vs Counts")
plt.show()
# -
num_hist.values
# +
fig = plt.figure(figsize=(10, 5))
# creating the bar plot
plt.bar(result_num_parts['num_parts'], result_num_parts['precision'], color='blue',
        width=0.4)
plt.xlabel("Number of car-parts")
plt.ylabel("Precision")
plt.title("Number of car-parts vs Precision")
plt.show()
# +
fig = plt.figure(figsize=(10, 5))
# creating the bar plot
plt.bar(result_num_parts['num_parts'], result_num_parts['recall'], color='blue',
        width=0.4)
plt.xlabel("Number of car-parts")
plt.ylabel("Recall")
plt.title("Number of car-parts vs Recall")
plt.show()
# +
fig = plt.figure(figsize=(10, 5))
# creating the bar plot
plt.bar(result_num_parts['num_parts'], result_num_parts['f1-score'], color='blue',
        width=0.4)
plt.xlabel("Number of car-parts")
# BUG FIX: the y-axis label of the f1-score plot wrongly said "Recall".
plt.ylabel("f1-score")
plt.title("Number of car-parts vs f1-score")
plt.show()
# -
| demo/error-analyis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="GkcyolF9AE8B"
# # Create Sub-saharan Africa polygon for sampling coordinates
#
# ## Setup and load data
# + colab={"base_uri": "https://localhost:8080/"} id="OZgGR8Sn19Zu" executionInfo={"status": "ok", "timestamp": 1622237016992, "user_tz": 240, "elapsed": 3385, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="397b7168-47a9-403f-f377-d414d65d9282"
from google.colab import files, drive
drive.mount('/content/drive')
# ! pip install geopandas
# + colab={"base_uri": "https://localhost:8080/"} id="kZZohwDW3Yr9" executionInfo={"status": "ok", "timestamp": 1622237016993, "user_tz": 240, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="fe39c111-1311-429f-945d-a7af22d19064"
# ! ls drive/MyDrive/detecting-poverty/data/shapes
# + colab={"base_uri": "https://localhost:8080/"} id="scPSXf-K2PMj" executionInfo={"status": "ok", "timestamp": 1622237017299, "user_tz": 240, "elapsed": 313, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="16cb1256-9123-4eba-c3c7-f88115d337ca"
# ! mkdir data
# ! cp drive/MyDrive/detecting-poverty/data/shapes data/. -r
# ! ls data/shapes
# + id="i2JK2URb3Ffl" executionInfo={"status": "ok", "timestamp": 1622242500166, "user_tz": 240, "elapsed": 134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
import geopandas as gpd
from shapely.geometry import Point, box, mapping, shape, MultiPolygon, Polygon
import json
# + colab={"base_uri": "https://localhost:8080/"} id="PLyGbSaqLyJx" executionInfo={"status": "ok", "timestamp": 1622242441745, "user_tz": 240, "elapsed": 110, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="03b46a9d-8e93-4b2a-d2c5-86aac8dd0d30"
MultiPolygon
# + colab={"base_uri": "https://localhost:8080/"} id="oavBirex4TU7" executionInfo={"status": "ok", "timestamp": 1622237017300, "user_tz": 240, "elapsed": 10, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="b13f3366-a8aa-4078-b768-a96c1c8f8c65"
raw_countries = gpd.read_file('data/shapes')
print(raw_countries.shape)
raw_countries.columns
# + colab={"base_uri": "https://localhost:8080/", "height": 349} id="6gJ6mwnG4p5g" executionInfo={"status": "ok", "timestamp": 1622237017301, "user_tz": 240, "elapsed": 9, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="f45112ae-ccc8-47b7-c74e-eeeda0bd6da3"
raw_countries.sample(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 227} id="2xDTyakX4-16" executionInfo={"status": "ok", "timestamp": 1622237018547, "user_tz": 240, "elapsed": 1253, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="cc3033dc-1473-4832-db4a-39a85c3973d2"
raw_countries.plot()
# + [markdown] id="zS6VAAVvAUuo"
# ## Subset shapes and dissolve to single polygon
# + id="BvK_oukXyQRQ" executionInfo={"status": "ok", "timestamp": 1622237508278, "user_tz": 240, "elapsed": 183, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
# from
# https://heart.bmj.com/content/heartjnl/early/2018/03/13/heartjnl-2017-312384/DC1/embed/inline-supplementary-material-1.pdf?download=true
# ISO-3166 alpha-3 codes for the Sub-Saharan African countries to include.
subsaharan_countries = [
    "AGO","BDI","BEN","BFA","BWA","CAF","CIV","CMR","COD","COG","COM","DJI",
    "ETH","GAB","GHA","GIN","GMB","GNB","GNQ","KEN","LBR","LSO","MOZ",
    "MWI","NAM","NGA","RWA","SEN","SLE","SOM","SWZ","TGO","TZA","UGA","ZAF",
    "ZMB","ZWE","SSD"
]
# + id="BFnzzWlg3GwN" executionInfo={"status": "ok", "timestamp": 1622237146293, "user_tz": 240, "elapsed": 134, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
# Index countries by their ISO alpha-3 code for the .loc lookups below.
raw_countries.set_index("isoA3", inplace=True)
# + colab={"base_uri": "https://localhost:8080/", "height": 282} id="bN0uCu9l3muP" executionInfo={"status": "ok", "timestamp": 1622237511046, "user_tz": 240, "elapsed": 356, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="3809b578-d9fb-4482-f886-6b24f8fd7afc"
raw_countries.loc[subsaharan_countries,"geometry"].plot()
# + id="IoezbvOL5Tj_" executionInfo={"status": "ok", "timestamp": 1622237877590, "user_tz": 240, "elapsed": 108, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
subsahara_shape = raw_countries.loc[subsaharan_countries,"geometry"].unary_union
# + colab={"base_uri": "https://localhost:8080/", "height": 121} id="6fjmW5kb4h1O" executionInfo={"status": "ok", "timestamp": 1622237943673, "user_tz": 240, "elapsed": 184, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="38af29d6-ccbd-402c-8926-a6a40e4ad4dd"
subsahara_shape
# + colab={"base_uri": "https://localhost:8080/"} id="Ae215KmV7HvP" executionInfo={"status": "ok", "timestamp": 1622238185312, "user_tz": 240, "elapsed": 127, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="ea262c55-e41d-45f7-c851-37697b3aeaa2"
subsahara_shape.contains(Point(20,0))
# + colab={"base_uri": "https://localhost:8080/"} id="J8QgSUxh7p3y" executionInfo={"status": "ok", "timestamp": 1622239053462, "user_tz": 240, "elapsed": 111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="4030893d-16a0-42da-98ea-a4813aa35ad3"
# Bounding box of the dissolved polygon as (minx, miny, maxx, maxy),
# in the CRS of the source shapefile (presumably lon/lat degrees -- confirm).
minx, miny, maxx, maxy = subsahara_shape.bounds
print(minx, miny, maxx, maxy)
# + [markdown] id="CTWoSJEjA3hl"
# ## Save shape
# + id="4C-Hbdyc8iEP" executionInfo={"status": "ok", "timestamp": 1622239289848, "user_tz": 240, "elapsed": 133, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
gpd.GeoSeries([subsahara_shape]).to_file('drive/MyDrive/detecting-poverty/data/subsahara.geojson', driver='GeoJSON')
# + colab={"base_uri": "https://localhost:8080/"} id="nDBYuS1HJWsQ" executionInfo={"status": "ok", "timestamp": 1622247402646, "user_tz": 240, "elapsed": 128, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="054809aa-e9b8-45d1-f5ab-7ac88b517256"
type(subsahara_shape)
# + id="P3Z9HDfLIYNd" executionInfo={"status": "ok", "timestamp": 1622247410723, "user_tz": 240, "elapsed": 121, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
# Bundle the bounding box and a GeoJSON-style mapping of the dissolved
# polygon so downstream samplers can load both from a single JSON file.
output = {
    "bbox" : [[minx, miny], [maxx, maxy]],
    "shape" : mapping(subsahara_shape)
}
with open("drive/MyDrive/detecting-poverty/data/subsahara_shapes.json", "w") as f:
    json.dump(output, fp=f)
# + colab={"base_uri": "https://localhost:8080/"} id="wNkyEAnLKTkS" executionInfo={"status": "ok", "timestamp": 1622242734227, "user_tz": 240, "elapsed": 111, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="26002a69-b3a3-4d72-dd5a-dda3c6e55b51"
# Round-trip check: reload the saved JSON and unpack bbox/shape again.
with open("drive/MyDrive/detecting-poverty/data/subsahara_shapes.json", "r") as f:
    subsahara_test = json.load(f)
shape_test = subsahara_test['shape']
(minx, miny), (maxx, maxy) = subsahara_test['bbox']
print((minx, miny), (maxx, maxy))
# + colab={"base_uri": "https://localhost:8080/", "height": 121} id="iZXM6332M8da" executionInfo={"status": "ok", "timestamp": 1622242969286, "user_tz": 240, "elapsed": 197, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="77cfed7c-56e1-4842-dfd6-5a6ad3a1b9b7"
shape(shape_test)
# + [markdown] id="NQBIhYSzA5wj"
# ## Sampling sample
# + id="0JxXd3-MBDa5" executionInfo={"status": "ok", "timestamp": 1622239635450, "user_tz": 240, "elapsed": 4, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}}
import random
# + colab={"base_uri": "https://localhost:8080/"} id="hYtY26f0BGcr" executionInfo={"status": "ok", "timestamp": 1622240199810, "user_tz": 240, "elapsed": 117, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GiGHdh6BtwcVBOlBTfpiwHfHij6pb9p8XlIjneTpg=s64", "userId": "11790179745156165663"}} outputId="396ea8e4-157b-4068-9ecb-cacf63790901"
# Rejection-sampling example: draw a uniform point from the bounding box
# and test whether it falls inside the Sub-Saharan polygon.
x = random.uniform(minx,maxx)
y = random.uniform(miny,maxy)
subsahara_shape.contains(Point(x, y))
| preprocessing/generate-points/create-subsaharan-africa-shape.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Before your start:
# - Read the README.md file
# - Comment as much as you can and use the resources in the README.md file
# - Happy learning!
# +
# Import your libraries:
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
# Show every column and row when displaying DataFrames in the notebook.
pd.set_option('display.max_columns', None)
pd.set_option('display.max_rows', None)
# -
# # Challenge 1 - Import and Describe the Dataset
#
# In this lab, we will use a dataset containing information about customer preferences. We will look at how much each customer spends in a year on each subcategory in the grocery store and try to find similarities using clustering.
#
# The origin of the dataset is [here](https://archive.ics.uci.edu/ml/datasets/wholesale+customers).
# loading the data:
wholesale = pd.read_csv('c:/Users/denis/Desktop/Ironhack/ironhackLabs/module-3/Supervised-Learning/data/Wholesale.csv')
wholesale.head()
wholesale.dtypes
# Channel and Region are coded identifiers, not quantities -> categorical.
wholesale['Channel'] = wholesale["Channel"].astype('category')
wholesale['Region'] = wholesale["Region"].astype('category')
wholesale.isnull().sum()
wholesale.describe()
# Pairwise correlations of the numeric spending columns, shown as a heatmap.
correlation = wholesale.corr()
plt.figure(figsize=(20,10))
plt.title('Correlation Heat Map')
sns.heatmap(correlation,vmax=1,annot=True)
# Per-column distributions to eyeball skew and outliers.
wholesale.hist(bins=40, figsize=(20,20))
plt.show()
# +
# Need to remove outliers because they exist
# Need to remove column detergent paper
# -
# #### Explore the dataset with mathematical and visualization techniques. What do you find?
#
# Checklist:
#
# * What does each column mean?
# * Any categorical data to convert?
# * Any missing data to remove?
# * Column collinearity - any high correlations?
# * Descriptive statistics - any outliers to remove?
# * Column-wise data distribution - is the distribution skewed?
# * Etc.
#
# Additional info: Over a century ago, an Italian economist named <NAME> discovered that roughly 20% of the customers account for 80% of the typical retail sales. This is called the [Pareto principle](https://en.wikipedia.org/wiki/Pareto_principle). Check if this dataset displays this characteristic.
# +
# # Your code here:
# # wholesale['cumulative_sum'] = wholesale.total.cumsum()
# # wholesale["cumpercentage"] =
# a = ['Fresh','Milk','Grocery','Detergents_Paper','Frozen','Delicassen']
# # for x in wholesale[a]:
# wholesale['pareto'] = 100 *wholesale['Fresh'].cumsum() / wholesale['Fresh'].sum()
# fig, axes = plt.subplots()
# ax1 = wholesale.plot(use_index=True, y='Fresh', kind='bar', ax=axes)
# ax2 = wholesale.plot(use_index=True, y='pareto', marker='D', color="C1", kind='line', ax=axes, secondary_y=True)
# ax2.set_ylim([0,110])
# # wholesale['cumulative_perc'] = 100*wholesale.cumulative_sum/wholesale.total.sum()
# # from paretochart import pareto
# # fig, axes = plt.subplots(2, 2)
# # pareto(wholesale, axes=axes[0, 0])
# # plt.title('Basic chart without labels', fontsize=10)
# -
wholesale['Fresh'].value_counts()
# +
# Your observations here
# -
# # Challenge 2 - Data Cleaning and Transformation
#
# If your conclusion from the previous challenge is the data need cleaning/transformation, do it in the cells below. However, if your conclusion is the data need not be cleaned or transformed, feel free to skip this challenge. But if you do choose the latter, please provide rationale.
# +
# Your code here
# Need to remove outliers because they exist
# Drop the categorical columns before computing numeric outlier bounds.
df = wholesale.drop(['Channel', 'Region'], axis=1)
# Interquartile-range filter: keep only rows whose every value lies in
# [Q1 - 1.5*IQR, Q3 + 1.5*IQR].
Q1 = df.quantile(0.25)
Q3 = df.quantile(0.75)
IQR = Q3 - Q1
df1 = df[~((df < (Q1 - 1.5 * IQR)) |(df > (Q3 + 1.5 * IQR))).any(axis=1)]
# -
df1.hist(bins=40, figsize=(20,20))
plt.show()
# Your comment here
# Need to remove column detergent paper
# NOTE(review): this drops Detergents_Paper from `wholesale` only; `df1`
# (created above) still contains the column -- confirm that is intended.
wholesale = wholesale.drop(['Detergents_Paper'], axis=1)
# # Challenge 3 - Data Preprocessing
#
# One problem with the dataset is the value ranges are remarkably different across various categories (e.g. `Fresh` and `Grocery` compared to `Detergents_Paper` and `Delicassen`). If you made this observation in the first challenge, you've done a great job! This means you not only completed the bonus questions in the previous Supervised Learning lab but also researched deep into [*feature scaling*](https://en.wikipedia.org/wiki/Feature_scaling). Keep on the good work!
#
# Diverse value ranges in different features could cause issues in our clustering. The way to reduce the problem is through feature scaling. We'll use this technique again with this dataset.
#
# #### We will use the `StandardScaler` from `sklearn.preprocessing` and scale our data. Read more about `StandardScaler` [here](https://scikit-learn.org/stable/modules/generated/sklearn.preprocessing.StandardScaler.html#sklearn.preprocessing.StandardScaler).
#
# *After scaling your data, assign the transformed data to a new variable `customers_scale`.*
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# Standardize (zero mean, unit variance) the numeric spending columns in place.
# NOTE(review): Detergents_Paper is still present in df1 but is not scaled
# here -- verify whether it should be included or dropped.
df1[['Fresh','Milk','Grocery','Frozen','Delicassen']] = scaler.fit_transform(df1[['Fresh','Milk','Grocery','Frozen','Delicassen']].to_numpy())
df1
# +
# pd.DataFrame(scaler.fit_transform(df1[['Fresh','Milk','Grocery','Frozen','Delicassen']]))
# -
# # Challenge 3 - Data Clustering with K-Means
#
# Now let's cluster the data with K-Means first. Initiate the K-Means model, then fit your scaled data. In the data returned from the `.fit` method, there is an attribute called `labels_` which is the cluster number assigned to each data record. What you can do is to assign these labels back to `customers` in a new column called `customers['labels']`. Then you'll see the cluster results of the original data.
# +
# Your code here:
from sklearn.cluster import KMeans
# Cluster the (scaled) customer data into 3 groups.
kmeans = KMeans(n_clusters=3)
bread_clusters = kmeans.fit(df1)
print(bread_clusters.cluster_centers_)
# Assign each record its cluster id in a new `labels` column.
df1['labels'] = bread_clusters.fit_predict(df1)
# df1.reset_index(inplace=True)
df1.head(20)
# -
# NOTE(review): this refit runs on df1 *including* the `labels` column just
# added above -- confirm that is intended.
kmeans = KMeans(n_clusters=3)
bread_clusters = kmeans.fit(df1)
# Count the values in `labels`.
# BUG FIX: df1 has no 'index' column unless the reset_index above is
# uncommented, so an unconditional drop raised KeyError. errors='ignore'
# preserves the original intent without crashing.
df1 = df1.drop(columns=['index'], errors='ignore')
# Your code here:
df1.labels.value_counts()
# # Challenge 4 - Data Clustering with DBSCAN
#
# Now let's cluster the data using DBSCAN. Use `DBSCAN(eps=0.5)` to initiate the model, then fit your scaled data. In the data returned from the `.fit` method, assign the `labels_` back to `customers['labels_DBSCAN']`. Now your original data have two labels, one from K-Means and the other from DBSCAN.
# +
# Your code here
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
# %matplotlib inline
# Density-based clustering on every column except the K-Means 'labels'
# column (eps=0.5 neighborhood radius, at least 10 samples per core point).
dbscan = DBSCAN(eps=0.5, min_samples=10).fit(df1.loc[:, df1.columns != 'labels'])
dbscan.labels_
# Label -1 marks noise points DBSCAN could not assign to any cluster.
df1['labels_DBSCAN'] = dbscan.labels_
df1
# moons = make_moons(df1.loc[:, df1.columns != 'labels'], shuffle=True, noise=0.5)
# plt.scatter(moons[0][:,0], moons[0][:,1])
# -
# Count the values in `labels_DBSCAN`.
# Your code here
df1['labels_DBSCAN'].value_counts()
# # Challenge 5 - Compare K-Means with DBSCAN
#
# Now we want to visually compare how K-Means and DBSCAN have clustered our data. We will create scatter plots for several columns. For each of the following column pairs, plot a scatter plot using `labels` and another using `labels_DBSCAN`. Put them side by side to compare. Which clustering algorithm makes better sense?
#
# Columns to visualize:
#
# * `Detergents_Paper` as X and `Milk` as y
# * `Grocery` as X and `Fresh` as y
# * `Frozen` as X and `Delicassen` as y
# Visualize `Detergents_Paper` as X and `Milk` as y by `labels` and `labels_DBSCAN` respectively
# +
# Your code here:
# Can't plot this pair: the Detergents_Paper column was dropped earlier as redundant.
# -
# Visualize `Grocery` as X and `Fresh` as y by `labels` and `labels_DBSCAN` respectively
# Your code here:
# Compare the two clusterings on Grocery vs Fresh.
# NOTE(review): `sns` (seaborn) is not imported in this notebook chunk --
# presumably imported elsewhere; confirm.
ax = sns.scatterplot(x="Grocery", y="Fresh", hue="labels",data=df1)
# Drawn on the same axes as the previous call, so the hue schemes overlap.
ax = sns.scatterplot(x="Grocery", y="Fresh", hue="labels_DBSCAN",data=df1)
# Visualize `Frozen` as X and `Delicassen` as y by `labels` and `labels_DBSCAN` respectively
# Your code here:
ax = sns.scatterplot(x="Frozen", y="Delicassen", hue="labels",data=df1)
ax = sns.scatterplot(x="Frozen", y="Delicassen", hue="labels_DBSCAN",data=df1)
# Let's use a groupby to see how the mean differs between the groups. Group `customers` by `labels` and `labels_DBSCAN` respectively and compute the means for all columns.
# Your code here:
# Per-cluster means for each labelling, to compare the two algorithms.
df1.groupby('labels').mean()
df1.groupby('labels_DBSCAN').mean()
# Which algorithm appears to perform better?
# +
# Your observations here
# it seems that dbscan managed to group items by mean a bit better than kmeans
# -
# # Bonus Challenge 2 - Changing K-Means Number of Clusters
#
# As we mentioned earlier, we don't need to worry about the number of clusters with DBSCAN because it automatically decides that based on the parameters we send to it. But with K-Means, we have to supply the `n_clusters` param (if you don't supply `n_clusters`, the algorithm will use `8` by default). You need to know that the optimal number of clusters differs case by case based on the dataset. K-Means can perform badly if the wrong number of clusters is used.
#
# In advanced machine learning, data scientists try different numbers of clusters and evaluate the results with statistical measures (read [here](https://en.wikipedia.org/wiki/Cluster_analysis#External_evaluation)). We are not using statistical measures today but we'll use our eyes instead. In the cells below, experiment with different number of clusters and visualize with scatter plots. What number of clusters seems to work best for K-Means?
# +
# Your code here
# Re-cluster with the K-Means default of 8 clusters for comparison.
# NOTE(review): df1 now also contains the 'labels' and 'labels_DBSCAN'
# columns, so they participate as features in this fit -- confirm intended.
kmeans = KMeans(n_clusters=8)
bread_clusters = kmeans.fit(df1)
bread_clusters.cluster_centers_
# fit_predict() re-runs clustering; ids may differ from the centers above.
df1['labels'] = bread_clusters.fit_predict(df1)
# df1.reset_index(inplace=True)
# df1.head(20)
# -
df1.groupby('labels').mean()
# Your comment here
ax = sns.scatterplot(x="Frozen", y="Delicassen", hue="labels",data=df1)
# # Bonus Challenge 3 - Changing DBSCAN `eps` and `min_samples`
#
# Experiment changing the `eps` and `min_samples` params for DBSCAN. See how the results differ with scatter plot visualization.
# +
# Your code here
# DBSCAN with a larger neighborhood radius (eps=0.8) than the earlier run
# (eps=0.5); a larger eps typically merges clusters and reduces noise points.
dbscan = DBSCAN(eps=0.8, min_samples=10).fit(df1.loc[:, df1.columns != 'labels'])
dbscan.labels_
df1['labels_DBSCAN'] = dbscan.labels_
# df1
# -
# Your comment here
ax = sns.scatterplot(x="Grocery", y="Fresh", hue="labels_DBSCAN",data=df1)
df1.groupby('labels_DBSCAN').mean()
| module-3/Unsupervised-Learning/your-code/main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: tf2
# language: python
# name: tf2
# ---
# +
from tensorflow.keras.models import Sequential, Model, load_model
import os
import pickle
import numpy as np
import pandas as pd
import scipy.sparse as sp
import scipy.io as spio
import matplotlib.pyplot as plt
from scrambler.models import *
from scrambler.utils import OneHotEncoder, get_sequence_masks
from scrambler.visualizations import plot_dna_logo, plot_dna_importance_scores
from apa_utils import load_apa_data, load_apa_predictor_cleavage_logodds, animate_apa_examples
# +
#Load APA data and predictor
encoder = OneHotEncoder(seq_length=205, channel_map={'A' : 0, 'C' : 1, 'G' : 2, 'T' : 3})
data_path = 'apa_doubledope_cached_set.csv'
x_train, y_train, x_test, y_test = load_apa_data(data_path, encoder)
predictor_path = 'saved_models/aparent_plasmid_iso_cut_distalpas_all_libs_no_sampleweights_sgd.h5'
predictor = load_apa_predictor_cleavage_logodds(predictor_path)
# +
#Print predictor input/output details
print("predictor.inputs = " + str(predictor.inputs))
print("predictor.outputs = " + str(predictor.outputs))
# +
#Define sequence template and background
sequence_template = 'CTTCCGATCT$$$$$$$$$$$$$$$$$$$$CATTACTCGCATCCA$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$CAGCCAATTAAGCC$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$CTAC'
pseudo_count = 1.0
onehot_template = encoder(sequence_template)[None, ...]
sequence_mask = get_sequence_masks([sequence_template])[0]
x_mean = (np.sum(x_train, axis=(0, 1)) + pseudo_count) / (x_train.shape[0] + 4. * pseudo_count)
# +
#Visualize background sequence distribution
plot_dna_logo(np.copy(x_mean), sequence_template=sequence_template, figsize=(14, 0.65), logo_height=1.0, plot_start=0, plot_end=205)
# +
#Calculate mean training set kl-divergence against background
x_train_clipped = np.clip(np.copy(x_train[:, 0, :, :]), 1e-8, 1. - 1e-8)
kl_divs = np.sum(x_train_clipped * np.log(x_train_clipped / np.tile(np.expand_dims(x_mean, axis=0), (x_train_clipped.shape[0], 1, 1))), axis=-1) / np.log(2.0)
x_mean_kl_divs = np.sum(kl_divs * sequence_mask, axis=-1) / np.sum(sequence_mask)
x_mean_kl_div = np.mean(x_mean_kl_divs)
print("Mean KL Div against background (bits) = " + str(x_mean_kl_div))
# +
#For the sake of the example, lets transform x to a 1d shape
x_train = x_train[:, 0, ...]
x_test = x_test[:, 0, ...]
print(x_train.shape)
print(x_test.shape)
# +
#Create extra inputs that the predictor model expects
feat_1_train = np.zeros((x_train.shape[0], 13))
feat_1_test = np.zeros((x_test.shape[0], 13))
feat_1_train[:, 4] = 1.
feat_1_test[:, 4] = 1.
feat_2_train = np.ones((x_train.shape[0], 1))
feat_2_test = np.ones((x_test.shape[0], 1))
print(feat_1_train.shape)
print(feat_2_train.shape)
# +
#Scrambler network configuration
# Hyper-parameters for the scrambler network (consumed by Scrambler below;
# see scrambler.models for the exact meaning of each key).
network_config = {
    'n_groups' : 5,
    'n_resblocks_per_group' : 4,
    'n_channels' : 32,
    'window_size' : 3,
    'dilation_rates' : [1, 2, 4, 2, 1],
    'drop_rate' : 0.0,
    'norm_mode' : 'instance',
    'mask_smoothing' : True,
    'mask_smoothing_window_size' : 5,
    'mask_smoothing_std' : 1.,
    'mask_drop_scales' : [1, 5],
    'mask_min_drop_rate' : 0.0,
    'mask_max_drop_rate' : 0.5,
    'label_input' : False
}
# +
#Train scrambler(s) to maximize cleavage logodds at different positions
save_dir = 'saved_models'
# Train one scrambler per cleavage-position index 0..3.
for cut_pos in [0, 1, 2, 3] :
    print("Training scrambler for cleavage position = " + str(cut_pos) + ".")
    #Initialize scrambler
    scrambler = Scrambler(
        scrambler_mode='inclusion',
        input_size_x=None,
        input_size_y=205,
        n_out_channels=4,
        n_classes=4,
        input_templates=[onehot_template],
        input_backgrounds=[x_mean],
        batch_size=32,
        n_samples=32,
        sample_mode='gumbel',
        zeropad_input=False,
        mask_dropout=False,
        network_config=network_config
    )
    #y_pred_scrambled.shape = (batch_size, n_samples, n_classes)
    # Custom loss: maximize (i.e. minimize the negative of) the predicted
    # log-odds at this cleavage position, averaged over samples. cut_pos is
    # bound as a default argument to freeze the current loop value.
    # NOTE(review): K is assumed to be the Keras backend pulled in by
    # `from scrambler.models import *` -- confirm.
    def maximize_cleavage_logodds(y_pred_non_scrambled, y_pred_scrambled, cut_pos=cut_pos) :
        return -K.mean(y_pred_scrambled[..., cut_pos], axis=-1)
    n_epochs = 10
    _ = scrambler.train(
        predictor,
        x_train,
        y_train,
        x_test,
        y_test,
        n_epochs,
        extra_input_train=[feat_1_train, feat_2_train],
        extra_input_test=[feat_1_test, feat_2_test],
        monitor_test_indices=None,
        custom_loss_func=maximize_cleavage_logodds,
        entropy_mode='target',
        entropy_bits=0.1,
        entropy_weight=20.
    )
    #Save scrambler checkpoint
    model_name = 'apa_inclusion_scrambler_smooth_target_bits_01_epochs_10_deeper_cut_pos_' + str(cut_pos)
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    model_path = os.path.join(save_dir, model_name + '.h5')
    scrambler.save_model(model_path)
# +
#Load models and interpret test patterns for all cleavage positions
save_dir = 'saved_models'
pwm_test = []
sample_test = []
importance_scores_test = []
for cut_pos in [0, 1, 2, 3] :
    print("Interpreting for cleavage position = " + str(cut_pos) + ".")
    model_name = 'apa_inclusion_scrambler_smooth_target_bits_01_epochs_10_deeper_cut_pos_' + str(cut_pos)
    model_path = os.path.join(save_dir, model_name + '.h5')
    # NOTE(review): reuses the `scrambler` instance left over from the
    # training loop above -- confirm it is still in scope here.
    scrambler.load_model(model_path)
    #Interpret the test set using the trained scrambler
    pwm_t, sample_t, importance_scores_t = scrambler.interpret(x_test)
    # Prepend a leading axis so results can be stacked per cleavage position.
    pwm_test.append(pwm_t[None, ...])
    sample_test.append(sample_t[None, ...])
    importance_scores_test.append(importance_scores_t[None, ...])
pwm_test = np.concatenate(pwm_test, axis=0)
sample_test = np.concatenate(sample_test, axis=0)
importance_scores_test = np.concatenate(importance_scores_test, axis=0)
# +
#Visualize a few reconstructed sequence patterns
# Zero out scores at fixed (non-'$') template positions.
importance_scores_test *= sequence_mask[None, None, :, None]
plot_examples = [3, 5, 6]
save_examples = []
cuts = [76 + 5, 76 + 15, 76 + 25, 76 + 35]
for test_ix in plot_examples :
    print("Test sequence " + str(test_ix) + ":")
    save_figs = False
    if save_examples is not None and test_ix in save_examples :
        save_figs = True
    plot_dna_logo(x_test[test_ix, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(test_ix) + "_orig_sequence")
    #Plot interpretation PWM for each cleavage position
    for cut_ix, cut_pos in enumerate([0, 1, 2, 3]) :
        #Mark the position where we are maximizing cleavage
        cut_template = 'N' * 205
        # NOTE(review): inserting 'CCC' while removing only one character
        # yields a 207-char template (not 205) -- confirm this is intended.
        cut_template = cut_template[:cuts[cut_ix]] + 'CCC' + cut_template[cuts[cut_ix]+1:]
        # NOTE(review): this marker plot and the PWM plot below share the
        # same fig_name suffix "_scrambld_pwm", so saved figures overwrite
        # each other -- confirm.
        plot_dna_logo(np.zeros((205, 4)), sequence_template=cut_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(test_ix) + "_scrambld_pwm")
        #Plot Scrambler interpretation (PWM)
        plot_dna_logo(pwm_test[cut_ix, test_ix, :, :], sequence_template=sequence_template, figsize=(14, 0.65), plot_start=0, plot_end=205, plot_sequence_template=True, save_figs=save_figs, fig_name=model_name + "_test_ix_" + str(test_ix) + "_scrambld_pwm")
| examples/dna/scrambler_apa_example_custom_loss.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from tensorflow import keras
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.optimizers import RMSprop, Adam
import os
# +
batch_size = 128 # batch size; lower this value if an OOM error occurs
num_classes = 10 # number of classes; Cifar 10 has 10 classes
epochs = 10 # number of training epochs
# Load the data and inspect its shape
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# One-hot encode the labels (y_train is originally plain integers)
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# -
# ## First we train an ordinary DNN (MLP)
# A DNN only accepts 1-D input, so the images must be flattened first:
# (50000, 32, 32, 3) becomes (50000, 32*32*3) = (50000, 3072)
# +
# Flatten the data to 1-D
x_train = x_train.reshape(50000, 3072)
x_test = x_test.reshape(10000, 3072)
# Convert to float32 and scale pixel values into [0, 1]
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# +
# Two hidden layers of 512 ReLU units with dropout, softmax output.
model = Sequential()
model.add(Dense(512, activation='relu', input_shape=(3072,)))
model.add(Dropout(0.2))
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(num_classes, activation='softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
# ## 接下來我們使用 CNN 來訓練神經網路
# CNN 的原理非常適合處理影像類的資料,就讓我們來看看,同樣的訓練條件,CNN 是否顯著優於 DNN 呢?
# +
# Reload the raw images (they were flattened and normalized in place above).
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Scale pixel values into [0, 1].
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# +
# Two conv blocks (32 then 64 filters), each followed by max-pooling and
# dropout, then a 512-unit dense layer and a softmax classifier.
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()
model.compile(loss='categorical_crossentropy',
              optimizer=RMSprop(),
              metrics=['accuracy'])
history = model.fit(x_train, y_train,
                    batch_size=batch_size,
                    epochs=epochs,
                    verbose=1,
                    validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
# -
# ## 同樣運算 10 個 epochs,但 CNN 在 test data 的準確率顯著優於 DNN!
| 2nd-ML100Days/homework/D-097/Day097_Keras_CNN_vs_DNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lambdify in symbolic module
# ### Importing required modules
# +
import sympy
from sympy.abc import x, y
from sympy import symbols
from einsteinpy.symbolic import BaseRelativityTensor
sympy.init_printing()
# -
# ### Calculating a Base Relativity Tensor
# Symbols used as the tensor's coordinate variables.
syms = symbols("x y")
x, y = syms
# A 2x2 tensor with both indices covariant (config="ll").
T = BaseRelativityTensor([[x, 1],[0, x+y]], syms, config="ll")
# ### Calling the lambdify function
# tensor_lambdify returns the argument order and a numeric function.
args, func = T.tensor_lambdify()
args
# `args` indicates the order in which arguments should be passed to the returned function `func`
#
# ### Executing the returned function for some value
# Evaluate the tensor numerically at x=2, y=1.
func(2, 1)
| docs/source/examples/Lambdify symbolic calculation.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/BARARTH26/assignment/blob/master/Untitled5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="_FKOVnbbzWuN" colab_type="text"
# Hi,My name is P.PERIYASAMY,From Coimbatore(TAMILNADU)
# + [markdown] id="uGqjUjgfz4pD" colab_type="text"
# ASSIGNMENT 1 - DAY 6
#
# Create a bank balance class for find the Deposit and Withdrawl Details.
#
#
# + id="CI2T82NyrLkF" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 119} outputId="df503952-a583-4ca5-970c-b9f91ff2854e"
class Bank_Balance():
    """Toy bank-account class that prints deposit and withdrawal summaries.

    NOTE(review): the running balance is never stored back on the instance;
    both methods recompute Available_balance + Amt_Deposit on the fly.
    """
    def __init__(self,ownername,Available_balance):
        # Account holder's name and opening balance.
        self.ownername = ownername
        self.Available_balance = Available_balance
    def deposit(self,Amt_Deposit):
        """Record a deposit amount and print the resulting total balance."""
        self.Amt_Deposit = Amt_Deposit
        print("Ownername--> ",self.ownername)
        print("Available Balance--> ",self.Available_balance)
        print("Deposit_amt--> ",self.Amt_Deposit)
        # Total_Balance = self.Available_balance+self.Amt_Deposit
        print("Total balance --> ",self.Available_balance+self.Amt_Deposit)
    def withdrawl(self,Initial_Amt,withdrawl_Amt):
        """Attempt a withdrawal; refuse it if it would overdraw the account.

        NOTE(review): Initial_Amt is stored but never used, and this method
        assumes deposit() was called first (it reads self.Amt_Deposit) --
        confirm both are intended.
        """
        self.withdrawl_Amt = withdrawl_Amt
        self.Initial_amt = Initial_Amt
        print("withdrawl Amount--> ",self.withdrawl_Amt)
        # Balance that would remain after the requested withdrawal.
        self.a = (self.Available_balance+self.Amt_Deposit)-self.withdrawl_Amt
        if self.a < 0:
            print ("YOU ARE NOT GET THE MONEY BECAUSE YOUR BALANCE IS LOW")
            print("Your Total Balance--> ",self.Available_balance+self.Amt_Deposit)
        else:
            print("Total_Balance-->",(self.Available_balance+self.Amt_Deposit)-self.withdrawl_Amt)
# Demo: open with 1000, deposit 0, then withdraw the full balance.
Account = Bank_Balance("barath",1000)
Account.deposit(0)
Account.withdrawl(0,1000)
# + [markdown] id="xjoJJpQr7vXq" colab_type="text"
# ASSIGNMENT 2 - DAY 6
#
# Create a cone class for find the Volume and Surface Area
#
#
# + id="tuC1_hGDpEGp" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 68} outputId="d6914363-e5b7-4f8b-ca1e-c1994a611b33"
class cone():
    """Right circular cone: volume and surface-area components.

    Uses 3.14 as the approximation of pi, matching the original output style.
    All methods print their result rather than returning it.
    """
    def __init__(self,Radius,Height):
        # Base radius and perpendicular height of the cone.
        self.Radius = Radius
        self.Height = Height
    def Volume(self):
        """Print the volume: V = pi * r^2 * h / 3."""
        print("VOLUME OF CONE --> ",3.14 * (self.Radius*self.Radius) *(self.Height/3))
    def Surface_Area(self):
        """Print the base area (pi*r^2) and lateral area (pi*r*l)."""
        print("BASE OF CONE --> ",3.14 * (self.Radius*self.Radius))
        # Lateral ("side") area = pi * r * l with slant height l = sqrt(r^2 + h^2).
        # BUG FIX: the original computed 0.5**(r^2 + h^2) -- i.e. one half
        # raised to (r^2 + h^2) -- instead of the square root (r^2 + h^2)**0.5.
        print("SIDE OF CONE--> ",3.14 * self.Radius * (((self.Radius**2)+(self.Height**2))**0.5))
# Demo runs, as in the original notebook cell.
part_volume = cone(4,7)
part_volume.Volume()
part_Surface_Area= cone(3,5)
part_Surface_Area.Surface_Area()
# + id="kPCC2l2W2QE5" colab_type="code" colab={}
| Untitled5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv
# language: python
# name: venv
# ---
# import libraries
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
# ## Load and prepare the data
# path to data locations
# path to data locations
data_path_hours = "datasets/hourly_data.csv"
data_path_days = "datasets/daily_data.csv"
# upload data into pandas data frame
data_df = pd.read_csv(data_path_hours)
data_df
data_df.info()
# This dataset has the number of riders for each hour of each day from January 1 2011 to December 31 2012. The number of riders is split between casual and registered, summed up in the cnt column. You can see the first few rows of the data above.
#
# The data includes some categorical variables like season, weather, month. To feed those data to the neural network we need to One-Hot encoding. For this we can use get_dummies() function in pandas.
# +
from utils import one_hot_encode
# Categorical columns to one-hot encode; the original (and redundant)
# columns are dropped afterwards.
categorical_fields = ["mnth", 'season', 'weathersit','hr' , 'weekday']
fields_to_drop = ['mnth', 'season', 'weathersit','hr' , 'weekday', 'instant', 'atemp']
data_df = one_hot_encode(data_df, categorical_fields, fields_to_drop)
# -
data_df.head()
data_columns = list(data_df.columns)
# viewing some data for one week period
data_df[:24*7].plot(x='dteday', y='cnt')
# +
from utils import train_test_split_df
# divide into training and testing dataframes
df_train, df_test = train_test_split_df(data_df)
# +
from utils import standardize
# scale data
# NOTE(review): only df_train is scaled here; df_test is never scaled with
# the fitted scaler in this notebook -- confirm before evaluating on it.
columns_to_scale = ['cnt', 'temp','hum','windspeed']
df_train, scaler = standardize(df_train, columns_to_scale)
df_train.head(3)
# +
from utils import divide_train_target
# get x,y data for training and testing
target_fields = ['cnt']
date_fields = ['dteday', 'yr', 'casual', 'registered']
x, y = divide_train_target(df_train, data_columns, date_fields, target_fields)
y.shape
# +
from utils import train_validation_split
# split data to train and test
X_train, X_valid, y_train, y_valid = train_validation_split(x, y)
# -
y_valid.shape
X_train.shape
# ## Build the network
# +
###### building the tf dense model
# Stack of Dense layers over the 56 input features, single regression output.
# NOTE(review): no activation functions are specified, so the hidden Dense
# layers are linear and the whole stack collapses to one linear map --
# confirm whether ReLU activations were intended.
model = tf.keras.models.Sequential([
    tf.keras.layers.Input(shape=(56,)),
    tf.keras.layers.Dense(256),
    tf.keras.layers.Dense(128),
    tf.keras.layers.Dense(64),
    tf.keras.layers.Dense(1),
])
# WE USE SGD for this problem and specify lr and momentum
model.compile(optimizer=tf.keras.optimizers.SGD(0.001, 0.9), loss='mse')
def schedule(epoch, lr):
    """Learning-rate schedule: 1e-3 for the first 10 epochs, then 1e-4.

    The current-rate argument ``lr`` is accepted (Keras passes it to the
    callback) but intentionally ignored.
    """
    return 0.0001 if epoch >= 10 else 0.001
# Drop the learning rate after epoch 10 via the scheduler callback above.
schedular = tf.keras.callbacks.LearningRateScheduler(schedule)
r = model.fit(X_train, y_train, epochs=20, validation_data=(X_valid,y_valid), callbacks =[schedular])
# -
# Plot training vs validation loss over the 20 epochs.
plt.plot(r.history['loss'], label='Training loss')
plt.plot(r.history['val_loss'], label='Validation loss')
plt.legend()
# BUG FIX: the original called model.predict(X_test), but X_test is never
# defined in this notebook (the splits produced X_train / X_valid), so that
# line raised NameError. Predict on the held-out validation split instead.
model.predict(X_valid)
| Bike_sharing_pred_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression result in a cell, not just the last one.
InteractiveShell.ast_node_interactivity = "all"
# +
import tensorflow as tf
import numpy as np
# TF 1.x: opt in to eager execution (it is the default in TF 2).
tf.enable_eager_execution()
# -
# Integer-encoded inputs and labels stored as .npy arrays.
X = np.load("/data/elsa/elsa_ja_X.npy", allow_pickle=True).astype(np.int32)
y = np.load("/data/elsa/elsa_ja_y.npy", allow_pickle=True).astype(np.int32)
train_dataset = tf.data.Dataset.from_tensor_slices((X, y))
train_dataset = train_dataset.batch(32)
train_dataset
train_dataset.output_types
# One-pass iterator over the batched dataset (TF 1.x API).
iterator = train_dataset.make_one_shot_iterator()
iterator
next_data = iterator.get_next()
next_data[0].shape
# Serialize each (X, y) pair as a tf.train.Example and write a TFRecord file.
with tf.python_io.TFRecordWriter("/data/elsa/test.tfrecords") as writer:
    for idx in range(len(X)):
        feature = {
            "X": tf.train.Feature(int64_list=tf.train.Int64List(value=X[idx])),
            "Y": tf.train.Feature(int64_list=tf.train.Int64List(value=y[idx]))
        }
        features = tf.train.Features(feature=feature)
        example = tf.train.Example(features=features)
        serialized = example.SerializeToString()
        writer.write(serialized)
# Read the file back record-by-record and decode the first Example only.
for serialized_example in tf.python_io.tf_record_iterator('/data/elsa/test.tfrecords'):
    example = tf.train.Example()
    example.ParseFromString(serialized_example)
    x_1 = np.array(example.features.feature['X'].int64_list.value)
    y_1 = np.array(example.features.feature['Y'].int64_list.value)
    print(example)
    break
# Dataset of raw (still-serialized) records.
raw_dataset = tf.data.TFRecordDataset(['/data/elsa/test.tfrecords'])
raw_dataset
# Peek at the first few raw records.
for record in raw_dataset.take(3):
    print(repr(record))
# +
feature_description = {
    # NOTE(review): FixedLenFeature([], ...) declares a scalar feature, but
    # the records above were written with variable-length Int64Lists, so
    # parsing will likely fail; VarLenFeature / FixedLenSequenceFeature may
    # be needed -- confirm.
    'X': tf.FixedLenFeature([], tf.int64, default_value=0),
    'Y': tf.FixedLenFeature([], tf.int64, default_value=0),
}
def _parse_function(example_proto):
    """Parse one serialized Example into (X, Y) int32 tensors."""
    features = tf.parse_single_example(example_proto, feature_description)
    X = tf.cast(features['X'], tf.int32)
    Y = tf.cast(features['Y'], tf.int32)
    return X, Y
# -
# Apply the parser lazily over the raw records.
parsed_dataset = raw_dataset.map(_parse_function)
parsed_dataset
iterator = parsed_dataset.make_one_shot_iterator()
data = iterator.next()
for record in parsed_dataset.take(3):
    print(record)
| elsa/np_tfrecord_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # data preprocessing
# # step1: import libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# # step 2 import data set
dataset=pd.read_csv('Housing_data.csv')
dataset
# # step3: create feature matrix(X) and dependent variable vector(Y)
# All columns but the last are features; the last column is the target.
x=dataset.iloc[:,:-1].values
y=dataset.iloc[:,-1].values
x
y
# # step4: replacing missing data
from sklearn.impute import SimpleImputer
# Fill NaNs with the column mean.
imputer=SimpleImputer(missing_values=np.nan,strategy='mean' )
imputer.fit(x[:,:])
x[:,:]=imputer.transform(x[:,:])
# # step5: encoding(not required)
# # step6: splitting to data set into training data set and testing data set¶
from sklearn.model_selection import train_test_split
# 80/20 train/test split with a fixed seed for reproducibility.
xtrain,xtest,ytrain,ytest=train_test_split(x,y,test_size=0.2,random_state=1)
# # step7:feature scaling (not required)
# # stepB:build a multiple regression model
# # step8: training my linear model
from sklearn.linear_model import LinearRegression
regression=LinearRegression()
regression.fit(xtrain,ytrain)
# Predict on the held-out test set and compare with the true values.
yest=regression.predict(xtest)
print(yest)
print(ytest)
yest.reshape(len(yest),1)
ytest.reshape(len(ytest),1)
# Side-by-side view: predicted vs actual values in two columns.
np.concatenate((yest.reshape(len(yest),1),ytest.reshape(len(ytest),1)),1)
# # coefficients of the regressor
regression.coef_
regression.intercept_
| Python-Week 4/26 august 2021 Day 14 multiple LR.ipynb |
% ---
% jupyter:
% jupytext:
% text_representation:
% extension: .m
% format_name: light
% format_version: '1.5'
% jupytext_version: 1.14.4
% kernelspec:
% display_name: Matlab
% language: matlab
% name: matlab
% ---
% +
cd ..
clc; clear all; close all;
addpath(genpath(pwd));
% Weekly precipitation per zone (targets) and gridded predictor fields.
zones_Prec = get_zones_Prec_weekly();
OISST = dlmread('data/OISST_19811101-20161116.dat');
uwnd = dlmread('data/uwnd_WIND_CCMP_merge_OISST_weekly.dat');
vwnd = dlmread('data/vwnd_WIND_CCMP_merge_OISST_weekly.dat');
% Intercept SST to make them time consistent
% (truncate all predictors to the number of precipitation time steps)
OISST = OISST(1:size(zones_Prec, 1), :);
uwnd = uwnd(1:size(zones_Prec, 1), :);
vwnd = vwnd(1:size(zones_Prec, 1), :);
% Concatenate the predictor fields column-wise.
x_train = [OISST uwnd vwnd];
y_train = zones_Prec;
% -
whos
% idx hiddenLayerSize delay_weeks trainPerformance valPerformance testPerformance performance trainR valR testR R region1 region2 region3 region4 region5 region6 region7 region8 region9
% result = tdnn_train(X, Y, delay_times, hiddenLayerSize)
% Train a TDNN 4 times for each delay 1..30 (30 hidden units); each run
% appends one result row.
result = [];
for delay_times = 1:30
    for repeat = 1:4
        result_one = tdnn_train(x_train, y_train, delay_times, 30);
        result = [result; result_one];
    end
end
dlmwrite('result/20161218_1_result_repeat_4_delay_1-30_hid_30.dat', result, 'delimiter', '\t');
whos
% idx hiddenLayerSize delay_weeks trainPerformance valPerformance testPerformance performance trainR valR testR R region1 region2 region3 region4 region5 region6 region7 region8 region9
% result = tdnn_train(X, Y, delay_times, hiddenLayerSize)
% Second experiment: delays 21..30 with 10 repeats each.
result = [];
for delay_times = 21:30
    for repeat = 1:10
        result_one = tdnn_train(x_train, y_train, delay_times, 30);
        result = [result; result_one];
    end
end
dlmwrite('result/20161220_1_result_repeat_10_delay_21-30_hid_30.dat', result, 'delimiter', '\t');
whos
% NOTE(review): duplicate write of the same file as two lines above --
% confirm this repetition is intended.
dlmwrite('result/20161220_1_result_repeat_10_delay_21-30_hid_30.dat', result, 'delimiter', '\t');
result_one(2:3)
| 5-TDNN/TNDD_0.3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: py37
# language: python
# name: py37
# ---
# +
from transformers import (
BertConfig,
BertForTokenClassification,
BertTokenizer,)
import torch
from transformers import AutoModel
import transformers
# Load the pre-trained cased BERT backbone and keep its config handy.
model_name = "bert-base-cased"
model = AutoModel.from_pretrained(model_name)
configuration = model.config
# -
torch.__version__
transformers.__version__
# python
from transformers import BertTokenizer
# tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
# WordPiece tokenizer matching the cased BERT checkpoint.
tokenizer = BertTokenizer.from_pretrained("bert-base-cased")
import pandas as pd
from sklearn import preprocessing
import numpy as np
# CSV with columns "Sentence #", "Word" and "multiTag" ('|'-joined labels).
data_path = "../NER_test/multiple_tag_train.csv"
# +
def process_csv(data_path):
    """Read a CoNLL-style CSV and group words and tags by sentence.

    The "Sentence #" column is only filled on the first row of each
    sentence, so it is forward-filled before grouping. Returns two
    parallel arrays: per-sentence word lists and per-sentence tag lists.
    """
    frame = pd.read_csv(data_path, encoding="latin-1")
    # Forward-fill the sparse sentence identifiers so every row is labelled.
    frame.loc[:, "Sentence #"] = frame["Sentence #"].fillna(method="ffill")
    grouped = frame.groupby("Sentence #")
    sentences = grouped["Word"].apply(list).values
    tags = grouped["multiTag"].apply(list).values
    return sentences, tags
# -
sentences, tags = process_csv(data_path)
# Exploratory: repeat the steps of process_csv inline on a raw DataFrame.
df = pd.read_csv(data_path, encoding="latin-1")
df.loc[:, "Sentence #"] = df["Sentence #"].fillna(method="ffill")
# +
# Overwrites the arrays returned by process_csv with identical values.
sentences = df.groupby("Sentence #")["Word"].apply(list).values
tags = df.groupby("Sentence #")["multiTag"].apply(list).values
# -
# Reload and inspect the raw rows.
df = pd.read_csv(data_path, encoding="latin-1")
df.head(50)
# +
from torch.utils.data import Dataset
from sklearn.preprocessing import OneHotEncoder
class NER_Dataset(Dataset):
    """Dataset of (token ids, segment ids, multi-hot labels) triples for NER.

    Each tag cell may hold several labels joined by '|'; labels are encoded
    as a multi-hot vector per token.
    """
    # Read the pre-processed file and initialize parameters.
    def __init__(self, mode, tokenizer, data_path, labels):
        assert mode in ["train", "test"]  # for real training you would also want a dev set
        self.mode = mode
        # For very large data you would want iterator=True
        self.sentences, self.tags = process_csv(data_path)
        self.len = len(self.sentences)
        if mode != "test":
            # Map each label string to an integer id for one-hot encoding.
            self.label_map = {}
            for i in range(len(labels)):
                self.label_map[labels[i]] = i
            possible_labels = np.array(range(len(labels))).reshape(-1, 1)
            self.oneHotEncoder = OneHotEncoder()
            self.oneHotEncoder.fit(possible_labels)
        else:
            self.label_map = None
        self.tokenizer = tokenizer  # we will use the BERT tokenizer
        # Id of the "outside" tag, used to pad [CLS]/[SEP] positions.
        # NOTE(review): in "test" mode label_map is None, so this line raises
        # TypeError -- confirm the test path is ever exercised.
        self.O_label = self.label_map["O"]
    # Return one training / test example.
    def __getitem__(self, idx):
        if self.mode == "test":
            label_tensor = None
        else:
            # Pad the tag sequence with "O" for the [CLS] and [SEP] tokens.
            label = ["O"] + self.tags[idx] + ["O"]
            label = np.array(label)
            label = label.reshape(-1,1)
            # Convert each (possibly multi-) tag cell to a multi-hot row.
            label = np.apply_along_axis(self.split_one_hot_multiTags, 1, label)
            label_tensor = torch.tensor(label, dtype = torch.float32)
        # Build the sentence's BERT tokens and add the special markers.
        word_pieces = ['[CLS]']
        word_pieces += self.sentences[idx]
        word_pieces += ['[SEP]']
        ids = self.tokenizer.convert_tokens_to_ids(word_pieces)
        tokens_tensor = torch.tensor(ids)
        # Single-sentence input: all segment ids are 0.
        segments_tensor = torch.zeros_like(tokens_tensor)
        return (tokens_tensor, segments_tensor, label_tensor)
    def __len__(self):
        return self.len
    def split_one_hot_multiTags(self, tags):
        """Turn one '|'-joined tag cell into a single multi-hot vector."""
        # tags = ['B-org|Party|String']
        tags = tags[0]
        tags = tags.split("|")
        tags_num = list(map(lambda x: self.label_map[x], tags))
        #[5, 20, 23]
        tags_num = np.array(tags_num).reshape(-1,1)
        tags_one_hot = self.oneHotEncoder.transform(tags_num).toarray()
        # Sum the one-hot rows into one multi-hot vector.
        tags_one_hot = tags_one_hot.sum(axis = 0)
        #return torch.tensor(tags_one_hot, dtype = torch.float32)
        return tags_one_hot
# Build a Dataset dedicated to the training samples, using the BERT tokenizer.
df = pd.read_csv(data_path, encoding="latin-1")
# Collect the set of distinct labels across all '|'-joined tag cells.
labels = np.unique("|".join(list(df.multiTag)).split("|"))
print(f"labels: {labels}")
trainset = NER_Dataset("train", tokenizer=tokenizer, data_path=data_path, labels= labels)
# -
trainset.label_map
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pad_sequence
def create_mini_batch(samples):
    """Collate (tokens, segments, labels) samples into padded batch tensors.

    Returns (tokens, segments, attention_masks, labels); labels is None
    when the samples carry no label tensor (test mode).
    """
    token_seqs = [tokens for tokens, _, _ in samples]
    segment_seqs = [segments for _, segments, _ in samples]
    label_seqs = [label for _, _, label in samples]

    # Test-set samples carry no labels.
    padded_labels = None
    if label_seqs[0] is not None:
        padded_labels = pad_sequence(label_seqs, batch_first=True)

    # Zero-pad every sequence in the batch to the longest one.
    padded_tokens = pad_sequence(token_seqs, batch_first=True)
    padded_segments = pad_sequence(segment_seqs, batch_first=True)

    # Attention mask: 1 wherever a real (non-padding) token id sits, so
    # BERT attends only to those positions.
    attention_masks = (padded_tokens != 0).long()

    return padded_tokens, padded_segments, attention_masks, padded_labels
# BATCH_SIZE = 64: 10883MiB
BATCH_SIZE = 16*4
# DataLoader that pads/collates each mini-batch with create_mini_batch.
trainloader = DataLoader(trainset, batch_size=BATCH_SIZE,
                        collate_fn=create_mini_batch)
# +
# Pull one batch and inspect the tensor shapes and contents.
data = next(iter(trainloader))
tokens_tensors, segments_tensors, \
masks_tensors, label_ids = data
print(f"""
tokens_tensors.shape = {tokens_tensors.shape}
{tokens_tensors}
------------------------
segments_tensors.shape = {segments_tensors.shape}
{segments_tensors}
------------------------
masks_tensors.shape = {masks_tensors.shape}
{masks_tensors}
------------------------
label_ids.shape = {label_ids.shape}
{label_ids}
""")
# -
#
# NUM_LABELS = len(labels)
#
# model = BertForTokenClassification.from_pretrained(
# model_name, num_labels=NUM_LABELS)
from transformers import BertPreTrainedModel, BertModel
from torch import nn
import transformers
from transformers.modeling_outputs import TokenClassifierOutput
# +
class BertForTokenMultiLabelClassification(BertPreTrainedModel):
    """BERT token classifier that supports multiple labels per token.

    Differences from the stock ``BertForTokenClassification``:
    - the classification head's logits go through a sigmoid, so each of the
      ``num_labels`` outputs is an independent per-token probability;
    - the loss is ``BCELoss`` against multi-hot label tensors of shape
      ``(batch, seq_len, num_labels)`` instead of softmax cross entropy.
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler"]

    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.bert = BertModel(config)
        self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)
        self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        labels=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
    ):
        r"""
        labels (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, num_labels)`, `optional`):
            Multi-hot label tensor for computing the token classification
            loss; each entry is 0/1 and a token may have several active labels.
        """
        # The notebook always consumes a TokenClassifierOutput, so default to
        # return_dict=True rather than self.config.use_return_dict.
        return_dict = return_dict if return_dict is not None else True

        # NOTE(review): inputs_embeds / output_attentions /
        # output_hidden_states / return_dict are deliberately not forwarded
        # to the encoder here (they were commented out in the original).
        outputs = self.bert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
        )

        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output)
        # Sigmoid per label -> independent per-token probabilities (multi-label).
        logits = torch.sigmoid(self.classifier(sequence_output))

        loss = None
        if labels is not None:
            loss_fct = torch.nn.BCELoss()
            if attention_mask is not None:
                # Only compute the loss on real (non-padding) token positions.
                active = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)[active]
                active_labels = labels.view(-1, self.num_labels)[active]
                loss = loss_fct(active_logits, active_labels)
            else:
                # BUGFIX: BCELoss requires input and target of the same shape.
                # The previous labels.view(-1) produced a 1-D target against
                # (N, num_labels) logits and broke this branch.
                loss = loss_fct(logits.view(-1, self.num_labels),
                                labels.view(-1, self.num_labels))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
# -
# Instantiate the multi-label model with pretrained BERT weights; the new
# classifier head is randomly initialised.
NUM_LABELS = len(labels)
model = BertForTokenMultiLabelClassification.from_pretrained(model_name, num_labels=NUM_LABELS)
model
# high-level show modules
print("""
name module
----------------------""")
for name, module in model.named_children():
    if name == "bert":
        # For the encoder, only list its direct children instead of dumping
        # the whole (very large) module repr.
        for n, _ in module.named_children():
            print(f"{name}:{n}")
    else:
        print("{:15} {}".format(name, module))
# Pick GPU when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("device:", device)
model = model.to(device)

# Smoke-test a single forward pass without building gradients.
with torch.no_grad():
    for data in trainloader:
        # BUGFIX: move tensors to the selected device instead of the
        # hard-coded "cuda:0", so this cell also runs on CPU-only machines.
        data = [t.to(device) for t in data if t is not None]
        tokens_tensors, segments_tensors, masks_tensors = data[:3]
        outputs = model(input_ids=tokens_tensors,
                        token_type_ids=segments_tensors,
                        attention_mask=masks_tensors)
        break
# +
# %%time
# Switch to training mode (enables dropout).
model.train()
# Use the Adam optimizer to update all parameters of the classification model.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
EPOCHS = 16 #
for epoch in range(EPOCHS):
    running_loss = 0.0
    for data in trainloader:
        tokens_tensors, segments_tensors, \
            masks_tensors, labels = [t.to(device) for t in data]
        #
        optimizer.zero_grad()
        # forward pass
        outputs = model(input_ids=tokens_tensors,
                        token_type_ids=segments_tensors,
                        attention_mask=masks_tensors,
                        labels=labels)
        # The model returns (loss, logits, ...) when labels are supplied.
        loss = outputs[0]
        # backward
        loss.backward()
        optimizer.step()
        # add to batch loss
        running_loss += loss.item()
    #_, acc = get_predictions(model, trainloader, compute_acc=True)
    acc = 0 # no accurate calculation to save training time.
    #print('[epoch %d] loss: %.3f, acc: %.3f' %
    #      (epoch + 1, running_loss, acc))
    print('[epoch %d] loss: %.3f' %
          (epoch + 1, running_loss))
# -
# Re-print the final epoch's loss after training finished.
print('[epoch %d] loss: %.3f' %
      (epoch + 1, running_loss))
# NOTE(review): the same state_dict is saved twice under two names;
# presumably only one checkpoint is needed — confirm before cleaning up.
torch.save(model.state_dict(), "./test_models/0704_multi_label_16_epoch")
torch.save(model.state_dict(), "./test_models/0704_16_epoch")
# +
# Invert the dataset's label -> id table so predictions can be decoded
# back into label strings.
label_id_mapping = trainset.label_map
id_label_mapping = {idx: label for label, idx in label_id_mapping.items()}
def test_model(model, sentence, device = "cpu"):
    """Run `model` on one raw sentence and return (tokens, labels, logits).

    Returns the wordpiece tokens of `sentence`, the predicted label string
    per token (argmax over the label dimension), and the raw logits; the
    [CLS]/[SEP] positions are stripped from the returned labels and logits.
    Reads `tokenizer` and `id_label_mapping` from the notebook scope.
    """
    tokenized_sentence = torch.tensor([tokenizer.encode(sentence)])
    # BUGFIX: len(tokenized_sentence) is the batch size (1), so the old code
    # built length-1 segment-id / attention-mask tensors. Build them with the
    # actual sequence length instead (all-zero segments, all-one mask).
    pos = torch.zeros_like(tokenized_sentence)
    tags = torch.ones_like(tokenized_sentence)
    model = model.to(device)
    outputs = model(input_ids=tokenized_sentence.to(device),
                    token_type_ids=pos.to(device),
                    attention_mask=tags.to(device))
    logits = outputs[0]
    # Pick the highest-scoring label id at every position.
    _, pred_labels = torch.max(logits, 2)
    out_labels = []
    for row in pred_labels:
        result = list(map(lambda x: id_label_mapping[int(x)], row))
        out_labels.append(result)
    # Drop the [CLS]/[SEP] positions before returning.
    return tokenizer.tokenize(sentence), out_labels[0][1:-1], logits[:, 1:-1]
# -
# Reload the trained weights into a fresh model and run it on a demo sentence.
model2 = BertForTokenMultiLabelClassification.from_pretrained(model_name, num_labels=NUM_LABELS)
model2.load_state_dict(torch.load("./test_models/0704_multi_label_16_epoch"))
sentence = "Dan will be deemed to have completed its delivery obligations before 2021-07-05 if in Niall's opinion, the Jeep Car satisfies the Acceptance Criteria, and Niall notifies Dan in writing that it is accepting the Jeep Car."
sen, pred, logits = test_model(model2, sentence, device = 'cpu')
np.array(sen)
np.array(pred)
logits.shape
# Print each wordpiece next to its predicted label.
for i in range(len(sen)):
    print(f"{sen[i]}: {pred[i]}")
# Keep the (seq_len, num_labels) score matrix of the single batch element
# for the interactive inspection cells below.
out = logits[0]
from __future__ import print_function
from ipywidgets import interact, interactive, fixed, interact_manual
import ipywidgets as widgets
def interact_word(i):
    """Print token `sen[i]` and its per-label sigmoid scores from `out[i]`."""
    print(i)
    print(sen[i])
    target = out[i]
    # Use a distinct loop variable: the original reused `i`, shadowing the
    # token-index parameter inside the loop.
    for j in range(len(target)):
        print(f"{j} {id_label_mapping[j].ljust(6)} \t: {target[j]:.5f}")
sen[12]
# Interactive slider to browse per-token label scores.
interact(lambda x: interact_word(x), x=widgets.IntSlider(min=0, max=len(sen)-1, step=1, value=0))
# +
# Manual inspection of individual token positions (same output as
# interact_word, duplicated per cell for side-by-side comparison).
i = 3
print(sen[i])
target = out[i]
for i in range(len(target)):
    print(f"{i} {id_label_mapping[i].ljust(6)} \t: {target[i]:.5f}")
# +
# NOTE(review): sen[i-1] vs out[i] — the off-by-one likely compensates for
# the stripped [CLS] token; confirm the intended alignment.
i = 5
print(sen[i-1])
target = out[i]
for i in range(len(target)):
    print(f"{i} {id_label_mapping[i].ljust(6)} \t: {target[i]:.5f}")
# +
i = 6
print(sen[i-1])
target = out[i]
for i in range(len(target)):
    print(f"{i} {id_label_mapping[i].ljust(6)} \t: {target[i]:.5f}")
# -
| Practice/PyTorch/BERT_practice/BERT_BASE_Custom_model_multi_label.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Calculating the stellar wind
# +
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as fits
import os
import glob
from astropy.table import Table
from astropy.io import ascii
import astropy.units as u
import astropy.constants as const
from scipy.interpolate import interpolate
from craftroom import resample
from astropy.convolution import convolve, Box1DKernel
from scipy.io.idl import readsav
from astropy import constants as const
#matplotlib set up
# %matplotlib inline
from matplotlib import rcParams
rcParams["figure.figsize"] = (14, 5)
rcParams["font.size"] = 20
# -
# Fitted accretion-rate results per system.
rates = Table.read('tables/pceb_fit_results_rates.csv')
rates
# Elements tracked, their atomic masses, and their solar mass fractions.
els = ['si', 'c', 'o', 's']
el_mass = [28.085, 12.011, 15.999, 32.06]
sol_frac = [6.649e-4, 2.365e-3,5.733e-3, 3.093e-4]
# +
def find_v(a, Rrd, Mrd, Mwd, p):
    """Relative wind velocity seen by the white dwarf at separation `a`.

    Combines the wind speed (approximated by the red dwarf's escape
    velocity) in quadrature with the white dwarf's orbital velocity.
    All arguments are astropy Quantities; the result is in SI units.
    """
    a = a.to(u.m)
    Rrd = Rrd.to(u.m)
    Mrd = Mrd.to(u.kg)
    Mwd = Mwd.to(u.kg)
    p = p.to(u.s)

    # Orbital velocity of the white dwarf about the centre of mass.
    v_orbital = (2 * np.pi / p) * a * (Mrd / (Mrd + Mwd))
    # Wind speed, approximated as the escape velocity from the red dwarf.
    v_wind = ((2 * const.G * Mrd) / Rrd) ** 0.5
    return (v_wind ** 2 + v_orbital ** 2) ** 0.5
def mass_loss(mdot, Mwd, a, v):
    """Donor mass-loss rate implied by an accretion rate `mdot`.

    Scales the observed accretion rate onto a white dwarf of mass `Mwd` at
    separation `a` by (v^4 a^2) / (G^2 Mwd^2) — the inverse of the wind
    capture fraction for wind speed `v` — to recover the total wind rate.
    All arguments are astropy Quantities.
    """
    mdot = mdot.to(u.g / u.s)
    Mwd = Mwd.to(u.kg)
    a = a.to(u.m)
    v = v.to(u.m / u.s)

    inverse_capture = (v ** 4 * a ** 2) / (const.G ** 2 * Mwd ** 2)
    return mdot * inverse_capture
def total_infer(mdot, el):
    """Scale an element accretion rate up to a total (all-element) rate.

    Divides `mdot` by the solar mass fraction of element `el`, read from
    solar_abd.tsv (columns Atom_X and mass_fraction).
    """
    abundances = np.genfromtxt('solar_abd.tsv', names=True, delimiter='\t',
                               dtype=None, encoding=None)
    mass_fraction = abundances['mass_fraction'][np.where(abundances['Atom_X'] == el)][0]
    return mdot / mass_fraction
def p_to_a(m1, m2, p):
    """Orbital separation from Kepler's third law for masses m1, m2 and period p."""
    m1 = m1.to(u.kg)
    m2 = m2.to(u.kg)
    p = p.to(u.s)
    a_cubed = (const.G * (m1 + m2) * p ** 2) / (4 * np.pi ** 2)
    return a_cubed ** (1 / 3)
def find_wind(mdot, Mwd, p, Rrd, Mrd, el):
    """Total donor wind rate implied by accreting element `el` at rate `mdot`.

    Chains the helpers: separation from the period (Kepler), relative wind
    velocity, wind-capture inversion, then scaling by the element's solar
    mass fraction to a total rate.
    """
    separation = p_to_a(Mwd, Mrd, p)
    v_rel = find_v(separation, Rrd, Mrd, Mwd, p)
    element_rate = mass_loss(mdot, Mwd, separation, v_rel)
    return total_infer(element_rate, el.capitalize())
# +
# For every cooler system, infer a wind rate from each detected element and
# average them; plot log(wind rate) against donor mass.
wind_rates = []
wind_errors = []
masses = []
sptypes = []
for row in rates:
    # if row['Target'] == 'WD0710+741':
    if row['Teff'] < 30000:
        masses.append(row['mass_est'])
        sptypes.append(row['spt'])
        mdots = []
        for i, el in enumerate(els):
            # Only use elements with a measured (non-zero) error.
            if row['{}_e'.format(el)] > 0.0:
                acc = row['F{}'.format(el)]*(u.g/u.s)
                Mwd, Mrd = row['mass']*u.M_sun, row['mass_est']*u.M_sun
                Rrd = row['rad_est']*u.R_sun
                p = row['porb_min']*u.min
                # print(p)
                mdot = find_wind(acc, Mwd,p, Rrd, Mrd, el)
                mdots.append(mdot.value)
        # print(mdots)
        # mdots = np.array(mdots)*u.g/u.s
        # print(np.mean(mdots))
        # Mean/std over the per-element inferred rates for this system.
        wind_rates.append(np.mean(mdots))
        wind_errors.append(np.std(mdots))
# print(wind_rates)
# Convert g/s to solar masses per year, then to log10 for plotting.
wind_rates = np.array(wind_rates)*(u.g/u.s).to(u.M_sun/u.yr)
wind_errors = np.array(wind_errors)*(u.g/u.s).to(u.M_sun/u.yr)
log_wind_rates = np.log10(wind_rates)
# 0.434 ~ 1/ln(10): propagate relative error into log10 space.
log_wind_errors = 0.434 * (wind_errors/wind_rates)
# print(np.log10(wind_rates))
masses= np.array(masses)
plt.errorbar(masses, log_wind_rates, yerr=log_wind_errors, marker='o', ls='none')
plt.xlim(0.49, 0.051)
# Reference line at the solar mass-loss rate.
plt.axhline(np.log10(2e-14), c='C1', ls='--')
plt.xlabel(r'Mass (M$_{\odot}$)')
plt.ylabel(r'\.M (M$_{\odot}$yr$^{-1}$)')
plt.tight_layout()
# print(len(masses))
#print(masses)
# -
# Literature wind measurements to compare this work against.
rates_tab= np.genfromtxt('wind_measurements.csv', delimiter=',', dtype=None, encoding=None, names=True)
# +
sol_n = 2e-14
sol = np.log10(2e-14)
# new_types = [4, 5, 3, 4.0, 4.5, 3.5, 2.5, 4.5, 3.0, 3.5, 5.0, 6, 4.0, 4.5]
# row_1 = [4, 5, 3, 4.5, 3.5, 2.5, 3.5, 6, 8]
# row_2 = [4.0, 4.5,3.0, 5.0,]
# row_3 = [4.0, 4.5]
# Split measured values (with errors) from upper limits (e_mdot == 0).
data_m = rates_tab[rates_tab['e_mdot'] != 0]
data_u = rates_tab[rates_tab['e_mdot'] == 0]
fig, ax =plt.subplots(figsize=(10,6))
plt.errorbar(data_m['spn'][data_m['method'] =='pceb'], data_m['log_mdot'][data_m['method'] =='pceb'],
             yerr= data_m['e_mdot'][data_m['method'] =='pceb'], marker='o', ls='none', label='Binaries')
plt.errorbar(data_m['spn'][data_m['method'] !='pceb'], data_m['log_mdot'][data_m['method'] !='pceb'],
             yerr= data_m['e_mdot'][data_m['method'] !='pceb'], marker='o', ls='none', label='Single Stars')
# Upper limits drawn with downward arrows (uplims=True).
plt.errorbar(data_u['spn'][data_u['method'] !='pceb'], data_u['log_mdot'][data_u['method'] !='pceb'], yerr=0.5, marker='o', ls='none', uplims=True, c='C1')
# Convert spectral types like 'M4.5' to the numeric subtype 4.5 and jitter
# the x positions slightly so overlapping points remain visible.
newtypes = [float(spt[1:]) for spt in sptypes]
plt.errorbar(newtypes+np.random.normal(0, 0.1, len(newtypes)), log_wind_rates, yerr=log_wind_errors, marker='o', ls='none', c='C3', label ='This work')
plt.xlabel('Spectral Type (M$n$)')
plt.ylabel('log\,[\.M] ($\mathrm{M}_{\odot}\mathrm{yr}^{-1}$)')
ax.tick_params(
    axis='x',          # changes apply to the x-axis
    which='minor',     # both major and minor ticks are affected
    top=False,
    bottom=False)      # ticks along the bottom edge are off
plt.xlim(1.5, 8.5)
plt.axhline(sol, ls='--', c='C2', alpha =0.7, label='Solar')
plt.ylim(-16.9, -13.1)
# plt.scatter(row_1, np.full(len(row_1), -13), marker='x', c='C3', label = 'New Systems')
# plt.scatter(row_2, np.full(len(row_2), -13.2), marker='x', c='C3')
# plt.scatter(row_3, np.full(len(row_3), -12.8), marker='x', c='C3')
plt.legend(frameon=True, loc=4)
plt.tight_layout()
# plt.savefig('../plots/wind_sota_150.pdf', dpi=150)
# -
# Scratch cells: inspect the numeric spectral subtypes and constants.
[float(spt[1:]) for spt in sptypes]
# NOTE(review): `a` is not defined as a string at this point — this cell
# looks like a leftover and likely errors; confirm before keeping.
a[0].upper()
const.G
# +
# How the wind-capture scaling factor varies with assumed wind speed for a
# representative system (a = 0.7 Rsun, Mwd = 0.389 Msun).
a= 0.7*u.Rsun
Mwd = 0.389*u.Msun
vs = np.arange(50, 601, 50)*u.km/u.s
scales = np.array([(v**4*a**2)/(const.G**2*Mwd**2) for v in vs])
plt.plot(vs, scales)
plt.axhline(1, c='C1')
# -
| .ipynb_checkpoints/windv1-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ddc_env_final
# name: ddc_env_final
# ---
# +
# %load_ext autoreload
# %autoreload 2
# Occupy a GPU for the model to be loaded
# %env CUDA_DEVICE_ORDER=PCI_BUS_ID
# GPU ID, if occupied change to an available GPU ID listed under !nvidia-smi
# %env CUDA_VISIBLE_DEVICES=0
import numpy as np
import rdkit
from rdkit import Chem
import h5py, ast, pickle
from ddc_pub import ddc_v3 as ddc
# -
# Load dataset
# Binary-encoded molecules from the ChEMBL25 training set.
dataset_filename = "datasets/CHEMBL25_TRAIN_MOLS.h5"
with h5py.File(dataset_filename, "r") as f:
    binmols = f["mols"][:]
# +
# All apriori known characters of the SMILES in the dataset
charset = "Brc1(-23[nH])45C=NOso#FlS67+89%0"
# Apriori known max length of the SMILES in the dataset
maxlen = 128
# Name of the dataset
name = "ChEMBL25_TRAIN"
dataset_info = {"charset": charset, "maxlen": maxlen, "name": name}
# +
# Initialize a model
# x == y: the autoencoder reconstructs its own input (heteroencoder setup).
model = ddc.DDC(x              = binmols,      # input
                y              = binmols,      # output
                dataset_info   = dataset_info, # dataset information
                noise_std      = 0.1,          # std of the noise layer
                lstm_dim       = 512,          # breadth of LSTM layers
                dec_layers     = 3,            # number of decoding layers
                codelayer_dim  = 128,          # dimensionality of latent space
                batch_size     = 128)          # batch size for training
# -
model.fit(epochs              = 100,         # number of epochs
          lr                  = 1e-3,        # initial learning rate for Adam, recommended
          model_name          = "new_model", # base name to append the checkpoints with
          checkpoint_dir      = "",          # save checkpoints in the notebook's directory
          mini_epochs         = 10,          # number of sub-epochs within an epoch to trigger lr decay
          save_period         = 50,          # checkpoint frequency (in mini_epochs)
          lr_decay            = True,        # whether to use exponential lr decay or not
          sch_epoch_to_start  = 500,         # mini-epoch to start lr decay (bypassed if lr_decay=False)
          sch_lr_init         = 1e-3,        # initial lr, should be equal to lr (bypassed if lr_decay=False)
          sch_lr_final        = 1e-6,        # final lr before finishing training (bypassed if lr_decay=False)
          patience            = 25)          # patience for Keras' ReduceLROnPlateau (bypassed if lr_decay=True)
# Save the final model
model.save("new_model")
| demo_heteroencoder_train.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Credit Card Fraud
# We will be detecting credit card fraud based on the different features of our dataset with 3 different models. Here is the Logistic Regression one.
#
# We're looking to minimize the False Negative Rate or FNR.
#
# Since the dataset is unbalanced, we can try two techniques that may help us have better predictions:
#
# - Adding some noise (gaussian) to the fraud data to create more and reduce the imbalance
# - Randomly sample the fraud data and train k models and average them out (or choose the best)
#
#
import numpy as np
import sklearn as sk
import pandas as pd
import matplotlib.pyplot as plt
from pandas_ml import ConfusionMatrix
import pandas_ml as pdml#pandas, scikit-learn and xgboost integration
from sklearn.preprocessing import scale
import random
# +
# May have to do this...
# #!pip install imblearn
# #!pip install --upgrade sklearn
# -
print(pd.__version__)
# Load the credit-card transactions dataset.
df = pd.read_csv('creditcard.csv', low_memory=False)
print(type(df))
print(df.shape)
print(df.columns)
# print(df.describe())
df.head(5)
# Shuffle the rows so the later train/test split is not order dependent.
df = df.sample(frac=1).reset_index(drop=True)
# NOTE(review): bare `print` below is a no-op leftover (references the
# function without calling it).
print
df.head()
# pandas.DataFrame.loc
# Access a group of rows and columns by label(s) or a boolean array.
# Split the data by class to visualise how imbalanced it is.
frauds = df.loc[df['Class'] == 1]
non_frauds = df.loc[df['Class'] == 0]
print("We have", len(frauds), "fraud data points and", len(non_frauds), "nonfraudulent data points.")
# Separate scatter plots per class.
frauds.plot.scatter(x='Amount', y='Class', color='Orange', label='Fraud')
non_frauds.plot.scatter(x='Amount', y='Class', color='Blue', label='Normal')
plt.show()
# print("This feature looks important based on their distribution with respect to class.")
# print("We will now zoom in onto the fraud data to see the ranges of amount just for fun.")
# Overlay both classes on the same axes for direct comparison.
ax = frauds.plot.scatter(x='Amount', y='Class', color='Orange', label='Fraud')
non_frauds.plot.scatter(x='Amount', y='Class', color='Blue', label='Normal', ax=ax)
plt.show()
print("This feature looks important based on their distribution with respect to class.")
print("We will now zoom in onto the fraud data to see the ranges of amount just for fun.")
bx = frauds.plot.scatter(x='Amount', y='Class', color='Orange', label='Fraud')
plt.show()
# Same comparison for feature V22.
ax = frauds.plot.scatter(x='V22', y='Class', color='Orange', label='Fraud')
non_frauds.plot.scatter(x='V22', y='Class', color='Blue', label='Normal', ax=ax)
plt.show()
print("This feature may not be very important because of the similar distribution.")
# # Logistic Regression (vanilla)
from sklearn import datasets, linear_model
from sklearn.preprocessing import PolynomialFeatures
from sklearn.model_selection import train_test_split
# +
# Features = all columns except the trailing Class label.
X = df.iloc[:,:-1]
y = df['Class']
print("X and y sizes, respectively:", len(X), len(y))
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.35)
print("Train and test sizes, respectively:", len(X_train), len(y_train), "|", len(X_test), len(y_test))
# Check how the (rare) fraud class is distributed across the splits.
print("Total number of frauds:", len(y.loc[df['Class'] == 1]), len(y.loc[df['Class'] == 1])/len(y))
print("Number of frauds on y_test:", len(y_test.loc[df['Class'] == 1]), len(y_test.loc[df['Class'] == 1]) / len(y_test))
print("Number of frauds on y_train:", len(y_train.loc[df['Class'] == 1]), len(y_train.loc[df['Class'] == 1])/len(y_train))
# -
# Baseline: plain logistic regression with weak regularisation (large C).
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(X_train, y_train)
print("Score: ", logistic.score(X_test, y_test))
y_predicted = np.array(logistic.predict(X_test))
y_right = np.array(y_test)
# Confusion matrix and derived statistics (including FNR, the target metric).
confusion_matrix = ConfusionMatrix(y_right, y_predicted)
print("Confusion matrix:\n%s" % confusion_matrix)
confusion_matrix.plot(normalized=True)
plt.show()
confusion_matrix.print_stats()
print("FNR is {0}".format(confusion_matrix.stats()['FNR']))
# # Logistic Regression with SMOTE over-sampling
# Oversample the minority (fraud) class with SMOTE on the training set only.
df2 = pdml.ModelFrame(X_train, target=y_train)
sampler = df2.imbalance.over_sampling.SMOTE()
sampled = df2.fit_sample(sampler)
print("Size of training set after over sampling:", len(sampled))
# +
# Column 0 of the ModelFrame is the target; the rest are features.
X_train_sampled = sampled.iloc[:,1:]
y_train_sampled = sampled['Class']
# NOTE: Scaling makes it worse.
# X_train_sampled = scale(X_train_sampled)
logistic = linear_model.LogisticRegression(C=1e5)
logistic.fit(X_train_sampled, y_train_sampled)
# Evaluate on the untouched (not oversampled) test split.
print("Score: ", logistic.score(X_test, y_test))
# +
y_predicted1 = np.array(logistic.predict(X_test))
y_right1 = np.array(y_test)
confusion_matrix1 = ConfusionMatrix(y_right1, y_predicted1)
print("Confusion matrix:\n%s" % confusion_matrix1)
confusion_matrix1.plot(normalized=True)
plt.show()
confusion_matrix1.print_stats()
# -
print("FNR is {0}".format(confusion_matrix1.stats()['FNR']))
# # Logistic Regression with balanced class weights
# Random search over C for class-weight-balanced logistic regression,
# keeping the C with the lowest false-negative rate on the test split.
best_c, best_fnr = 1, 1
for _ in range(20):
    c = random.uniform(1, 10000)
    logistic = linear_model.LogisticRegression(C=c, class_weight="balanced")
    logistic.fit(X_train, y_train)
    #print("Score: ", logistic.score(X_test, y_test))
    y_predicted2 = np.array(logistic.predict(X_test))
    y_right2 = np.array(y_test)
    confusion_matrix2 = ConfusionMatrix(y_right2, y_predicted2)
    #print("Confusion matrix:\n%s" % confusion_matrix2)
    #confusion_matrix2.plot(normalized=True)
    #plt.show()
    #confusion_matrix2.print_stats()
    fnr = confusion_matrix2.stats()['FNR']
    if fnr < best_fnr:
        best_fnr = fnr
        best_c = c
print("Best C is {0} with best FNR of {1}.".format(best_c, best_fnr))
| kaggle/ml-fraud-detection-master/logistic-regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import classification_report, accuracy_score
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn import preprocessing
from numpy import unique
import matplotlib.pyplot as plt
import imblearn
# Load the raw policy extract; the flag below picks the categorical encoding.
data = pd.read_csv('training_data_raw.csv')
use_hot_encoding = True # False == LabelEncoding
def isnumber(x):
    """Return `x` unchanged if it parses as a float, otherwise 0.

    Used to sanitise currency columns after stripping '$' and ',' so the
    subsequent astype(float) cannot fail on leftover garbage values.
    """
    try:
        float(x)
        return x
    # BUGFIX: catch only the errors float() can raise; the original bare
    # `except` also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return 0
# Columns kept for modelling.
d_columns = ['Agency', 'Policy Service Status', 'Admin System', 'Line of Business', 'Product Group', 'Product Name'
             , 'Product Type', 'Policy Age', 'Annualized Billed Premium', 'Face Amount', 'DI Benefit Amount'
             , 'Initial LTC Benefit Amount', 'Daily LTC Benefit Amount', 'Fund Balance', 'Insureds Customer Status'
             , 'Insureds Death Status', 'Insureds Age', 'Insureds Gender', 'Owners Customer Status', 'Owners Death Status'
             , 'Owners Age', 'Owners Gender', 'Owner\'s Income']
# Categorical columns that would need encoding.
# NOTE(review): 'Policy Service Status' is listed twice here.
encode_columns = ['Agency', 'Policy Service Status', 'Policy Service Status', 'Admin System', 'Line of Business'
                  , 'Product Group','Product Type', 'Product Name', 'Issue Date', 'Insureds Customer Status'
                  , 'Insureds Death Status', 'Insureds Gender', 'Insureds City', 'Insureds State'
                  , 'Owners Customer Status', 'Owners Death Status', 'Owners Gender']
# Currency/numeric columns that may contain '$', ',' or junk text.
replace_dollar_sign = ['Annualized Billed Premium', 'Face Amount', 'DI Benefit Amount', 'Policy Age'
                       , 'Insured\'s Income', 'Owner\'s Income', 'Owners Age'
                       , 'Initial LTC Benefit Amount', 'Daily LTC Benefit Amount', 'Fund Balance']
# Replace $ and , from the columns mentioned above
for col in replace_dollar_sign:
    # Strip every character that is not a digit, '.', or '-'.
    data[col].replace(regex=True, inplace=True, to_replace=r'[^0-9.\-]', value=r'')
    # Anything still unparseable becomes 0, then cast to float.
    data[col] = data[col].map(isnumber)
    data[col] = data[col].astype(float)
# Inspect cardinality and dtype of every column.
for i in data.columns:
    print(i)
    print("Unique = ", data[i].value_counts().count())
    print('Type = ', data[i].dtype)
    print("-------------------------")

# Keep only the modelling columns and drop incomplete rows.
data = data[d_columns]
data = data.dropna()
data = data.reset_index(drop=True)

# BUGFIX: `le` was created only inside the label-encoding branch, so the
# one-hot branch (the default path, use_hot_encoding=True) crashed with a
# NameError when label-encoding the target column. Create it up front.
le = preprocessing.LabelEncoder()
if not use_hot_encoding:
    # Label-encode every categorical column in place.
    for col in data.columns:
        if data[col].dtype == object:
            data[col] = le.fit_transform(data[col].values)
else:
    # One-hot encode the features but label-encode the target column.
    data['Product Name'] = le.fit_transform(data['Product Name'].values)
    data = pd.get_dummies(data)
# +
# Drop two under-represented product classes, then scale and split.
for i in [20, 9]:
    data = data.drop(data[data['Product Name'] == i].index)
X = data.iloc[:,data.columns != 'Product Name']
y = data['Product Name']
X = preprocessing.scale(X)
# smote = imblearn.over_sampling.SMOTE()
# X, y = smote.fit_resample(X, y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# -
# k-nearest-neighbours baseline.
neigh = KNeighborsClassifier(n_neighbors=10)
neigh.fit(X_train, y_train)
y_pred = neigh.predict(X_test)
print(classification_report(y_test, y_pred, target_names = [str(integer) for integer in neigh.classes_]))
accuracy_score(y_test, y_pred)
# Class distribution of the target.
ax = sns.countplot(y,label="Product Name")
data['Product Name'].value_counts()
unique(data['Product Name'])
from sklearn.model_selection import cross_validate, RepeatedStratifiedKFold, cross_val_score
model = KNeighborsClassifier()
print(cross_val_score(model, X, y, cv=3))
data['Product Name'].dtype == object
# Gradient-boosted trees comparison with feature importances.
from xgboost import XGBClassifier
from xgboost import plot_importance
model = XGBClassifier()
model.fit(X_train,y_train)
plot_importance(model)
plt.show()
y_pred = model.predict(X_test)
target_names = [str(integer) for integer in neigh.classes_]
print(classification_report(y_test, y_pred, target_names=target_names))
data
# NOTE(review): scaling the whole frame here (after modelling) replaces the
# DataFrame with a bare array — looks like a leftover scratch line.
data = preprocessing.scale(data)
| product-name/excercise.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + deletable=true editable=true
# %matplotlib inline
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import numpy as np
# + deletable=true editable=true
# Line plot: misclassification and F1 for base vs league-specific models,
# one point per league.
base_scores = [
    19.1489,
    18.3099,
    19.5969,
    20.7447,
    18.0791,
]
league_scores = [
    20.2128,
    18.662,
    17.9246,
    20.2128,
    17.7966,
]
base_f1s = [
    73.1343,
    73.7374,
    72.0721,
    70.8955,
    75.5725,
]
league_f1s = [
    71.2121,
    73.0964,
    75.1092,
    71.4286,
    76.2264,
]
x_vals = [0, 1, 2, 3, 4]
plt.xlabel('Leagues')
plt.xticks(x_vals, ['Premier League', 'Bundesliga', 'La Liga', 'Ligue Un', 'Serie A'])
plt.ylabel('% Score')
plt.plot(x_vals, base_scores, 'r', label='Base Misclassification')
plt.plot(x_vals, league_scores, 'b', label='League-Specific Misclassification')
plt.plot(x_vals, base_f1s, 'g', label='Base F1')
plt.plot(x_vals, league_f1s, 'y', label='League-Specific F1')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.axis([0, len(x_vals), 0, 100])
plt.show()
# + deletable=true editable=true
# Grouped bar chart of the misclassification scores (same data as above).
base_scores = [
    19.1489,
    18.3099,
    19.5969,
    20.7447,
    18.0791,
]
league_scores = [
    20.2128,
    18.662,
    17.9246,
    20.2128,
    17.7966,
]
base_f1s = [
    73.1343,
    73.7374,
    72.0721,
    70.8955,
    75.5725,
]
league_f1s = [
    71.2121,
    73.0964,
    75.1092,
    71.4286,
    76.2264,
]
fig, ax = plt.subplots()
index = np.arange(len(base_scores))
bar_width = 0.35
opacity = 0.75
# NOTE(review): the second assignment reuses the name base_rects; only the
# handle is overwritten, both bar groups are still drawn.
base_rects = plt.bar(index, base_scores, bar_width, alpha=opacity, color='b', label='Base')
base_rects = plt.bar(index + bar_width, league_scores, bar_width, alpha=opacity, color='r', label='League')
plt.xlabel('Test Dataset')
plt.ylabel('Misclassification Score')
plt.title('Misclassification Scores - Base and by League')
plt.xticks(index + bar_width, ('EPL', 'Bundesliga', 'La Liga', 'Ligue Un', 'Serie A'))
plt.legend()
plt.tight_layout()
plt.show()
# + deletable=true editable=true
# Grouped bar chart of the F1 scores (same data again).
base_scores = [
    19.1489,
    18.3099,
    19.5969,
    20.7447,
    18.0791,
]
league_scores = [
    20.2128,
    18.662,
    17.9246,
    20.2128,
    17.7966,
]
base_f1s = [
    73.1343,
    73.7374,
    72.0721,
    70.8955,
    75.5725,
]
league_f1s = [
    71.2121,
    73.0964,
    75.1092,
    71.4286,
    76.2264,
]
fig, ax = plt.subplots()
index = np.arange(len(base_scores))
bar_width = 0.35
opacity = 0.75
base_rects = plt.bar(index, base_f1s, bar_width, alpha=opacity, color='b', label='Base')
base_rects = plt.bar(index + bar_width, league_f1s, bar_width, alpha=opacity, color='r', label='League')
plt.xlabel('Test Dataset')
plt.ylabel('F1 Score')
plt.title('F1 Scores - Base and by League')
plt.xticks(index + bar_width, ('EPL', 'Bundesliga', 'La Liga', 'Ligue Un', 'Serie A'))
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.tight_layout()
plt.show()
# + deletable=true editable=true
# Misclassification per model family across four seasons.
gradient_boosting = [
    20.0234,
    19.9063,
    20.6647,
    19.6721,
]
random_forest = [
    22.3068,
    21.1944,
    21.8384,
    23.0094,
]
svm = [
    22.9508,
    19.6721,
    23.5948,
    23.5948,
]
neural_net = [
    19.4379,
    19.9063,
    19.9649,
    18.911,
]
x_vals = [0, 1, 2, 3]
plt.xlabel('Season Ending')
plt.xticks(x_vals, ['\'13', '\'14', '\'15', '\'16'])
plt.ylabel('Misclassification Score')
plt.plot(x_vals, gradient_boosting, 'r', label='Gradient Boosting')
plt.plot(x_vals, random_forest, 'b', label='Random Forest')
plt.plot(x_vals, svm, 'g', label='SVM')
plt.plot(x_vals, neural_net, 'y', label='Neural Network')
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.axis([0, len(x_vals), 15, 30])
plt.show()
| ExperimentAnalysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Demo 3: POI Clustering
# +
import warnings
warnings.filterwarnings('ignore')
import sys
sys.path.append('..')
# %load_ext autoreload
# %autoreload 2
import loci as lc
from loci import io
from loci import clustering
from loci import plots
# -
# ## Create a POI GeoDataFrame from a remote CSV file produced by OSMWrangle
# Bounding area: Athens plus a 10km buffer; POIs reprojected to Greek Grid.
bound = lc.io.retrieve_osm_loc('Athens, Greece', buffer_dist=10000)
remote_file = 'http://download.slipo.eu/results/osm-to-csv/europe/europe_greece-pois.osm.csv.zip'
pois = lc.io.import_osmwrangle(remote_file, bound=bound, target_crs='EPSG:2100')
pois.head()
# ## Compute clusters
# +
# use DBSCAN
alg = 'dbscan'
min_pts = 15
eps = 100
# use HDBSCAN
# alg = 'hdbscan'
# min_pts = 15
# eps = None
pois_in_clusters, eps_per_cluster = lc.clustering.compute_clusters(pois, alg=alg, min_pts=min_pts, eps=eps, n_jobs=-1)
# -
# ## Show the POIs of a specific cluster
selected_cluster = 2
selected_pois = pois_in_clusters.loc[pois_in_clusters['cluster_id'] == selected_cluster]
plots.map_points(selected_pois)
# ## Compute cluster shapes and show clusters on the map
# Try the three available shape-construction methods (1, 2, 3).
cluster_borders = lc.clustering.cluster_shapes(pois_in_clusters, 1, eps_per_cluster)
plots.map_choropleth(cluster_borders, id_field='cluster_id', value_field='size')
cluster_borders = lc.clustering.cluster_shapes(pois_in_clusters, 2, eps_per_cluster)
plots.map_choropleth(cluster_borders, id_field='cluster_id', value_field='size')
cluster_borders = lc.clustering.cluster_shapes(pois_in_clusters, 3, eps_per_cluster)
plots.map_choropleth(cluster_borders, id_field='cluster_id', value_field='size')
# ## Show buildings and streets in a specific cluster
selected_cluster = cluster_borders.loc[cluster_borders['cluster_id'] == selected_cluster]
m = plots.map_cluster_contents_osm(selected_cluster)
m
# ## Compute AOIs for 'tourism' and 'restaurant' and compare them on the map
# +
# Cluster restaurant and tourism POIs separately with identical DBSCAN
# parameters, then turn each clustering into AOI polygons.
pois_restaurant = lc.analytics.filter_by_kwd(pois, kwd_filter='restaurant')
pois_tourism = lc.analytics.filter_by_kwd(pois, kwd_filter='tourism')
alg = 'dbscan'
min_pts = 10
eps = 100
aois_restaurant, eps_rest = lc.clustering.compute_clusters(pois_restaurant, alg=alg, min_pts=min_pts, eps=eps, n_jobs=-1)
aois_tourism, eps_tour = lc.clustering.compute_clusters(pois_tourism, alg=alg, min_pts=min_pts, eps=eps, n_jobs=-1)
aois_restaurant = lc.clustering.cluster_shapes(aois_restaurant, 2, eps_rest)
# BUGFIX: the tourism AOI shapes were built with the restaurant eps values
# (eps_rest); use the tourism clustering's own eps_tour instead.
aois_tourism = lc.clustering.cluster_shapes(aois_tourism, 2, eps_tour)
# -
plots.map_choropleth(aois_restaurant, id_field='cluster_id', value_field='size')
plots.map_choropleth(aois_tourism, id_field='cluster_id', value_field='size')
# Overlay the two AOI sets to compare their spatial difference.
m = lc.plots.map_cluster_diff(aois_restaurant, aois_tourism)
m
| notebooks/Demo_03_Clustering.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: nlp
# language: python
# name: nlp
# ---
from keras.models import Model, load_model
from keras.layers import Input, Convolution1D, Dot, Dense, Activation, Concatenate
from keras.utils import Sequence
import numpy as np
import random
import json
from typing import List, Dict, Tuple
from tqdm.auto import tqdm
# ## Load the model
# options.json carries the token tables and sequence-length limits saved
# at training time; s2s.h5 is the trained seq2seq model.
with open('options.json', encoding='utf-8') as f:
    options = json.load(f)
print(options)
latent_dim = 256
model = load_model('s2s.h5')
# Demo sentences to run through the trained seq2seq model.
user_inputs = [
    "There are numerous weaknesses with the bag of words model especially when applied to natural language processing tasks that graph ranking algorithms such as TextRank are able to address.",
    "Since purple yams happen to be starchy root vegetables, they also happen to be a great source of carbs, potassium, and vitamin C.",
    "Recurrent Neural Networks (RNNs) have been used successfully for many tasks involving sequential data such as machine translation, sentiment analysis, image captioning, time-series prediction etc.",
    "Improved RNN models such as Long Short-Term Memory networks (LSTMs) enable training on long sequences overcoming problems like vanishing gradients.",
    "However, even the more advanced models have their limitations and researchers had a hard time developing high-quality models when working with long data sequences.",
    "In machine translation, for example, the RNN has to find connections between long input and output sentences composed of dozens of words.",
    "It seemed that the existing RNN architectures needed to be changed and adapted to better deal with such tasks.",
    "Wenger ended his 22-year Gunners reign after the 2017-18 season and previously stated he intended to take charge of a new club in early 2019.",
    "It will not prevent the Frenchman from resuming his career in football.",
    "However 12 months out of work has given him a different outlook and may influence his next move.",
]
def user_input_to_inputs(ui: List[str]):
    """One-hot encode a batch of input sentences for the encoder.

    Each sentence becomes a (max_encoder_seq_length, num_encoder_tokens)
    float32 matrix with a 1 at every (position, character-index) pair.
    Reads the sequence length, vocabulary size and character->index map
    from the module-level `options` dict loaded from options.json.
    """
    seq_len = options['max_encoder_seq_length']
    n_tokens = options['num_encoder_tokens']
    char_to_idx = options['input_token_index']
    encoded = np.zeros((len(ui), seq_len, n_tokens), dtype='float32')
    for row, sentence in enumerate(ui):
        for pos, ch in enumerate(sentence):
            encoded[row, pos, char_to_idx[ch]] = 1.
    return encoded
inputs = user_input_to_inputs(user_inputs)
def print_predictions(inputs:np.array, user_inputs: List[str]):
    """Greedy-decode each encoded input sequence and print the result.

    `inputs` is the one-hot encoder tensor produced by user_input_to_inputs;
    `user_inputs` holds the matching raw sentences for display. Reads sizes
    and token maps from the module-level `options` dict and uses the
    module-level Keras `model`.
    """
    max_decoder_seq_length = options['max_decoder_seq_length']
    num_decoder_tokens = options['num_decoder_tokens']
    input_token_index = options['input_token_index']
    target_token_index = options['target_token_index']
    # Define sampling models
    # Inverse maps (index -> character) to turn predictions back into text.
    reverse_input_char_index = dict(
        (i, char) for char, i in input_token_index.items())
    reverse_target_char_index = dict(
        (i, char) for char, i in target_token_index.items())
    in_encoder = inputs
    # Decoder input starts all-zero except the start-of-sequence "\t" token.
    in_decoder = np.zeros(
        (len(in_encoder), max_decoder_seq_length, num_decoder_tokens),
        dtype='float32')
    in_decoder[:, 0, target_token_index["\t"]] = 1
    predict = np.zeros(
        (len(in_encoder), max_decoder_seq_length),
        dtype='float32')
    # Greedy decoding: at step i, run the full model, take the argmax token
    # at position i, and one-hot it into the decoder input for position i+1
    # so the model conditions on its own previous output.
    for i in tqdm(range(max_decoder_seq_length - 1), total=max_decoder_seq_length-1):
        predict = model.predict([in_encoder, in_decoder])
        predict = predict.argmax(axis=-1)
        predict_ = predict[:, i].ravel().tolist()
        for j, x in enumerate(predict_):
            in_decoder[j, i + 1, x] = 1
    for seq_index in range(len(in_encoder)):
        # Take one sequence (part of the training set)
        # for trying out decoding.
        output_seq = predict[seq_index, :].ravel().tolist()
        decoded = []
        # Collect characters up to the "\n" end-of-sequence marker.
        for x in output_seq:
            if reverse_target_char_index[x] == "\n":
                break
            else:
                decoded.append(reverse_target_char_index[x])
        decoded_sentence = "".join(decoded)
        print('-')
        print('Input sentence:', user_inputs[seq_index])
        print('Decoded sentence:', decoded_sentence)
print_predictions(inputs, user_inputs)
| use_pretrained_cnn_model.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
# # Mini Batch Training
# > Basic training loop, Parameters, and Optim
# We're going to start training!
# +
#export
from exp.nb_02 import *
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
# -
# ## Data
x_train,y_train,x_valid,y_valid = get_data()
x_train.shape
n, m = x_train.shape
c = y_train.max()+1;c
# ## Basic Model and Preds
nh = 50
class Model(nn.Module):
    """Two-layer fully connected net whose layers live in a plain list.

    NOTE(review): because `layers` is an ordinary Python list (not an
    nn.ModuleList), nn.Module does not register the sub-modules, so
    model.parameters() will not find their weights.
    """
    def __init__(self, n_in, nh, n_out):
        super().__init__()
        self.layers = [nn.Linear(n_in, nh), nn.ReLU(), nn.Linear(nh, n_out)]
    def __call__(self, x):
        # Thread the activation through each layer in order.
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
model = Model(m, nh, 10)
pred = model(x_train)
# ## Loss Function: Cross Entropy Loss
# Each grayscale image of a handwritten shape in our dataset has exactly 1 correct answer - an integer between 0 and 9 - these are called the labels or targets.
#
# Our `y_train` is a tensor of integers that map to the `x_train` images.
#
# We can index into the `y_train` to see the labels to images 0, 1, and 2.
y_train[:3]
# We could also think of these labels as one-hot encoded vectors of length 10 where the label corresponds to a 1 at the label's index and everything else is a zero.
y0 = torch.zeros(10)
y0[y_train[0]] = 1
y0
# The model's output is simply a length 10 vector for every example that is the result of numerous matrix multiplications:
pred[0]
# ### Softmax
# What we would like is a probability distribution over each of our 10 classes: each class gets a probability, and the highest corresponds to the class the model has learned is the most "correct". The model must therefore learn and adjust its parameters by quantifying how wrong its guess was.
#
# In order to accomplish this for our multi-class problem we'll use cross entropy loss.
#
# The first step is to scale the outputs by putting them through a softmax function:
# <p>
# $$\hbox{softmax(x)}_{i} = \frac{e^{x_{i}}}{\sum_{0 \leq j \leq n-1} e^{x_{j}}}$$
# </p>
#
# This turns our length 10 output vector into a probability distribution.
#
# For example, for the numerator, to raise the first two rows to the e is just:
pred[:2].exp()
# The denominator is trickier because we don't want to sum all of the rows together. We need divide each exponentiated value by its own row.
#
# Therefore, this won't work because it lumps everything together:
pred[:2].exp().sum()
# Instead we need to pass `keepdim=True`
pred[:2].exp().sum(-1, keepdim=True)
# Finally, here are the first two rows softmaxed:
soft_preds = pred[:2].exp() / pred[0].exp().sum(-1, keepdim=True); soft_preds
# And if we sum a row we get 1:
soft_preds[0].sum()
# Here is our function:
def log_softmax(x):
    """Log of the softmax along the last dimension (naive, unrefactored form)."""
    exps = x.exp()
    return (exps / exps.sum(-1, keepdim=True)).log()
soft_preds1 = log_softmax(pred)
# We can immediately refactor this by remembering that:
#
# <p>
# $$ \log{\frac{a}{b}} = \log{a} - \log{b} $$
#
# </p>
#
# Therefore:
#
# <p>
# $$ \displaystyle \log{\frac{e^x_i}{\sum_{j=0}^{n-1} e^x_j}} = \log{e^x} - \log{\sum_{j=0}^{n-1} e^x_j} = x - \log{\sum_{j=0}^{n-1} e^x_j} $$
# </p>
#
# In code this is:
def log_softmax(x):
    """Log-softmax via the identity log(a/b) = log a - log b."""
    log_denom = x.exp().sum(-1, keepdim=True).log()
    return x - log_denom
soft_preds = log_softmax(pred)
test_near(soft_preds, soft_preds1)
# Now that we have an output vector of predictions, $\hat{y}$, in the form of a probability distribution over the possible classes of $y$ (0-9) we can use cross entropy loss to calculate just how far off our prediction is from the target value.
#
# We're trying to find how dissimilar our prediction is to the target. So we are comparing the two distributions.
#
# Assuming our $y$ is one-hot encoded, we calculate the cross entropy loss for a single example by taking the dot product of the two vectors:
#
# <p>
# $$ L = -y \cdot \log{\hat{y}} $$
# </p>
-(y0 @ soft_preds[0])
# But we can do this another way without having to one-hot encode our labels.
#
# We'll use integer array indexing - we can pass a list of integers for each dimension and get back those specific rows/columns.
-soft_preds[[0],[y_train[0]]]
# Now we turn that into a proper loss function which takes averages all of the negative loss logs over the entire output.
def nll(input, target):
    """Mean negative log-likelihood.

    Integer-array indexing picks, for each row, the log-probability at the
    target class; the loss is the mean of their negations.
    """
    rows = range(target.shape[0])
    picked = input[rows, target]
    return -picked.mean()
loss = nll(soft_preds, y_train); loss
# #### LogSumExp Trick
# Then, there is a way to compute the log of the sum of exponentials in a more stable way, called the [LogSumExp trick](https://en.wikipedia.org/wiki/LogSumExp). The idea is to use the following formula:
#
# <p>
# $$\log \left ( \sum_{j=1}^{n} e^{x_{j}} \right ) = \log \left ( e^{a} \sum_{j=1}^{n} e^{x_{j}-a} \right ) = a + \log \left ( \sum_{j=1}^{n} e^{x_{j}-a} \right )$$
# </p>
#
#
# where a is the maximum of the $x_{j}$.
def logsumexp(x):
    """Numerically stable log(sum(exp(x))) along the last dimension.

    LogSumExp trick: subtract the row-wise maximum before exponentiating so
    no exp() can overflow, then add it back at the end.
    (Removed a leftover debug `print(m)` that spammed stdout on every call.)
    """
    m = x.max(-1)[0]  # largest value in each row
    return m + (x - m[:, None]).exp().sum(-1).log()
# ### Pytorch F.cross_entropy
# We've now built our loss function so let's go ahead and use the Pytorch version which combines `log_softmax` and `nll_loss` in a single function.
pyloss = F.cross_entropy(pred, y_train); pyloss
test_near(pyloss, loss)
# It works!
# ## Basic Training Loop
# We have all the parts now to create an algorithm!
#
# The training loop combines everything we have done so far into an iterative process. We loop over the data again and again to fine-tune our model's parameters.
#
# Here is what we need the training loop to do:
#
# - Get a batch of inputs and pass them to the model to get a batch of outputs
# - Compute the loss of by comparing the outputs to the labels
# - Calculate the gradients of the loss function with respect to the model parameters
# - Finally update the parameters using those gradients and a learning rate
# ### Single Batch
loss_func = F.cross_entropy
# It would be nice if we had some sort of metric to follow to see how many of the training examples we are getting correct.
#
# We'll start with accuracy.
torch.argmax(pred, dim=1)[:4]
#export
def accuracy(yh, y):
    """Fraction of rows whose argmax prediction matches the integer label."""
    predicted_classes = torch.argmax(yh, dim=1)
    return (predicted_classes == y).float().mean()
# Let's create a single mini-batch to test that it works:
bs=128 # batch size
xb = x_train[0:bs] # a mini-batch from x
preds = model(xb) # predictions
# This will give us 128 predictions - each prediction here being a vector of length 10.
preds[0], preds.shape
# Now we can put these predictions through our loss function with our labels and get some sort of measurement as to how far off they are:
yb = y_train[0:bs]
loss = loss_func(preds, yb); loss
accuracy(preds, yb)
# About 10% accuracy, that's basically choosing randomly.
#
# We have a long way to go but at least everything appears to be working.
#
# Now let's look at our model's weights:
model.layers[0].weight
model.layers[0].weight.grad
# At this stage the gradients with respect to the loss have not been computed.
#
# Pytorch only computes them once `.backward` is called. Let's do that:
loss.backward()
model.layers[0].weight.grad[0][:100]
# ### Training Loop
# Let's finally begin to train.
#
# Three hyperparameters we need to set are the batch size, the learning rate and the number of epochs (the number of times we iterate through the entire dataset)
bs = 64
lr = 0.5
epochs = 1
# Basic SGD training loop: forward pass, loss, backward pass, then a manual
# in-place parameter update for every layer that has weights.
for epoch in range(epochs):
    for i in range(0,n,bs):
        xb = x_train[i:i+bs]
        yb = y_train[i:i+bs]
        loss = loss_func(model(xb), yb)
        loss.backward()
        # The update itself must not be recorded by autograd, hence no_grad.
        with torch.no_grad():
            for l in model.layers:
                if hasattr(l, 'weight'):  # skip parameter-free layers (ReLU)
                    l.weight -= lr * l.weight.grad
                    l.bias -= l.bias.grad * lr
                    # Reset gradients so the next backward() starts from zero.
                    l.weight.grad.zero_()
                    l.bias .grad.zero_()
loss_func(model(xb), yb)
# And it's >90% accuracy. Not terrible for a simple neural net.
accuracy(model(x_train[:512]), y_train[:512])
# ## Parameters
# ### Parameters
# We'll start by no longer treating our Relu as a separate layer. Instead we'll use `F.relu` which is the functional form that returns activations.
#
# We then have two linear layers from `nn` these linear layers are automatically registered by the `nn.Module` class as the parameters of the model.
#
# We can call `model.parameters()` now and it will return a generator that does essentially what we were doing manually by iterating through the list of layers and checking for the `weight` attribute.
nh = 64
class Model(nn.Module):
    """Two-layer net using registered nn.Linear attributes and F.relu.

    The hidden width used to be read from the notebook-level global `nh`
    (64 at this point in the notebook); it is now an explicit keyword
    parameter with the same default, so the class no longer depends on a
    hidden module global. Existing `Model(m, 10)` calls are unaffected.
    """
    def __init__(self, n_in, n_out, nh=64):
        super().__init__()
        # Attribute assignment registers these as sub-modules, so
        # model.parameters() finds their weights automatically.
        self.l1 = nn.Linear(n_in, nh)
        self.l2 = nn.Linear(nh, n_out)
    def forward(self, x):
        return self.l2(F.relu(self.l1(x)))
n,m
model = Model(m, 10)
# Let's take a look inside our model.
#
# We can do this by calling the `.named_children` method on the model.
#
#
# Returns an iterator over immediate children modules, yielding both the name of the module as well as the module itself.
for l in model.named_children(): print(f"{l[0]} : {l[1]}")
# Pytorch's `nn.Module` has `__repr__` defined as the following:
#
# ```python
# def __repr__(self):
# # We treat the extra repr like the sub-module, one item per line
# extra_lines = []
# extra_repr = self.extra_repr()
# # empty string will be split into list ['']
# if extra_repr:
# extra_lines = extra_repr.split('\n')
# child_lines = []
# for key, module in self._modules.items():
# mod_str = repr(module)
# mod_str = _addindent(mod_str, 2)
# child_lines.append('(' + key + '): ' + mod_str)
# lines = extra_lines + child_lines
#
# main_str = self._get_name() + '('
# if lines:
# # simple one-liner info, which most builtin Modules will use
# if len(extra_lines) == 1 and not child_lines:
# main_str += extra_lines[0]
# else:
# main_str += '\n ' + '\n '.join(lines) + '\n'
#
# main_str += ')'
# return main_str`
# ```
model
# ### Fit
# Let's create a function that we can call which will run our training loop.
#
# This is standard for Machine Learning libraries like Sklearn.
# + code_folding=[]
def fit():
    """Train `model` for `epochs` passes over the training set.

    Relies on the notebook-level globals epochs, n, bs, x_train, y_train,
    model, loss_func and lr; updates every parameter in place with plain SGD.
    """
    print("Training...")
    for epoch in range(epochs):
        for start in range(0, n, bs):
            stop = start + bs if start + bs < n else n
            xb, yb = x_train[start:stop], y_train[start:stop]
            batch_loss = loss_func(model(xb), yb)
            batch_loss.backward()
            # Parameter update must not be traced by autograd.
            with torch.no_grad():
                for p in model.parameters():
                    p -= p.grad * lr
                model.zero_grad()
# -
fit()
loss_func(model(xb),yb)
accuracy(model(x_valid), y_valid)
# Question: How does Pytorch know what attributes in `__init__` to set as the model parameters?
# In Python every time an attribute is assigned during a class initialization, `__setattr__()` is called.
#
# When we inherit from `nn.Module` and then execute `super().__init__()` Pytorch creates the following 'private' attributes:
# ```python
# self.training = True
# self._parameters = OrderedDict()
# self._buffers = OrderedDict()
# self._backward_hooks = OrderedDict()
# self._forward_hooks = OrderedDict()
# self._forward_pre_hooks = OrderedDict()
# self._state_dict_hooks = OrderedDict()
# self._load_state_dict_pre_hooks = OrderedDict()
# self._modules = OrderedDict()
# ```
# When the model is instantiated from the `Model` class and `self.l1 = nn.Linear` is set as an attribute, `__setattr__()` is called.
#
# Pytorch then does the following:
#
# - checks if the attribute is a Parameter or a Module
# - checks to make sure that `nn.Module` `__init__` was called
# - then registers the Parameter or (sub)Module
# ```Python
#
# class Parameter
#
# '''A kind of Tensor that is to be considered a module parameter.
#
# Parameters are ~torch.Tensor subclasses, that have a very special property when used with Module s - when they're assigned as Module attributes they are automatically added to the list of its parameters, and will appear e.g. in ~Module.parameters iterator. Assigning a Tensor doesn't have such effect. This is because one might want to cache some temporary state, like last hidden state of the RNN, in the model. If there was no such class as Parameter, these temporaries would get registered too.
#
# Arguments: data (Tensor): parameter tensor. requires_grad (bool, optional): if the parameter requires gradient. See excluding-subgraphs for more details. Default: True'''
# ```
# ```python
# class Module
#
# '''Base class for all neural network modules.
#
# Your models should also subclass this class.
#
# Modules can also contain other Modules, allowing to nest them in a tree structure. You can assign the submodules as regular attributes:'''
# ```
# We can demonstrate this by building a dummy module that has a dictionary called `_modules`
class DummyModule():
    """Minimal re-creation of nn.Module's attribute-registration machinery.

    Demonstrates how Pytorch collects sub-modules: every public attribute
    assignment is intercepted by __setattr__ and recorded in `_modules`.
    Uses the notebook-level global `nh` for the hidden width.
    """
    def __init__(self, n_in, n_out):
        self._modules = {}
        self.l1 = nn.Linear(n_in, nh)
        self.l2 = nn.Linear(nh, n_out)
    def __setattr__(self, k, v):
        # Mimic nn.Module: names not starting with '_' are registered
        # as children before being stored normally.
        if not k.startswith("_"):
            self._modules[k] = v
        super().__setattr__(k, v)
    def __repr__(self):
        return f'{self._modules}'
    def parameters(self):
        # Chain the parameters of every registered child module.
        for child in self._modules.values():
            yield from child.parameters()
mdl = DummyModule(m, 10)
mdl
[o.shape for o in mdl.parameters()]
# ### Registering Modules
# Now let's say we wanted to use the layers approach that we wrote earlier.
layers = [nn.Linear(m,nh), nn.ReLU(), nn.Linear(nh, 10)]
class Model(nn.Module):
    """Sequential model built from a plain list of layers.

    Each layer is registered explicitly via add_module so nn.Module can
    find its parameters even though `layers` itself is an ordinary list.
    """
    def __init__(self, layers):
        super().__init__()  # sets up the parameters module dict and other dicts
        self.layers = layers
        for idx, layer in enumerate(self.layers):
            self.add_module(f'l{idx}', layer)
    def forward(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
#
model = Model(layers)
model
# ### nn.ModuleList
# If we insist on using the layers technique we can use the built in Pytorch `nn.ModuleList`
class Model(nn.Module):
    """Sequential model whose layer list is wrapped in nn.ModuleList.

    ModuleList registers every contained layer automatically, so no
    explicit add_module calls are needed.
    """
    def __init__(self, layers):
        super().__init__()
        self.layers = nn.ModuleList(layers)
    def forward(self, x):
        out = x
        for layer in self.layers:
            out = layer(out)
        return out
model = Model(layers)
model
fit()
loss_func(model(xb), yb), accuracy(model(xb), yb)
# ### nn.Sequential
# Even easier would be to use the Pytorch `nn.Sequential`
model = nn.Sequential(nn.Linear(m,nh), nn.ReLU(), nn.Linear(nh, 10))
model
fit()
loss_func(model(xb), yb), accuracy(model(xb), yb)
# ## Optim
# ### Optimizer Step
# Let's refactor our optimization step.
#
# In our training loop we called backward on the loss to compute the gradients and then to make the actual updates to the weights we did the following:
#
# ```python
# with torch.no_grad():
# for p in model.parameters(): p -= p.grad * lr
# model.zero_grad()
# ```
#
# We can simplify this bit of the loop if we instead put this away into an `Optimizer` class which will then have two separate methods:
#
# ```python
# opt.step()
# opt.zero_grad()
# ```
class Optimizer():
    """Bare-bones SGD: step() applies p -= lr * grad, zero() clears gradients."""
    def __init__(self, model, lr):
        self.model = model
        self.lr = lr
    def step(self):
        # In-place update, wrapped in no_grad so autograd does not trace it.
        with torch.no_grad():
            for param in self.model.parameters():
                param -= self.lr * param.grad
    def zero(self):
        self.model.zero_grad()
opt = Optimizer(model, lr)
for epoch in range(epochs):
for i in range(0,n,bs):
end = i+bs if i+bs < n else n
xb = x_train[i:end]
yb = y_train[i:end]
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero()
loss,acc = loss_func(model(xb), yb), accuracy(model(xb), yb)
loss,acc
# ### Pytorch Optim
# Now let's see the Pytorch version.
# `optim.SGD.step` iterates through each `param_group` and then again through each group's `params` key which contain the layers in that group
opt = optim.SGD(model.parameters(), lr=lr)
opt.param_groups
# Let's make a quick function to get model and an optimizer:
#export
def get_model():
    """Return a fresh 2-layer net plus an SGD optimizer over its parameters.

    Uses the notebook-level globals m (input size), nh (hidden width)
    and lr (learning rate).
    """
    net = nn.Sequential(nn.Linear(m, nh), nn.ReLU(), nn.Linear(nh, 10))
    return net, optim.SGD(net.parameters(), lr=lr)
model, opt = get_model()
for epoch in range(epochs):
for i in range(0,n,bs):
end = i+bs if i+bs < n else n
xb=x_train[i:end]
yb=y_train[i:end]
preds = model(xb)
loss = loss_func(preds, yb)
loss.backward()
opt.step()
opt.zero_grad()
vpreds = model(x_valid)
loss,acc = loss_func(vpreds, y_valid), accuracy(vpreds, y_valid)
loss,acc
# ## Dataset and DataLoader
# ### Dataset
# Another part of our training loop we can improve is:
#
# ```python
# for i in range(0,n,bs):
# end = i+bs if i+bs < n else n
# xb=x_train[i:end]
# yb=y_train[i:end]
# ```
#
# Let's build a `Dataset` class that will hold the `x`'s and `y`'s in one object.
#
# ```python
# xb, yb = train_ds[i:end]
# ```
#export
class Dataset():
    """Pair features with labels so one index/slice returns both together."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def __len__(self):
        return len(self.x)
    def __getitem__(self, key):
        # A slice key returns (x-batch, y-batch); an int key returns one pair.
        return self.x[key], self.y[key]
train_ds, valid_ds = Dataset(x_train, y_train), Dataset(x_valid, y_valid)
assert len(train_ds) == len(x_train)
xb,yb = train_ds[0:5]
assert xb.shape==(5,28*28)
assert yb.shape==(5,)
xb,yb
model, opt = get_model()
for epoch in range(epochs):
for i in range(0,n,bs):
end = i+bs if i+bs < n else n
xb, yb = train_ds[i:end]
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad()
vpreds = model(x_valid)
loss,acc = loss_func(vpreds, y_valid), accuracy(vpreds, y_valid)
loss,acc
# ### DataLoader from Scratch
# And we can use the same refactoring logic to make it so we can simply pull `x` and `y` batches out of a `DataLoader` class that holds the `Dataset` class we just made.
#
# ```python
# for xb, yb in train_dl:
# preds = model(xb)
# loss = loss_func(preds, yb)
# ```
class DataLoader():
    """Batch a dataset: iteration yields successive slices of size bs.

    Bug fix: the original __len__ computed `len(self.ds/self.bs)`, which
    raises TypeError because a dataset cannot be divided by an int. It now
    returns the number of batches __iter__ actually yields (the last batch
    may be shorter than bs).
    """
    def __init__(self, ds, bs):
        self.ds = ds
        self.bs = bs
    def __len__(self):
        # Ceiling division: count of batches, including a short final one.
        return (len(self.ds) + self.bs - 1) // self.bs
    def __iter__(self):
        for i in range(0, len(self.ds), self.bs):
            yield self.ds[i:i+self.bs]
train_dl = DataLoader(train_ds, 64)
valid_dl = DataLoader(valid_ds, 128)
xb, yb = next(iter(valid_dl))
assert xb.shape == (128, 28*28)
plt.imshow(xb[0].view(28, 28))
yb[0]
model, opt = get_model()
def fit():
    """Train `model` over `train_dl` using the notebook-level globals.

    Relies on epochs, train_dl, model, loss_func and opt defined in the
    surrounding cells; gradients are cleared after every optimizer step.
    """
    for epoch in range(epochs):
        for xb, yb in train_dl:
            batch_loss = loss_func(model(xb), yb)
            batch_loss.backward()
            opt.step()
            opt.zero_grad()
fit()
vpreds = model(x_valid)
loss,acc = loss_func(vpreds, y_valid), accuracy(vpreds, y_valid)
loss,acc
# ### Random Sampling v1
# For certain datasets where the dependent variable is in a specific order we would want to shuffle the data before loading it into batches and putting it through the model.
#
# Every epoch we'll iterate through the entire dataset randomly. The model will get to see each training example once but the order will be different each time.
#
# To do this we need our `Dataloader` to send a batch size of random integers that are within the range of the dataset.
#
# We'll need permutations:
torch.randperm(10)
class DataLoader():
    """Batching data loader with optional per-epoch shuffling.

    Bug fixes in the shuffled branch: the original stepped the loop with the
    notebook-level global `bs` instead of `self.bs`, and sliced
    `perms[i:self.bs]` — which is empty for every batch after the first —
    instead of `perms[i:i+self.bs]`. Shuffled iteration now visits every
    example exactly once per epoch, in a fresh random order.
    """
    def __init__(self, ds, bs, shuffle=False):
        self.ds = ds
        self.bs = bs
        self.shuffle = shuffle
    def __iter__(self):
        if self.shuffle:
            # New permutation each epoch; index the dataset by random batches.
            perms = torch.randperm(len(self.ds))
            for i in range(0, len(perms), self.bs):
                yield self.ds[perms[i:i+self.bs]]
        else:
            for i in range(0, len(self.ds), self.bs):
                yield self.ds[i:i+self.bs]
test_dl = DataLoader(train_ds, 128, shuffle=True)
test_valid_dl = DataLoader(valid_ds, 128, shuffle=False)
xb, yb = next(iter(test_dl))
plt.imshow(xb[0].view(28,28))
xb, yb = next(iter(test_dl))
plt.imshow(xb[0].view(28,28))
xb, yb = next(iter(test_dl))
plt.imshow(xb[0].view(28,28))
model, opt = get_model()
fit()
vpreds = model(x_valid)
loss,acc = loss_func(vpreds, y_valid), accuracy(vpreds, y_valid)
loss,acc
# ### Random Sampler v2
class Sampler():
    """Yield batches of dataset indices, shuffled per epoch when requested."""
    def __init__(self, ds, bs, shuffle=False):
        self.n = len(ds)  # only the length is needed, not the data itself
        self.bs = bs
        self.shuffle = shuffle
    def __iter__(self):
        # Fresh permutation each epoch if shuffling, otherwise 0..n-1 in order.
        self.idxs = torch.randperm(self.n) if self.shuffle else torch.arange(self.n)
        for start in range(0, self.n, self.bs):
            yield self.idxs[start:start + self.bs]
small_ds = Dataset(*train_ds[:10])
# To test this out let's see when `shuffle=False`
s = Sampler(small_ds, 3, False)
[x for x in s]
s = Sampler(small_ds, 3, True)
[x for x in s]
# +
def collate(b):
    """Merge a list of (x, y) pairs into one stacked (xs, ys) batch."""
    xs, ys = zip(*b)
    return torch.stack(xs), torch.stack(ys)

class DataLoader():
    """Pull index batches from a sampler, gather the items, and collate them."""
    def __init__(self, ds, sampler, collate_fn=collate):
        self.ds = ds
        self.sampler = sampler
        self.collate_fn = collate_fn
    def __iter__(self):
        for idx_batch in self.sampler:
            items = [self.ds[i] for i in idx_batch]
            yield self.collate_fn(items)
# -
train_samp = Sampler(train_ds, bs, shuffle=True)
valid_samp = Sampler(valid_ds, bs, shuffle=False)
train_dl = DataLoader(train_ds, sampler=train_samp, collate_fn=collate)
valid_dl = DataLoader(valid_ds, sampler=valid_samp, collate_fn=collate)
xb, yb = next(iter(train_dl))
plt.imshow(xb[0].view(28,28))
xb, yb = next(iter(train_dl))
plt.imshow(xb[0].view(28,28))
# ### Pytorch DataLoader
# Now we'll use the Pytorch version.
#
# It has a couple of particularly interesting args:
#
# - `drop_last` : drop the last incomplete batch
# - `num_workers`: how many subprocesses to use for data loading. ``0`` means that the data will be loaded in the main process.
# +
from torch.utils.data import DataLoader, SequentialSampler, RandomSampler
train_dl = DataLoader(train_ds, bs, shuffle=True)
valid_dl = DataLoader(valid_ds, 128, shuffle=False)
# -
model, opt = get_model()
fit()
vpreds = model(x_valid)
loss,acc = loss_func(vpreds, y_valid), accuracy(vpreds, y_valid)
loss,acc
# ## Validation
# A validation set is a key component to training properly: it is the only real indication we have that the model learning something useful.
#
# Specifically, it signals to us whether or not the model is overfitting to the training data.
#
# If we were to simply watch the training accuracy we would most likely see a continual improvement, as the loss diminishes and the accuracy increases to nearly 100%.
#
# That only indicates the performance of the model on labelled data. If the model's learning algorithm is powerful and it has enough parameters it can memorize the training data.
#
# But then, during inference time, when it is used to make predictions on unseen data, i.e. generalize, it may do horribly.
#
# So let's build a more complete training loop that includes a validation error:
# (Note: `model.train()` and `model.eval()` are used to turn on and off certain types of layers like Dropout and BatchNorm.)
def fit(epochs, model, loss_func, opt, train_dl, valid_dl):
    """Full training loop with a validation pass after every epoch.

    train()/eval() toggle training-mode layers such as Dropout and
    BatchNorm. Validation runs under no_grad and prints the mean loss and
    accuracy over valid_dl (uses the notebook-level `accuracy` helper).
    """
    for epoch in range(epochs):
        model.train()
        for xb, yb in train_dl:
            batch_loss = loss_func(model(xb), yb)
            batch_loss.backward()
            opt.step()
            opt.zero_grad()
        model.eval()
        vloss = []
        acc = []
        # No gradients needed for evaluation.
        with torch.no_grad():
            for xb, yb in valid_dl:
                preds = model(xb)
                vloss.append(loss_func(preds, yb))
                acc.append(accuracy(preds, yb))
        print('loss:', sum(l.item() for l in vloss) / len(valid_dl))
        print('accuracy:', sum(a.item() for a in acc) / len(valid_dl))
model, opt = get_model()
fit(2, model, loss_func, opt, train_dl, valid_dl)
#export
def get_dls(train_ds, valid_ds, bs, **kwargs):
    """Build the (train, valid) DataLoader pair.

    The training loader shuffles every epoch; the validation loader keeps
    order and uses twice the batch size (no gradients are stored during
    evaluation, so larger batches fit in memory).
    """
    train_dl = DataLoader(train_ds, batch_size=bs, shuffle=True, **kwargs)
    valid_dl = DataLoader(valid_ds, batch_size=bs * 2, **kwargs)
    return (train_dl, valid_dl)
# Creating our dataloaders, getting a model and optimizer, and training can be run in three lines of code:
get_dls(train_ds, valid_ds, 128)
model, opt = get_model()
fit(4, model, loss_func, opt, train_dl, valid_dl)
# !python notebook2script.py 03_minibatch_training.ipynb
| 03_minibatch_training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Partial Differential Equation Training
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# ## 1 Linear Convection
# The 1-D Linear Convection equation is the simplest, most basic model that can be used to learn something about PDE. Here it is:
# $\frac{\partial u}{\partial t}+c\frac{\partial u}{\partial x}=0$.
# c is a velocity, whereas u can be any advected quantity. For example it can be the concentration of some polutant in a river flowing at speed c.
#
# With given initial conditions (understood as a wave), the equation represents the propagation of that initial wave with speed c
# , without change of shape. Let the initial condition be $u(x,0)=u0(x)$
# . Then the exact solution of the equation is $u(x,t)=u0(x−ct)$
# (See lecture this morning).
#
#
# We discretize this equation in both space and time, using the Forward Difference scheme for the time derivative and the Backward Difference scheme for the space derivative. Consider discretizing the spatial coordinate x into points that we index from $i=0$ to $N$ , and stepping in discrete time intervals of size $dt$.
#
#
#
# We use the following convention : $U^{n}_i$ is the amplitude of the wave at time $t=n \times dt$, and at $x=i \times dx$
# How would you approximate $\frac{\partial u}{\partial t}$, using $U^{n}_i$, with a forward difference scheme?
# $$\frac{\partial U_i^n}{\partial t} \approx \frac{U_i^{n+1} - U_i^n}{dt}$$
# How would you approximate $\frac{\partial u}{\partial x}$, using $U^{n}_i$, with a backward difference scheme?
#
# $$\frac{\partial U_i^n}{\partial x} \approx \frac{U_i^n - U_{i-1}^n}{dx}$$
# Write the first order explicit integration scheme we will use, in function of $U^{n}_i$, c, dt, dx, $U^{n}_i$ and $U^{n}_{i-1}$
# $$U^{n+1}_i = U^{n}_i - \left( \frac{c dt}{dx}\right) \left(U^{n}_i - U^{n}_{i-1} \right) $$
# Now let's try implementing this in Python.
# We want to define an evenly spaced grid of points within a domain that is 2 units of length wide, i.e., $0 < x_i < 2$. The variable $nx$ is the number of grid points we want and $dx$ is the distance between two adjacent grid points. We define the following values:
nx = 41 # This will be changed latter
dx = 2 / (nx-1)
nt = 25 #nt is the number of timesteps we want to calculate
Delta_T=0.625 # time of integration
dt = Delta_T/(nt*1.0) #dt is lentgh of each time step
c = 1 #assume advection velocity is c = 1
# We also need to set up our initial conditions. The initial concentration u0
# is given as u=2 in the interval 0.5≤x≤1 and u=1
# everywhere else in (0,2) (i.e., a hat function). Build U
x_ = [dx*i for i in range(nx) ] # defined x space fot plottting
Uo = [2.0 if np.logical_and(i*dx<1.0,i*dx>0.5) else 1.0 for i in range(nx)] #flat hat initial condition
Uo = np.array(Uo) #vectorize
# plot, U, your initial concentration profile.
#
plt.figure(figsize=(15,5))
plt.plot(x_,Uo)
# Why doesn't the hat function have perfectly straight sides? Think for a bit.
# **ANS:** The step in space is big enough to notice the difference between steps. If we use more steps (i.e. 100 , 1000) the difference should be smaller and the border would be more and more *vertical* with more steps dividing the interval [0,2]
# Now we implement the discretization of the convection equation using a finite-difference scheme. For every element i of our array $U^{n}_i$ we need to perform the operation defined above ($U^{n+1}_i$). We'll store the result in a new (temporary) array Un, which will be the next state of U for the next time-step. We will repeat this operation for as many time-steps as we specify and then we can see how far the concentration profile has convected.
#
# We will need also boundary conditions, so for the moment we will only use periodic boundary conditions (what leaves from the right, re-enters at the left)
#
# The code is the following :
#
# 1) define U at time =0, dt, dx, c etc...
#
# 2) compute Un as a function of U and all other information.
#
# 3) Take care of boundary conditions at the edges
#
# 4) Time=Time+dt
#
# 5) go back to 2
#
# Boundary conditions: Explain how you compute $U^{n}_0$ and $U^{N}_i$ ?
# **ANS :** The given boundary conditions are reductible to a modulus operation that keeps in range [0,2] the coordinate of space. $U^{n}_0$ uses $U^{n-1}_{j-1} = U^{n-1}_{J}$ given this modulus rule over the *j* coefficients. $U^{N}_i$ uses merely $U^{N-1}$ to calculate the new values.
# Write the code...
# +
def evolve_fd(U):
    """Advance the advection solution one time step (first-order upwind).

    Receives U^n and returns U^{n+1}. Uses the notebook globals
    D (= c*dt/dx, the CFL number) and nx (grid size). Periodic boundaries:
    index -1 wraps around to the last grid point via the modulus.
    """
    global D
    global nx
    prev = U.copy()  # snapshot of U^n so updates don't feed into themselves
    nxt = U.copy()   # will hold U^{n+1}
    for j in range(nx):
        jm1 = (j - 1) % nx  # periodic left neighbour
        nxt[j] = prev[j] - D * (prev[j] - prev[jm1])  # first-order finite differences
    return nxt
# -
# PLot U at different times: 3 time steps, 12 time steps, 25 time steps
#
# +
#params
c = 1
D = c*(dt/dx)
#history, only val is initial condition
history = [Uo]
for i in range(nt):
history.append(evolve_fd(history[-1]))
n_dt = [0,3,7,12,25] #these timesteps will be plotted
plt.figure(figsize=(14,3*len(n_dt)))
#for each timestep checked...
for i in range(len(n_dt)):
plt.subplot(len(n_dt),1,i+1)
tmstp = n_dt[i]
plt.plot(x_,history[tmstp],label="timestep = {}".format(tmstp))
plt.legend()
# -
# What do you notice ?
# **ANS :** The concentration profile of U smooths out and moves to the right as time advances.
# What is the value of CFL=dt C / dx ?
D = c*(dt/dx)
print("The value of the CFL is {:.3f}".format(D))
# **NOTE :** with the value of CFL (D in the script) inferior to one, the stability condition is fulfilled.
# Redo the same computation, vayring dt , for CFL>1. and CFL << 1
# +
c_sweep = [0.1,1,2,2.1] # check these values of C
DATA = {} #store the data
for c in c_sweep: #for each c to be checked...
D = c*(dt/dx) #redefine D
history = [Uo] #history initialized in Uo
for i in range(nt): #for ach timestep
history.append(evolve_fd(history[-1])) # append the solution
DATA[c] = history # store the labeled history in DATA
n_dt = [0,3,7,12,25] # timesteps to be checked
# PLOTTNG
plt.figure(figsize=(14,3*len(n_dt)))
for i in range(len(n_dt)): # for each timmestep to be checked
plt.subplot(len(n_dt),1,i+1)
tmstp = n_dt[i]
for cc in DATA:# for each C value to be checked
plt.plot(x_,DATA[cc][tmstp],label="timestep = {} for c ={} ; CFL = {:.2f}".format(tmstp,cc,cc*(dt/dx)))
plt.legend()
# -
# What do you notice ? What is the the good integration conditions ?
# **ANS:** When the CFL is too small, the change in U is slow and hard to see. On the other hand, a CFL greater than 1 creates instabilities that rapidly blow up the values of the function (orders of magnitude above the expected range). The optimal CFL condition seems to be smaller than 1 but not too small.
# Now redo the computation, with CFL>1 and using an initial U that is gaussian.
# +
#function to create a Gaussian distribution given a set of x values, a mean and a standard deviation
def gaussian_initial(U, mu, sig):
    """Return the Gaussian pdf with mean `mu` and std `sig`, evaluated at the points in `U`."""
    norm_factor = 1.0 / (sig * np.sqrt(2 * np.pi))
    return norm_factor * np.exp(-((U - mu) ** 2) / (2.0 * sig ** 2))
# Rerun the first-order solver with CFL > 1 but a smooth (Gaussian) initial condition
Uo = np.linspace(0,2,nx) # space between 0 and 2
Uo = gaussian_initial(Uo,1,0.15) #gaussian of x values given
#params
c = 2.1
D = c*(dt/dx)
#history, only val is initial condition
history = [Uo]
for i in range(nt):
    history.append(evolve_fd(history[-1]))
n_dt = [0,3,7,12,25] #these timesteps will be plotted
plt.figure(figsize=(14,3*len(n_dt)))
#for each timestep checked...
for i in range(len(n_dt)):
    plt.subplot(len(n_dt),1,i+1)
    tmstp = n_dt[i]
    plt.plot(x_,history[tmstp],label="timestep = {}".format(tmstp))
    plt.legend()
# -
# What do you notice ?
# **ANS :** even if the stability condition is broken, the gaussian initial condition seems to be "stable" in time. Maybe because the change in the slope is constant and smooth .
# If you have time : Redo the same thing using a 2nd order space integrator, like leap frog.
# +
#defining the leapfrog 2nd order solver
def evolve_lf(U, U_1):
    """Advance one timestep with the second-order leapfrog scheme.

    Computes Un+1[j] = Un-1[j] - D * (Un[j+1] - Un[j-1]) with periodic
    boundary conditions. `U` is the current step, `U_1` the previous one;
    neither input is modified and a new array is returned.
    Relies on the module-level globals `D` (CFL number) and `nx` (grid size).
    """
    global D
    global nx
    Un = U.copy()
    for idx in range(nx):
        left = (idx - 1) % nx    # periodic left neighbour
        right = (idx + 1) % nx   # periodic right neighbour
        Un[idx] = U_1[idx] - D * (U[right] - U[left])
    return Un
def solve_lf(Uo):
    """Integrate `nt` timesteps starting from Uo using leapfrog.

    Leapfrog needs two previous time levels, so the very first step is
    bootstrapped with the first-order solver `evolve_fd`.
    Returns the list of solutions, one entry per timestep (nt+1 entries).
    """
    global nt
    # history initialized with the initial condition
    history_lf = [Uo]
    # first timestep calculated with the first-order method
    history_lf.append(evolve_fd(history_lf[-1]))
    # loop over the remaining timesteps using leapfrog
    for i in range(1,nt):
        history_lf.append(evolve_lf(history_lf[-1],history_lf[-2]))
    return history_lf
# +
# flat-hat initial condition: U = 2 for 0.5 < x < 1, else 1
Uo = [2.0 if np.logical_and(i*dx<1.0,i*dx>0.5) else 1.0 for i in range(nx) ]
Uo = np.array(Uo)
#params
c = 1
D = c*(dt/dx)
#leapfrog solver
history_lf = solve_lf(Uo)
n_dt = [0,3,7,12,25] # check these timesteps
#PLOTTING
plt.figure(figsize=(14,3*len(n_dt)))
for i in range(len(n_dt)):
    plt.subplot(len(n_dt),1,i+1)
    tmstp = n_dt[i]
    plt.plot(x_,history_lf[tmstp],label="timestep = {} for c = 1".format(tmstp))
    plt.legend()
# +
# Repeat the c/CFL sweep, now with the leapfrog solver
c_sweep = [0.1,1,2,2.1] # check these values of C
DATA = {} #store the data
for c in c_sweep: #for each c to be checked...
    D = c*(dt/dx) #redefine D
    history = solve_lf(Uo) #history initialized in Uo
    DATA[c] = history # store the labeled history in DATA
n_dt = [0,3,7,12,25] # timesteps to be checked
# PLOTTING
plt.figure(figsize=(14,3*len(n_dt)))
for i in range(len(n_dt)): # for each timestep to be checked
    plt.subplot(len(n_dt),1,i+1)
    tmstp = n_dt[i]
    for cc in DATA:# for each C value to be checked
        plt.plot(x_,DATA[cc][tmstp],label="timestep = {} for c ={} ; CFL = {:.2f}".format(tmstp,cc,cc*(dt/dx)))
    plt.legend()
# +
# Leapfrog with CFL > 1 and a Gaussian initial condition
Uo = np.linspace(0,2,nx) # space between 0 and 2
Uo = gaussian_initial(Uo,1,0.15) #gaussian of x values given
#params
c = 2.1
D = c*(dt/dx)
#history, only val is initial condition
history = solve_lf(Uo)
n_dt = [0,3,7,12,25] #these timesteps will be plotted
plt.figure(figsize=(14,3*len(n_dt)))
#for each timestep checked...
for i in range(len(n_dt)):
    plt.subplot(len(n_dt),1,i+1)
    tmstp = n_dt[i]
    plt.plot(x_,history[tmstp],label="timestep = {}".format(tmstp))
    plt.legend()
# -
# **NOTE:** The leapfrog method performs badly when dealing with discontinuities in the spatial initial condition, such as the flat hat. The Gaussian condition performs well, but as time advances some noise becomes evident in the spatial solution.
| lecture_7/.ipynb_checkpoints/PDE_training-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Apriori Algorithm Code Implementation
# Hey everyone! Welcome to the doc on the implementation of the Apriori algorithm. Just a reminder, it is highly important that you brush up your concepts on how the algorithm works before you start implementing the algorithm in Python. Make sure you are familiar with the following keywords, as we will be using these terms frequently.
#
# **1. Support**<br>
# **2. Confidence**<br>
# **3. Frequent Itemsets**<br>
# **4. Rule Generation technique**<br>
# **5. Candidate Generation**<br>
# **6. Candidate Pruning**<br>
#
# **Note:** It is highly recommended that you first try to implement the algorithm on your own from scratch. We understand how tempting it must be to scroll down and jump to the solution (which you might have already done by now). Keep in mind that implementing the algorithm from scratch will not only improve your ability to write complex algorithms but also help you realise the areas in Apriori that you might be weak in.
#
# You can download the dataset from the link below <br>
# (https://www.kaggle.com/roshansharma/market-basket-optimization). <br>
#
# We have divided the code implementation into the following two parts:<br>
# Part A: Frequent Itemset Generation<br>
# Part B: Rule Generation
# ## PART A: Frequent Itemset Generation
# + colab={} colab_type="code" id="W-IzOGLJqfJW"
# Importing the libraries and the dataset
import pandas as pd
# header=None: the CSV has no header row; each row is one transaction, padded with NaNs
Market_Data = pd.read_csv('Market_Basket_Optimisation.csv',index_col=None, header = None ) # Use your local path here
Market_Data.head(10)
# +
# Converting the Market Dataset into a nested list (one list of item names per transaction)
Dataset = []
for index, transaction in Market_Data.iterrows():
    # drop the NaN padding so each row keeps only its actual items
    cleaned_transaction = transaction[~transaction.isnull()].tolist()
    Dataset.append(cleaned_transaction)
# -
len(Dataset) # Number of transactions in our dataset
# First 5 transactions of the dataset
Dataset[:5]
# ### 1. The createItem function
# Each transaction consists of various items, and the entire dataset consists of 7,501 transactions. With the help of this function, you can generate all the unique items from the entire dataset.
# + colab={} colab_type="code" id="tJWZCKKWqfJn"
# For the given dataset writing a function to return the list of distinct items in the dataset
def createItem(dataSet):
    """
    Extract all the unique items from the dataset as 1-item frozensets.

    Items are returned in order of first appearance (no sorting is applied).

    Attributes
    ----------
    dataSet : list
        Market dataset: a list of transactions, each a list of item names.

    Return Types
    ---------
    itemList : list
        frozen list of unique items (one frozenset per item)
    """
    seen = set()   # O(1) membership test instead of scanning itemList each time
    itemList = []
    for transaction in dataSet:
        for item in transaction:
            if item not in seen:
                seen.add(item)
                # creating unique single-item lists in itemList
                itemList.append([item])
    return list(map(frozenset, itemList))
# -
#
# Why have we used frozenset?
#
# A frozenset is an immutable -- and therefore hashable -- version of a set. Since itemsets never change once built, storing them as frozensets lets us use them as dictionary keys (e.g. in the support dictionary) and combine them safely with set operations during candidate generation.
# ### 2. The scanData function
# + colab={} colab_type="code" id="tJWZCKKWqfJn"
# Returns an Itemset and dictionary with Support values
def scanData(data, itemsetk, minSupport):
    """
    Count how often each candidate itemset occurs and keep the frequent ones.

    Support is the fraction of transactions containing an itemset. Every
    candidate whose support reaches ``minSupport`` is kept, and its support
    value is recorded.

    Attributes
    ----------
    data : list
        List of transactions (each an iterable of items)
    itemsetk : list
        Candidate itemsets (frozensets); may be 1-itemsets, 2-itemsets, ...
    minSupport : float
        minimum support set by the user

    Return Types
    ---------
    freqItemset : list
        itemsets that satisfy the support threshold
    supportDict : dict
        Support value of each frequent itemset
    """
    counts = {}
    # single pass over the data: tally every candidate contained in a transaction
    for transaction in data:
        for candidate in itemsetk:
            if candidate.issubset(transaction):
                counts[candidate] = counts.get(candidate, 0) + 1
    numItems = float(len(data))
    freqItemset = []
    supportDict = {}
    for candidate, hits in counts.items():
        support = hits / numItems
        if support >= minSupport:
            freqItemset.insert(0, candidate)  # prepend, matching the original ordering
            supportDict[candidate] = support
    return freqItemset, supportDict
# -
# ### 3. The itemSetGenerator function
# This function takes a list of k-itemsets and returns all the k+1 item sets using candidate generation.Candidate Generation is the method of merging those k-item sets whose first (k-1) elements are identical
# + colab={} colab_type="code" id="tJWZCKKWqfJn"
# Creating Higher order Itemsets
def itemSetGenerator(itemsetk, k):
    """
    Create candidate itemsets of order k by merging itemsets of order k-1.

    Two (k-1)-itemsets are merged only when their first k-2 elements (in
    sorted order) are identical -- the standard apriori candidate-generation
    rule, which avoids producing duplicate candidates.

    Attributes
    ----------
    itemsetk : list
        frozen list of itemsets of (k-1)th order
    k : int
        order of itemset we want to generate

    Return Types
    ---------
    higherOrderitemset : list
        Merged candidate itemsets of order k
    """
    higherOrderitemset = []
    lenitemsetk = len(itemsetk)
    for i in range(lenitemsetk):
        for j in range(i+1, lenitemsetk):
            # sorted() already returns a sorted list and slicing preserves order,
            # so the redundant follow-up .sort() calls were removed
            L1 = sorted(itemsetk[i])[:k-2]
            L2 = sorted(itemsetk[j])[:k-2]
            if L1 == L2:
                higherOrderitemset.append(itemsetk[i] | itemsetk[j])  # set union -> k items
    return higherOrderitemset
# -
# ### 4. The frquentItemsetGeneration function
# All the functions that we have created until now will come into play in the frquentItemsetGeneration function. All the possible k-order itemsets will be generated, and the support will be calculated for each itemset. Consequently, the freqItemsets of all orders will be stored and returned.
#
# #### Description:
#
# We use the ‘createItem’ function to extract all the unique items and store them as a frozenset. Next we scan these 1-itemsets, generate all the frequent itemsets and store them in 'freqItemsets'. Following this, we generate higher order itemsets until we are no longer able to find any itemsets of the kth order.
# + colab={} colab_type="code" id="tJWZCKKWqfJn"
def frquentItemsetGeneration(dataSet, minSupport):
    """
    Generate all frequent itemsets and the support value of each one.

    Starts from the frequent 1-itemsets and keeps building candidates of the
    next order with itemSetGenerator until no frequent itemset of the current
    order survives the support threshold. The support values are kept for
    later rule generation.

    Attributes
    ----------
    dataSet : list
        List of transactions.
    minSupport : float
        minimum support chosen by the user (e.g. 0.01 for Market_Data).

    Return Types
    ---------
    freqItemsets : list
        one list of frequent itemsets per order k
    supportDict : dict
        support of every frequent itemset
    """
    candidates1 = createItem(dataSet)                               # frozensets of single items
    frequent1, supportDict = scanData(dataSet, candidates1, minSupport)
    freqItemsets = [frequent1]
    order = 2
    # grow the itemset order until the last level comes back empty
    while freqItemsets[-1]:
        candidatesK = itemSetGenerator(freqItemsets[-1], order)     # order-k candidates
        frequentK, supportK = scanData(dataSet, candidatesK, minSupport)
        supportDict.update(supportK)
        freqItemsets.append(frequentK)
        order += 1
    return freqItemsets, supportDict
# -
# Mine the frequent itemsets; minSupport = 0.01 keeps itemsets in >= 1% of baskets
freqItemsets, supportDict = frquentItemsetGeneration(Dataset, minSupport = 0.01)
supportDict
# Frequent Itemsets of order 3
# Similarly you can extract the other itemsets as well
freqItemsets[2]
# **Congratulations! You now have your frequent itemsets. Let’s move on to Rule Generation. Hope you had no trouble following the implementation until now. If at any point you felt that your concepts were weak or you were not able to follow the code or text, you can revise your concepts from the session on apriori algorithm.**
# ## PART B: Rule Generation
# ### 5. The ‘calcConf’ function
# As you may have guessed, this function takes in a frequent itemset and calculates the confidence for each rule that it generates. <br>
#
# As you know, a rule is made of an antecedent and a consequent. The general structure of a rule is given below:<br>
#   (Rule antecedent) ----> (Rule consequent) <br>
#
# Recall the definition of confidence. For a given rule (a, b)--->(c, d), the confidence is calculated as follows::<br>
#   conf((a, b)--->(c, d)) = support(a, b, c, d)/ support(a, b)<br>
#
# In other words, it is (support of frequent itemset)/ (support of (frequent itemset - consequent))<br>
#
# Notice how the same formula is used in the code?<br>
# + colab={} colab_type="code" id="SelwPmIDqfKE"
def calcConf(freqSet, H, supportDict, bigRuleList, minConf):
    """
    Generate the rule (freqSet - conseq) ---> conseq for each conseq in H and
    keep those whose confidence reaches minConf.

    confidence(A ---> B) = support(A u B) / support(A),
    i.e. support(freqSet) / support(freqSet - conseq).

    Attributes
    ----------
    freqSet : frozenset
        frequent itemset, e.g. frozenset({'french fries', 'mineral water', 'spaghetti'})
    H : list
        candidate rule consequents (frozensets) drawn from freqSet
    supportDict : dict
        Support values of all itemsets generated so far
    bigRuleList : list
        accumulator; each accepted rule is appended as
        (antecedent, consequent, confidence)
    minConf : float
        minimum confidence

    Return Types
    ---------
    prunedH : list
        Consequents whose rules met the confidence threshold
    """
    prunedH = []
    for conseq in H:
        # (leftover debug prints removed -- they dumped the entire supportDict
        # once per consequent, flooding the output on real datasets)
        conf = supportDict[freqSet]/supportDict[freqSet - conseq] # calculate confidence
        if conf >= minConf:
            bigRuleList.append((freqSet-conseq, conseq, conf))
            print(freqSet-conseq, '--->', conseq, 'confidence = ', conf)
            prunedH.append(conseq)
    return prunedH
# -
# ### 6. The rulesFromConseq function
# Later on, we use the condition that the order of each consequent will always be less than the order of the frequent itemset. If True, we want to generate new rules where the itemsets in Hmp1 are used as rule consequents.<br>
# <br>
# **Q) Now why does calcConf return prunedH and not H?**<br><br>
# Here, we have used confidence-based pruning.<br>
# Let’s consider generating rules for the itemset {a, b, c, d}
# <br><br>
# Suppose the confidence for {b, c, d} ---> {a} is less than the threshold. We can eliminate all the rules in which the rule consequent consists of higher-item sets containing {a}<br><br>
#
# Thus, we can eliminate {b, d} → {a, c}, {c, d} → {a, b}, {b, c} → {a, d}, {d} → {a, b, c}<br>
#
# Now suppose {b, c, d} ---> {a} does not satisfy the threshold. In this case, we eliminate {a} from the rule consequent, so that our algorithm does not have to create higher order itemsets of {a}, and check their thresholds as well.
# + colab={} colab_type="code" id="SelwPmIDqfKE"
def rulesFromConseq(freqSet, H, supportDict, bigRuleList, minConf):
    """
    Recursively generate rules for itemsets of order > 2.

    First evaluates the rules with the current consequents H via calcConf,
    then (confidence-based pruning) merges only the surviving consequents into
    higher-order ones and recurses while the consequent stays strictly smaller
    than the frequent itemset.

    Attributes
    ----------
    freqSet : frozenset
        frequent itemset
    H : list
        candidate consequents drawn from freqSet
    supportDict : dict
        Support values of all itemsets generated so far
    bigRuleList : list
        accumulator for the accepted rules
    minConf : float
        minimum confidence
    """
    m = len(H[0])  # order of the consequents at this recursion level
    survivors = calcConf(freqSet, H, supportDict, bigRuleList, minConf)
    if len(survivors) > 1:  # with <= 1 survivor no higher-order candidates can be built
        Hmp1 = itemSetGenerator(survivors, m + 1)
        if not Hmp1:
            # no pair of survivors could be merged -> nothing left to try
            return 0
        if len(Hmp1[0]) < len(freqSet):
            # keep going while the consequent is smaller than the whole itemset
            rulesFromConseq(freqSet, Hmp1, supportDict, bigRuleList, minConf)
# -
# ### 7. The generateRules function
# Consider this the final function for Rule Generation. We have our functions and all we need to do is extract the frequent itemsets and generate the rules with the help of our functions.
# **Description:**<br>
# We will begin by initialising the bigRuleList to an empty list. Now, we will form a nested loop runs through all the frequent itemsets in the ‘freqItemsets’ created in PART A. As explained earlier, we start with H1 consisting of all the order one combinations of the items in ‘freqSet’. If the order of ‘freqSet’ is two, i.e., i = 1, then there is no need to create higher order itemsets from its subsets. Hence, we directly generate the rules by calling ‘calcConf’. Once i>1, we need to break the freqSet into its subsets and generate different combinations of the rules. We have already shown the working in the explanation of the ‘rulesFromConseq’ function. Once you have all your rules in the ‘bigRuleList’, you can print them.
# + colab={} colab_type="code" id="SelwPmIDqfKE"
def generateRules(freqItemsets, supportDict, minConf): #supportDict is a dictionary coming from scanData
    """
    Generate all association rules from the frequent itemsets.

    1-itemsets (index 0) cannot form rules and are skipped. For 2-itemsets the
    rules are evaluated directly with calcConf; for larger itemsets
    rulesFromConseq builds and prunes the consequent combinations.

    Attributes
    ----------
    freqItemsets : list
        Frequent itemsets generated in PART A, grouped by order
    supportDict : dict
        Support dictionary created in PART A
    minConf : float
        minimum confidence set by the user

    Return Types
    ---------
    bigRuleList : list
        All rules whose confidence is >= minConf, as
        (antecedent, consequent, confidence) tuples
    """
    bigRuleList = []
    for order_idx, itemsets in enumerate(freqItemsets):
        if order_idx == 0:
            continue  # single items cannot produce rules
        for freqSet in itemsets:
            singles = [frozenset([item]) for item in freqSet]
            if order_idx > 1:
                rulesFromConseq(freqSet, singles, supportDict, bigRuleList, minConf)
            else:
                calcConf(freqSet, singles, supportDict, bigRuleList, minConf)
    return bigRuleList
# -
# Generate the final rules at a 30% confidence threshold
final_rules = generateRules(freqItemsets, supportDict, 0.3)
# **Congratulations! You now have your rules and you can experiment with this algorithm on other market data sets as well. We hope this explanation was helpful and all of your concepts related to the Apriori algorithm have been cleared.**
# Small toy dataset to sanity-check the helper functions below
Dataset = [['Milk', 'Cheese', 'Eggs'], ['Apples', 'Milk'], ['Milk'], ['Apples', 'Bananas', 'Eggs'], ['Apples', 'Milk', 'Cheese', 'Bananas']]
print(createItem(Dataset))
# +
# check support counting on hand-picked 2-itemsets
itemset2 = [frozenset({'Apples', 'Bananas'}), frozenset({'Cheese' , 'Milk'}), frozenset({'Eggs', 'Milk'})]
freqItemset2, supportDict = scanData(Dataset, itemset2, minSupport=0.3)
# -
print(freqItemset2)
# +
# check candidate generation: only the two sets sharing prefix {b, c} merge
Itemset3 = [frozenset({ 'a', 'b' , 'c' }), frozenset({'a', 'c' , 'd' }), frozenset({'b', 'c' , 'd' }), frozenset({'b', 'c' , 'e' })]
itemSetGenerator(Itemset3, 4)
| Course_2-Introduction_to_Machine_Learning_and_Cloud/Module_2-Introduction_to_Machine_Learning/Apriori_Algorithm-Complete_Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# language: python
# name: python38564bit55b157e97d614e9db0415667c9a0e53e
# ---
import pandas as pd
# Load a CSV whose path is typed at the prompt and plot every column against the first
msft = pd.read_csv(input())
# NOTE(review): input() returns a string, so subplots=input() is truthy for ANY
# non-empty reply (even "False"); consider parsing the answer into a real bool.
msft.plot(0, range(1, len(msft.columns)), subplots=input(), figsize=(15,10))
| plot.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# ## Numba: Tell those C++ bullies to get lost
#
# <br>
# ### <NAME>, Professor <NAME>
# #### The George Washington University
#
#
# <br><br>
# 
# + [markdown] slideshow={"slide_type": "slide"}
# ## Getting started
#
# If you haven't already done so, please clone the tutorial repository and fire up a Jupyter notebook server.
#
# ```console
# git clone https://github.com/barbagroup/numba_tutorial_scipy2016.git
#
# # # cd numba_tutorial_scipy2016
#
# jupyter notebook
# ```
#
# If you've already cloned the repository, do a quick `git pull` to make sure you grab a few changes we pushed a few days ago.
#
#
# + [markdown] slideshow={"slide_type": "slide"}
# ## A quick note on style
#
# We use
#
# ```python
# import numpy
# from matplotlib import pyplot
# ```
#
# not
#
# ```python
# import numpy as np
# import matplotlib.pyplot as plt
# ```
#
# (sorry?)
# + [markdown] slideshow={"slide_type": "slide"}
# ## What is Numba?
#
# a JIT compiler for Python that:
#
# * generates optimized machine code using LLVM
# * integrates well with the Scientific Python stack
# * is totally awesome
# + [markdown] slideshow={"slide_type": "slide"}
# ## Numba is _not_:
#
# * a 'full' JIT replacement of CPython a la `Pyston`, `PyPy`, `Pyjion`, etc...
# * magical
#
# but it is specifically designed for math-heavy Python code and it works (we think) very well.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Caveat
#
# YMMV. Numba's JIT compilation produces code optimized for your particular CPU model, so you may see better or worse speedups compared to those around you, depending on what kind of processor you have.
# + [markdown] slideshow={"slide_type": "slide"}
# ## Testing
#
# You should really,_really_ write tests when using Numba.
# + [markdown] slideshow={"slide_type": "slide"}
# ## One last note
# We will write a lot of loops.
# It will seem weird.
#
# (sorry)
# -
| slides/Intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
#Grid search for the second part, in which we now use the (fixed) first part
#This run is the one with the 44 variables using "combined"
#import packages
import sys
sys.path.append('Src/')
from data_modificado import * #hay funciones que estan cambiadas en este script para adaptralas a nuestro dataset
from train_2 import * #este hubo que modificar una linea tambien
from transfer_learning import * #hubo que modificart lo mismo que en train_2
from test_functions import *
from layers import *
from utils import *
from loss import *
from metric import *
from results import *
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
import itertools as it
#funciones
def read_df(
        metadata_names=None,
        random_state=42,
        otu_filename='../Datasets/otu_table_all_80.csv',
        metadata_filename='../Datasets/metadata_table_all_80.csv'):
    """
    Load the OTU table and the metadata table, join them on sample id, and
    split them into train / test / transfer-learning subsets.

    Parameters
    ----------
    metadata_names : list of str, optional
        Metadata columns used as the domain variables. Defaults to
        ['age', 'Temperature', 'Precipitation3Days'].
    random_state : int
        Seed forwarded to every train_test_split call (reproducible splits).
    otu_filename : str
        CSV with one row per OTU ('otuids' column) and one column per sample.
    metadata_filename : str
        CSV with one row per sample, indexed by 'X.SampleID'.

    Returns
    -------
    tuple
        (microbioma_train, microbioma_test,
         microbioma_transfer_train, microbioma_transfer_test,
         domain_train, domain_test,
         domain_transfer_train, domain_transfer_test,
         otu_columns, domain_columns)
    """
    # avoid a mutable default argument: resolve the default list here instead
    if metadata_names is None:
        metadata_names = ['age', 'Temperature', 'Precipitation3Days']
    otu = pd.read_csv(otu_filename, index_col=0, header=None).T
    otu = otu.set_index('otuids')
    otu = otu.astype('int32')
    metadata = pd.read_csv(metadata_filename)
    metadata = metadata.set_index('X.SampleID')
    domain = metadata[metadata_names]
    # join OTU counts and domain variables on the sample id; outer join keeps
    # samples present in only one of the two tables (filled with NaN)
    df = pd.concat([otu, domain], axis=1, sort=True, join='outer')
    df_microbioma = df[otu.columns]
    df_domain = df[domain.columns]
    # 90% train / 10% held out
    df_microbioma_train, df_microbioma_no_train, df_domain_train, df_domain_no_train = \
        train_test_split(df_microbioma, df_domain, test_size=0.1, random_state=random_state)
    # Transfer learning subset: 10% of the held-out data, the rest is the test set
    df_microbioma_test, df_microbioma_transfer_learning, df_domain_test, df_domain_transfer_learning = \
        train_test_split(df_microbioma_no_train, df_domain_no_train, test_size=0.1, random_state=random_state)
    # Transfer-learning data split 70/30 into its own train/test parts
    df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test = \
        train_test_split(df_microbioma_transfer_learning, df_domain_transfer_learning, test_size=0.3, random_state=random_state)
    return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
#return df_microbioma_train, df_microbioma_test, df_microbioma_transfer_learning_train, df_microbioma_transfer_learning_test, df_domain_train, df_domain_test, df_domain_transfer_learning_train, df_domain_transfer_learning_test, otu.columns, domain.columns
def train_kfold_mod(model_fn, m_train, d_train, z_train, m_test, d_test, z_test,
                    batch_size, epochs, train_callbacks):
    """
    Train one cross-validation fold.

    A fresh model is built via ``model_fn`` and fitted on the fold's train
    split, validating on the fold's test split. Which inputs/targets are fed
    depends on which encoders the model exposes:

    * both encoders: inputs (bioma, domain); targets (bioma, bioma, latent
      zeros) -- one target per model output
    * bioma encoder only: plain bioma autoencoder (bioma -> bioma)
    * domain encoder only: predict bioma from domain (domain -> bioma)

    Returns the Keras History (with metric keys prefixed 'bioma_'/'domain_'
    in the single-encoder cases, so runs stay comparable) and the tuple of
    models produced by ``model_fn``.
    """
    all_models = model_fn()
    model, encoder_bioma, encoder_domain, decoder_bioma = all_models
    metrics_prefix = None
    if encoder_bioma is not None and encoder_domain is not None:
        x_train = (m_train, d_train)
        y_train = (m_train, m_train, z_train)
        x_test = (m_test, d_test)
        y_test = (m_test, m_test, z_test)
    elif encoder_bioma is not None:
        x_train = m_train
        y_train = m_train
        x_test = m_test
        y_test = m_test
        metrics_prefix = 'bioma'
    elif encoder_domain is not None:
        x_train = d_train
        y_train = m_train
        x_test = d_test
        y_test = m_test
        metrics_prefix = 'domain'
    train_dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).shuffle(5000).batch(
        batch_size)
    train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    val_dataset = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size)
    val_dataset = val_dataset.prefetch(tf.data.experimental.AUTOTUNE)
    r = model.fit(train_dataset,
                  epochs=epochs,
                  validation_data=val_dataset,
                  callbacks=train_callbacks,
                  verbose=0)
    # rename the history metric keys with the encoder prefix ('loss'/'val_loss'
    # keep their names) so single-encoder runs can be compared to full runs
    if metrics_prefix is not None:
        old_keys = r.history
        r.history = {}
        for k, v in old_keys.items():
            if k == 'loss' or k == 'val_loss':
                new_key = k
            elif k.startswith('val_'):
                new_key = 'val_{}_{}'.format(metrics_prefix, k[4:])
            else:
                new_key = '{}_{}'.format(metrics_prefix, k)
            r.history[new_key] = v
    # drop the fold's tensors/datasets before the next fold is built
    del val_dataset
    del train_dataset
    del x_train
    del y_train
    del x_test
    del y_test
    return r, all_models
def train_2(model_fn,
            data_microbioma,
            data_domain,
            latent_space=10,
            folds=5,
            epochs=20,
            batch_size=128,
            learning_rate_scheduler=ExpDecayScheluder(),
            random_seed=347,
            verbose=0):
    """
    Train the autoencoder with k-fold cross-validation (or a single run).

    data_domain may be None, in which case each fold trains a bioma-only
    model (no domain inputs). Returns one (History, models) pair per fold.

    NOTE(review): the default ``ExpDecayScheluder()`` is evaluated once at
    definition time and shared across calls -- confirm the scheduler object
    is stateless between runs.
    """
    # zero targets for the latent-comparison output (one row per sample)
    data_zeros_latent = np.zeros((data_microbioma.shape[0], latent_space), dtype=data_microbioma.dtype)
    results = []
    models = []
    train_callbacks = [
        callbacks.EarlyStopping(monitor='val_loss', patience=epochs + 1, restore_best_weights=True)]
    if verbose >= 0:
        train_callbacks += [TqdmCallback(verbose=verbose)]
    if learning_rate_scheduler is not None:
        train_callbacks += [learning_rate_scheduler.make()]
    if folds <= 1:
        # no CV: train and validate on the full dataset
        m_train, m_test = data_microbioma, data_microbioma
        d_train, d_test = data_domain, data_domain
        z_train, z_test = data_zeros_latent, data_zeros_latent
        tf.random.set_seed(random_seed)
        # NOTE(review): this branch calls train_kfold (imported), not the
        # modified train_kfold_mod used below -- confirm this is intentional
        r, m = train_kfold(model_fn, m_train, d_train, z_train, m_test, d_test, z_test,
                           batch_size, epochs, train_callbacks)
        results.append(r)
        models.append(m)
    else:
        kf = KFold(n_splits=folds, random_state=random_seed, shuffle=True)
        tf.random.set_seed(random_seed)
        for train_index, test_index in kf.split(data_microbioma):
            m_train, m_test = data_microbioma[train_index], data_microbioma[test_index]
            # data_domain may be None (bioma-only model); guard the indexing
            if data_domain is None:
                d_train, d_test = None, None
            else:
                d_train, d_test = data_domain[train_index], data_domain[test_index]
            z_train, z_test = data_zeros_latent[train_index], data_zeros_latent[test_index]
            r, m = train_kfold_mod(model_fn, m_train, d_train, z_train, m_test, d_test, z_test,
                                   batch_size, epochs, train_callbacks)
            results.append(r)
            models.append(m)
    return results, models
def perform_experiment_2_mod(cv_folds, epochs, batch_size, learning_rate, optimizer,
                             learning_rate_scheduler, input_transform, output_transform,
                             reconstruction_loss, latent_space, layers,
                             activation, activation_latent,
                             data_microbioma_train, data_domain_train,
                             show_results=True, device='/CPU:0'): # show_results default changed from False to True
    """
    Build, train and summarise one autoencoder configuration.

    Instantiates the transforms/loss/optimizer, optionally renders the
    configuration as a markdown table, trains via train_2 (k-fold CV) and
    returns (experiment parameters + validation results, models, histories).
    data_domain_train may be None to train a bioma-only autoencoder.
    """
    # transforms are passed as factories; instantiate them if present
    if input_transform is not None:
        input_transform = input_transform()
    #----------
    if output_transform is not None:
        output_transform = output_transform()
    #----------
    # the loss may be a MakeLoss factory or a plain loss class
    if reconstruction_loss.__class__.__name__ == 'MakeLoss':
        reconstruction_loss = reconstruction_loss.make()
    else:
        reconstruction_loss = reconstruction_loss()
    domain_layers = [l // 16 for l in layers]  # domain-encoder layer sizes: bioma sizes divided by 16
    bioma_autoencoder = " -> ".join(["b"] +
                                    [str(l) for l in layers] +
                                    [str(latent_space)] +
                                    [str(l) for l in reversed(layers)] +
                                    ["b"])
    #----------
    # descriptive text only; not used for training itself
    if data_domain_train is not None:
        domain_autoencoder = " -> ".join(["d"] +
                                         [str(l) for l in domain_layers] +
                                         [str(latent_space)] +
                                         [str(l) for l in reversed(layers)] +
                                         ["b"])
    else:
        domain_autoencoder = " "
    #----------
    in_transform_name = input_transform.__class__.__name__ if input_transform else "none"
    out_transform_name = output_transform.__class__.__name__ if output_transform else "none"
    # learning_rate_scheduler arrives as a (scheduler, description) pair
    lr_scheduler_text = learning_rate_scheduler[
        1] if learning_rate_scheduler is not None else "none"
    lr_text = learning_rate if learning_rate_scheduler is not None else "constant = {}".format(
        learning_rate)
    learning_rate_scheduler = learning_rate_scheduler[
        0] if learning_rate_scheduler is not None else None
    optimizer = optimizer(learning_rate=learning_rate)
    #----------
    experiment_parameters = [
        ("Input transform", in_transform_name),
        ("Output transform", out_transform_name),
        ("Reconstruction Loss", reconstruction_loss.__class__.__name__),
        ("Latent Space", latent_space),
        ("Bioma Autoencoder", bioma_autoencoder),
        ("Domain Autoencoder", domain_autoencoder),
        ("Activation Encoder", activation),
        ("Activation Decoder", activation),
        ("Activation Latent", activation_latent),
        ("CV folds", cv_folds),
        ("Epochs", epochs),
        ("Batch Size", batch_size),
        ("Learning Rate Scheduler", lr_scheduler_text),
        ("Learning Rate", lr_text),
        ("Optimizer", optimizer.__class__.__name__),
    ]
    #----------
    if show_results:
        # render the configuration as a markdown table
        md_text = ""
        md_text += "| Parameter | Value |\n"
        md_text += "|:----------------------|:--------------|\n"
        for n, v in experiment_parameters:
            md_text += "| {} | {} |\n".format(n, v)
        display(Markdown(md_text))
    #------------
    def create_model(print_data=False):
        # build the (possibly domain-less) autoencoder from the closure's config
        bioma_shape=data_microbioma_train.shape[1]
        if data_domain_train is not None:
            domain_shape=data_domain_train.shape[1]
        else:
            domain_shape=None
        models = autoencoder(bioma_shape=bioma_shape,
                             domain_shape=domain_shape,
                             output_shape=bioma_shape,
                             latent_space=latent_space,
                             bioma_layers=layers,
                             domain_layers=domain_layers,
                             input_transform=input_transform,
                             output_transform=output_transform,
                             activation_function_encoder=activation,
                             activation_function_decoder=activation,
                             activation_function_latent=activation_latent)
        model, encoder_bioma, encoder_domain, decoder_bioma = models
        if print_data:
            plot_models(model, encoder_bioma, encoder_domain, decoder_bioma)
        compile_train(model,
                      encoder_bioma=encoder_bioma,
                      encoder_domain=encoder_domain,
                      reconstruction_error=reconstruction_loss,
                      encoded_comparison_error=losses.MeanAbsoluteError(),
                      metrics=get_experiment_metrics(input_transform, output_transform),
                      optimizer=optimizer)
        return model, encoder_bioma, encoder_domain, decoder_bioma
    #-----------
    create_model(print_data=False)
    #-----------
    with tf.device(device):
        results, models = train_2(create_model,
                                  data_microbioma_train,
                                  data_domain_train,
                                  latent_space=latent_space,
                                  folds=cv_folds,
                                  epochs=epochs,
                                  batch_size=batch_size,
                                  learning_rate_scheduler=learning_rate_scheduler,
                                  verbose=-1)
    #----------
    validation_results = print_results(results, show_results=show_results)
    if show_results:
        display(Markdown("*************"))
    return experiment_parameters + validation_results, models, results
# -
# __params:__
# - "activat_func":["softmax","sigmoid","relu","tanh"]
# - "activ_ouput":["softmax","sigmoid","relu","tanh"]
# - "learning_rate":[0.01,0.001]
# +
# Load the data: full nutrient-metadata column list plus the species-level
# OTU table filtered at 80%.
nombres_metadatos = ["KCAL","PROT","TFAT","CARB","MOIS","ALC","CAFF","THEO","SUGR","FIBE","CALC","IRON","MAGN","PHOS","POTA","SODI","ZINC","COPP","SELE","VC","VB1","VB2","NIAC","VB6","FOLA","VB12","VARA","RET","BCAR","ACAR","CRYP","LYCO","LZ","ATOC","VK","CHOLE","SFAT","MFAT","PFAT","VITD","CHOLN"]
# Reduced metadata subset kept for reference:
#nombres_metadatos = ["PROT","TFAT","CARB","MOIS","ALC","CAFF","THEO","CALC","MAGN","POTA","ZINC","VC","VB1","VB6","VARA","ACAR","CRYP","LYCO","ATOC","VK","CHOLE","VITD"]
df_microbioma_train, df_microbioma_test, _, _, \
df_domain_train, df_domain_test, _, _, otu_columns, domain_columns = read_df(metadata_names=nombres_metadatos,otu_filename='resultados_ana/datos_otus_filtrados/otu_table_especies_80.csv',metadata_filename='resultados_ana/metadatos_nutrientes.csv')
# Convert to float32 numpy arrays for TensorFlow.
data_microbioma_train = df_microbioma_train.to_numpy(dtype=np.float32)
data_microbioma_test = df_microbioma_test.to_numpy(dtype=np.float32)
data_domain_train = df_domain_train.to_numpy(dtype=np.float32)
data_domain_test = df_domain_test.to_numpy(dtype=np.float32)
# Prepare the relevant combinations (the 5 best). Each entry is
# [epochs, batch_size, learning_rate, optimizer, latent_space, layers,
#  activation, activation_latent].
combinations = [[100,64,0.01,optimizers.Adam,15,[512,256],"tanh","tanh"],\
                [100,64,0.01,optimizers.Adam,15,[512,256],"tanh","softmax"],\
                [100,64,0.001,optimizers.Adam,15,[512,256],"tanh","tanh"],\
                [100,64,0.01,optimizers.Adam,10,[512,256],"tanh","softmax"],\
                [100,64,0.001,optimizers.Adam,15,[512,256,128],"tanh","tanh"]]
# -
# ### 0.001, tanh, tanh
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: tanh hidden layers, linear output
        (no explicit activation; original note says the latent is 'tanh already')."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation=None)(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding, so it only
    # needs to exist before train_tl_noEnsemble invokes the factory.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, tanh, softmax
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: tanh hidden layers, softmax output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        # Output activation under test in this section: softmax.
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, tanh, sigmoid
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: tanh hidden layers, sigmoid output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        # Output activation under test in this section: sigmoid.
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, tanh, relu
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: tanh hidden layers, relu output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        # Output activation under test in this section: relu.
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, softmax, tanh
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: softmax hidden layers, tanh output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        # Output activation under test in this section: tanh.
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, softmax, softmax
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: softmax hidden layers, softmax output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        # Output activation under test in this section: softmax.
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, softmax, sigmoid
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: softmax hidden layers, sigmoid output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        # Output activation under test in this section: sigmoid.
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, softmax, relu
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: softmax hidden layers, relu output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        # Output activation under test in this section: relu.
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, sigmoid, tanh
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: sigmoid hidden layers, tanh output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        # Output activation under test in this section: tanh.
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, sigmoid, softmax
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: sigmoid hidden layers, softmax output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        # Output activation under test in this section: softmax.
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, sigmoid, sigmoid
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: sigmoid hidden layers, sigmoid output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        # Output activation under test in this section: sigmoid.
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, sigmoid, relu
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: sigmoid hidden layers, relu output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        # Output activation under test in this section: relu.
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, relu, tanh
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: relu hidden layers, tanh output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='relu')(in_layer)
        net = layers.Dense(64, activation='relu')(net)
        net = layers.Dense(32, activation='relu')(net)
        net = layers.Dense(16, activation='relu')(net)
        # Output activation under test in this section: tanh.
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, relu, softmax
for g in combinations:
    # --- Train the autoencoder for this hyper-parameter combination ---
    # g = [epochs, batch_size, learning_rate, optimizer, latent_space,
    #      layers, activation, activation_latent] (see `combinations`).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                              epochs=g[0],        # varies across combinations
                              batch_size=g[1],
                              learning_rate=g[2], # varies across combinations
                              optimizer=g[3],
                              learning_rate_scheduler=None,
                              input_transform=Percentage,  # data is already relative; not removed for the 80% table
                              output_transform=tf.keras.layers.Softmax,
                              reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                              latent_space=g[4],
                              layers=g[5],
                              activation=g[6],
                              activation_latent=g[7],
                              data_microbioma_train=data_microbioma_train,
                              data_domain_train=data_domain_train,
                              show_results=False,  # suppress per-experiment result display
                              device='/CPU:0')
    # Keep the trained model, encoder and decoder (first/only fold).
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Metadata -> latent regressor: relu hidden layers, softmax output."""
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='relu')(in_layer)
        net = layers.Dense(64, activation='relu')(net)
        net = layers.Dense(32, activation='relu')(net)
        net = layers.Dense(16, activation='relu')(net)
        # Output activation under test in this section: softmax.
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        # NOTE(review): `lr=` is the legacy Keras argument; recent TF uses `learning_rate=`.
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling function left as-is for now; tuning to be discussed later.
    # `latent_train` is read inside model_fn_latent via late binding.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                                      latent_train,
                                                      latent_train,
                                                      data_domain_train,
                                                      data_domain_train,
                                                      epochs=100,
                                                      batch_size=16,
                                                      verbose=-1)
    # Evaluate on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): computed but unused below — confirm
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, relu, sigmoid
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 relu hidden
        layers, sigmoid output sized to the latent space, Adam(lr=0.001), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='relu')(in_layer)
        net = layers.Dense(64, activation='relu')(net)
        net = layers.Dense(32, activation='relu')(net)
        net = layers.Dense(16, activation='relu')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.001, relu, relu
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 relu hidden
        layers, relu output sized to the latent space, Adam(lr=0.001), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='relu')(in_layer)
        net = layers.Dense(64, activation='relu')(net)
        net = layers.Dense(32, activation='relu')(net)
        net = layers.Dense(16, activation='relu')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.001), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ---------------------------
# ### 0.01, tanh, tanh
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 tanh hidden
        layers, tanh output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, tanh, softmax
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 tanh hidden
        layers, softmax output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, tanh, sigmoid
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 tanh hidden
        layers, sigmoid output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, tanh, relu
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 tanh hidden
        layers, relu output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='tanh')(in_layer)
        net = layers.Dense(64, activation='tanh')(net)
        net = layers.Dense(32, activation='tanh')(net)
        net = layers.Dense(16, activation='tanh')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, softmax, tanh
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 softmax hidden
        layers, tanh output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, softmax, softmax
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 softmax hidden
        layers, softmax output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, softmax, sigmoid
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 softmax hidden
        layers, sigmoid output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, softmax, relu
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 softmax hidden
        layers, relu output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='softmax')(in_layer)
        net = layers.Dense(64, activation='softmax')(net)
        net = layers.Dense(32, activation='softmax')(net)
        net = layers.Dense(16, activation='softmax')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, sigmoid, tanh
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 sigmoid hidden
        layers, tanh output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, sigmoid, softmax
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 sigmoid hidden
        layers, softmax output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, sigmoid, sigmoid
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 sigmoid hidden
        layers, sigmoid output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, sigmoid, relu
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 sigmoid hidden
        layers, relu output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='sigmoid')(in_layer)
        net = layers.Dense(64, activation='sigmoid')(net)
        net = layers.Dense(32, activation='sigmoid')(net)
        net = layers.Dense(16, activation='sigmoid')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, relu, tanh
for g in combinations:
    # Train one autoencoder per hyper-parameter combination g =
    # (epochs, batch_size, learning_rate, optimizer, latent_space, layers,
    #  activation, activation_latent).
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                 epochs=g[0],  # varies across combinations
                                 batch_size=g[1],
                                 learning_rate=g[2],  # varies across combinations
                                 optimizer=g[3],
                                 learning_rate_scheduler=None,
                                 input_transform=Percentage,  # data is already relative abundances
                                 output_transform=tf.keras.layers.Softmax,
                                 reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                 latent_space=g[4],
                                 layers=g[5],
                                 activation=g[6],
                                 activation_latent=g[7],
                                 data_microbioma_train=data_microbioma_train,
                                 data_domain_train=data_domain_train,
                                 show_results=False,  # suppress per-run result output
                                 device='/CPU:0')
    # Keep the trained autoencoder plus its encoder and decoder.
    model, encoder, _, decoder = models[0]
    def model_fn_latent():
        """Build the domain->latent regressor: 128-64-32-16 relu hidden
        layers, tanh output sized to the latent space, Adam(lr=0.01), MSE.

        NOTE(review): reads the notebook-global `latent_train`, which is
        (re)assigned below before train_tl_noEnsemble invokes this factory.
        """
        in_layer = layers.Input(shape=(data_domain_train.shape[1],))
        net = layers.Dense(128, activation='relu')(in_layer)
        net = layers.Dense(64, activation='relu')(net)
        net = layers.Dense(32, activation='relu')(net)
        net = layers.Dense(16, activation='relu')(net)
        out_layer = layers.Dense(latent_train.shape[1], activation='tanh')(net)  # 'tanh already'
        model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
        model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
        return model
    # Modelling step -- left as-is for now; pending discussion on what to change/tune.
    latent_train = encoder.predict(data_microbioma_train)
    result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                  latent_train,
                                  latent_train,
                                  data_domain_train,
                                  data_domain_train,
                                  epochs=100,
                                  batch_size=16,
                                  verbose=-1)
    # Evaluate the model on the test split.
    latent_test = encoder.predict(data_microbioma_test)  # NOTE(review): unused below -- verify whether test_model_tl_noEnsemble recomputes it
    predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
    print("----------------------------\n")
# ### 0.01, relu, softmax
for g in combinations:
#entrenamos el autoencoder
print(g)
print("===================\n")
experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
epochs=g[0], ##este es el que varia
batch_size=g[1],
learning_rate=g[2], ##este es el que varia
optimizer=g[3],
learning_rate_scheduler=None,
input_transform=Percentage, #--> lo quitamos porque ya lo tenemos en relativo, con el 80 no
output_transform=tf.keras.layers.Softmax,
reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
latent_space=g[4],
layers=g[5],
activation=g[6],
activation_latent=g[7],
data_microbioma_train=data_microbioma_train,
data_domain_train=data_domain_train,
show_results=False,#esto se ha cambiado para que no me muestre los resultados
device='/CPU:0')
#Guardamos el modelo, encoder y decoder
model, encoder, _ ,decoder = models[0]
def model_fn_latent():
in_layer = layers.Input(shape=(data_domain_train.shape[1],))
net = layers.Dense(128, activation='relu')(in_layer)
net = layers.Dense(64, activation='relu')(net)
net = layers.Dense(32, activation='relu')(net)
net = layers.Dense(16, activation='relu')(net)
out_layer = layers.Dense(latent_train.shape[1], activation='softmax')(net) # 'tanh already'
model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
model.compile(optimizer=optimizers.Adam(lr=0.01), loss=tf.keras.losses.MeanSquaredError(), metrics=[tf.keras.metrics.MeanSquaredError()])
return model
# Modelling step -- leaving it as is for now; to be discussed later (what to change / tune).
latent_train = encoder.predict(data_microbioma_train)
result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                 latent_train,
                                 latent_train,
                                 data_domain_train,
                                 data_domain_train,
                                 epochs=100,
                                 batch_size=16,
                                 verbose=-1)
# Evaluate the latent model on the held-out test split
latent_test = encoder.predict(data_microbioma_test)
predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
print("----------------------------\n")
# ### 0.01, relu, sigmoid
for g in combinations:
    # Train the autoencoder for this hyper-parameter combination.
    # NOTE(review): the indentation of this loop body was lost in the source
    # (body lines sat at column 0, an IndentationError); restored here.
    # Confirm whether the latent-model training further below was also meant
    # to run once per combination.
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                                epochs=g[0],  # this one varies
                                                batch_size=g[1],
                                                learning_rate=g[2],  # this one varies
                                                optimizer=g[3],
                                                learning_rate_scheduler=None,
                                                input_transform=Percentage,  # data is already in relative abundances
                                                output_transform=tf.keras.layers.Softmax,
                                                reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                                latent_space=g[4],
                                                layers=g[5],
                                                activation=g[6],
                                                activation_latent=g[7],
                                                data_microbioma_train=data_microbioma_train,
                                                data_domain_train=data_domain_train,
                                                show_results=False,  # suppress per-run result display
                                                device='/CPU:0')
    # Keep the trained model, encoder and decoder (single fold, cv_folds=0)
    model, encoder, _ ,decoder = models[0]
def model_fn_latent():
    """Build the metadata -> latent-space regression model (sigmoid head).

    A 4-layer MLP that maps domain (metadata) features onto the autoencoder
    latent space.  Reads the notebook globals ``data_domain_train`` (input
    width) and ``latent_train`` (output width).

    Returns:
        A compiled ``keras.Model`` (Adam, MSE loss and metric).
    """
    in_layer = layers.Input(shape=(data_domain_train.shape[1],))
    net = layers.Dense(128, activation='relu')(in_layer)
    net = layers.Dense(64, activation='relu')(net)
    net = layers.Dense(32, activation='relu')(net)
    net = layers.Dense(16, activation='relu')(net)
    out_layer = layers.Dense(latent_train.shape[1], activation='sigmoid')(net) # 'tanh already'
    model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
    # `lr` is deprecated in tf.keras (removed in Keras 3); use `learning_rate`.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                  loss=tf.keras.losses.MeanSquaredError(),
                  metrics=[tf.keras.metrics.MeanSquaredError()])
    return model
# Modelling step -- leaving it as is for now; to be discussed later (what to change / tune).
latent_train = encoder.predict(data_microbioma_train)
result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                 latent_train,
                                 latent_train,
                                 data_domain_train,
                                 data_domain_train,
                                 epochs=100,
                                 batch_size=16,
                                 verbose=-1)
# Evaluate the latent model on the held-out test split
latent_test = encoder.predict(data_microbioma_test)
predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
print("----------------------------\n")
# ### 0.01, relu, relu
for g in combinations:
    # Train the autoencoder for this hyper-parameter combination.
    # NOTE(review): the indentation of this loop body was lost in the source
    # (body lines sat at column 0, an IndentationError); restored here.
    # Confirm whether the latent-model training further below was also meant
    # to run once per combination.
    print(g)
    print("===================\n")
    experiment_metrics, models, results = perform_experiment_2_mod(cv_folds=0,
                                                epochs=g[0],  # this one varies
                                                batch_size=g[1],
                                                learning_rate=g[2],  # this one varies
                                                optimizer=g[3],
                                                learning_rate_scheduler=None,
                                                input_transform=Percentage,  # data is already in relative abundances
                                                output_transform=tf.keras.layers.Softmax,
                                                reconstruction_loss=MakeLoss(LossBrayCurtis, Percentage, None),
                                                latent_space=g[4],
                                                layers=g[5],
                                                activation=g[6],
                                                activation_latent=g[7],
                                                data_microbioma_train=data_microbioma_train,
                                                data_domain_train=data_domain_train,
                                                show_results=False,  # suppress per-run result display
                                                device='/CPU:0')
    # Keep the trained model, encoder and decoder (single fold, cv_folds=0)
    model, encoder, _ ,decoder = models[0]
def model_fn_latent():
    """Build the metadata -> latent-space regression model (relu head).

    A 4-layer MLP that maps domain (metadata) features onto the autoencoder
    latent space.  Reads the notebook globals ``data_domain_train`` (input
    width) and ``latent_train`` (output width).

    Returns:
        A compiled ``keras.Model`` (Adam, MSE loss and metric).
    """
    in_layer = layers.Input(shape=(data_domain_train.shape[1],))
    net = layers.Dense(128, activation='relu')(in_layer)
    net = layers.Dense(64, activation='relu')(net)
    net = layers.Dense(32, activation='relu')(net)
    net = layers.Dense(16, activation='relu')(net)
    out_layer = layers.Dense(latent_train.shape[1], activation='relu')(net) # 'tanh already'
    model = keras.Model(inputs=[in_layer], outputs=[out_layer], name='model')
    # `lr` is deprecated in tf.keras (removed in Keras 3); use `learning_rate`.
    model.compile(optimizer=optimizers.Adam(learning_rate=0.01),
                  loss=tf.keras.losses.MeanSquaredError(),
                  metrics=[tf.keras.metrics.MeanSquaredError()])
    return model
# Modelling step -- leaving it as is for now; to be discussed later (what to change / tune).
latent_train = encoder.predict(data_microbioma_train)
result_latent, model_latent = train_tl_noEnsemble(model_fn_latent,
                                 latent_train,
                                 latent_train,
                                 data_domain_train,
                                 data_domain_train,
                                 epochs=100,
                                 batch_size=16,
                                 verbose=-1)
# Evaluate the latent model on the held-out test split
latent_test = encoder.predict(data_microbioma_test)
predictions = test_model_tl_noEnsemble(model_latent, decoder, Percentage, tf.keras.layers.Softmax, otu_columns, data_microbioma_test, data_domain_test)
print("----------------------------\n")
| ScriptsAna/GridADMetadatos/Grid244combined.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Unit 2: Popularity Recommendations
# In this section we build a recommender that sorts items by popularity as of the number of ratings they received. As a result we return the $N$ most popular items as recommendations.
# +
from typing import Dict, List
import numpy as np
import pandas as pd
from scipy.stats import spearmanr
# -
# `Dataset` is just a wrapper for the MovieLens training data
from recsys_training.data import Dataset, genres
ml100k_ratings_filepath = '../data/raw/ml-100k/u.data'
ml100k_item_filepath = '../data/raw/ml-100k/u.item'
# ## Load Data
# We load the dataset with 100,000 ratings and split it $4:1$ into train and test set.
#
# (**Remark**: We do not focus on proper hyperparameter search within this tutorial and therefore do not generate a separate validation dataset)
# Load MovieLens 100k ratings and split 80/20 with a fixed seed for reproducibility
data = Dataset(ml100k_ratings_filepath)
data.rating_split(train_size=0.8, seed=42)
# Item metadata: '|'-separated, no header; genre flags appended after the URL column
items = pd.read_csv(ml100k_item_filepath, sep='|', header=None,
                    names=['item', 'title', 'release', 'video_release', 'imdb_url']+genres,
                    engine='python')
data.train_ratings
data.test_ratings
# Build a Mapping from user id to its item ratings. We will need this later.
user_ratings = data.get_user_ratings()
# Show up to 20 user ratings for the first user
user = 1
list(user_ratings[user].items())[:20]
# ## Popularity Ranking
# How do we define _popularity_? It turns out that there can be different things justifying the popularity of content:
# - **pure count**: simply count the number of ratings or interactions an item received regardless of their quality
# - **positive count**: only count the number of ratings or interactions that we assume reflect preference towards items, e.g. ratings above user mean ratings
# - **time-dependency**: despite evergreen stars items may also be popular for a limited time only - how can we account for this?
#
# **Remark**: Popularity ranking entails no personalization. We obtain a single popularity ranking of items which is independent from the user and serve the same top-$N$ items to every user.
# ### Popularity based on simple Interaction Counts
# 
#
# **Task**: Infer the item popularity order from training ratings as an array with items in descending order of popularity.
# Count ratings per item; value_counts() returns a Series sorted by count,
# descending, indexed by item id.
item_popularity = data.train_ratings.item.value_counts()
item_popularity
# Bug fix: the popularity *order* is the Series index (the item ids),
# not `.values` (those are the rating counts, not ids — using them made
# `isin(top_movie_ids)` below match counts against item ids).
item_order = item_popularity.index.values
item_order
# What are the most popular movies?
top_movie_ids = item_order[:5]
items[items['item'].isin(top_movie_ids)][['item', 'title']]
# ### Popularity based on positive Interaction Counts
#
# We assume that the the mean rating for each user is the threshold above which movies are regarded as favorable and below which movies are deemed as bad.
#
# 1. compute that user mean rating for each user.
# 2. remove all ratings that fall below this threshold.
# 3. apply the process above to the remaining ratings.
# Per-user mean rating: each user's personal threshold for a "positive" rating
user_mean_ratings = data.train_ratings[['user', 'rating']].groupby('user')
user_mean_ratings = user_mean_ratings.mean().reset_index()
user_mean_ratings.rename(columns={'rating': 'user_mean_rating'},
                         inplace=True)
user_mean_ratings
# Attach each user's threshold, then keep only ratings at or above it
positive_train_ratings = data.train_ratings.merge(user_mean_ratings,
                                                  on='user',
                                                  how='left')
keep_ratings = (positive_train_ratings['rating'] >= positive_train_ratings['user_mean_rating'])
positive_train_ratings = positive_train_ratings[keep_ratings]
positive_train_ratings.drop(columns='user_mean_rating', inplace=True)
positive_train_ratings
item_popularity_positive = positive_train_ratings.item.value_counts()
item_popularity_positive
# Bug fix: the positive ordering must come from the positive-only counts
# (was `item_popularity.index.values`, a copy-paste from the section above,
# which made both orderings trivially identical).
item_order_positive = item_popularity_positive.index.values
items[items['item'].isin(item_order_positive[:5])][['item', 'title']]
# #### How strong do both orderings correlate with each other?
# Check spearman rank correlation between both orderings to quantify the distortion in ordering.
# Pair (raw count, positive-only count) for every item present in both rankings
joint_counts = [[item_popularity.loc[item], item_popularity_positive[item]]
                for item in np.intersect1d(item_popularity_positive.index.values,
                                           item_popularity.index.values)]
joint_counts = np.array(joint_counts)
joint_counts
# Spearman rank correlation between the two popularity orderings (columns)
spearmanr(joint_counts)
# ### Using Popularity Ordering for top-$N$ Recommendations
#
# Now, we can produce recommendations from our popularity ordering.
# 
#
# **Task**: Write a method `get_recommendation` that returns the top-$N$ items without any known positives, i.e. items the user has already viewed.
def get_recommendations(user: int,
                        user_ratings: dict,
                        item_popularity_order: np.array,
                        N: int) -> List[int]:
    """Return the top-N most popular items the user has not rated yet.

    Implements the notebook task (the original body was a stub returning
    ``None``): filter the user's known positives out of the popularity
    ordering and return the first N remaining items.

    Args:
        user: user id to generate recommendations for.
        user_ratings: {user: {item: rating}} mapping of known (train) ratings.
        item_popularity_order: item ids in descending order of popularity.
        N: number of recommendations to return.

    Returns:
        Up to N item ids, most popular first, excluding known positives.
    """
    # Items the user already interacted with must not be recommended again.
    known_positives = set(user_ratings.get(user, {}).keys())
    recommendations = [item for item in item_popularity_order
                       if item not in known_positives][:N]
    return recommendations
# Try it ...
get_recommendations(1, user_ratings, item_order, 10)
# ## Evaluating the Relevance of Recommendations
def get_relevant_items(test_ratings: pd.DataFrame) -> Dict[int, List[int]]:
    """
    returns {user: [items]} as a list of relevant items per user
    for all users found in the test dataset
    """
    # Group the (user, item) pairs by user and collect each user's items.
    by_user = test_ratings[['user', 'item']].groupby('user')
    return {user: group['item'].values for user, group in by_user}
relevant_items = get_relevant_items(data.test_ratings)
relevant_items[1]
# ### $Precision@10$
# Now, we can compute the intersection between the top-$N$ recommended items and the items each user interacted with. Ideally, we want every recommendation to be a hit, i.e. an item the user consumed. In this case the size of intersections is $N$ given $N$ recommendations which is a precision of 100% = $\frac{N}{N}$.
#
# We compute the so called $Precision@N$ for every user and take the mean over all. The resulting metric is called _mean average precision at N_ or short $MAP@N$.
# 
#
# **Task:** Compute the $MAP@N$ for popularity recommendations
def get_precision(users: List[int], user_ratings: Dict[int, Dict[int, float]],
                  item_order: np.array, N: int) -> Dict[int, float]:
    """Compute Precision@N of the popularity recommendations per user.

    Bug fix: the original body was ``pass`` followed by ``return prec_at_N``,
    so calling it raised NameError.

    Args:
        users: user ids to evaluate.
        user_ratings: {user: {item: rating}} of known (train) ratings.
        item_order: item ids in descending order of popularity.
        N: number of recommendations per user.

    Returns:
        {user: precision@N}, the share of the N recommendations found among
        the user's relevant (test) items.
        NOTE(review): reads the notebook-level ``relevant_items`` mapping
        built from the test ratings — confirm it is defined before calling.
    """
    prec_at_N = dict()
    for user in users:
        recommendations = get_recommendations(user, user_ratings, item_order, N)
        hits = np.intersect1d(recommendations, relevant_items[user])
        prec_at_N[user] = len(hits) / N
    return prec_at_N
# Try it ...
# MAP@10: mean over users of the per-user Precision@10
N = 10
users = relevant_items.keys()
prec_at_N = get_precision(users, user_ratings, item_order, N)
np.mean(list(prec_at_N.values()))
| notebooks/2_e_popularity_recs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src='https://certificate.tpq.io/quantsdev_banner_color.png' width="250px" align="right">
# # Reinforcement Learning
# **Hedging of Options — WORK IN PROGRESS**
# © Dr <NAME> | The Python Quants GmbH
#
# [quants@dev Discord Server](https://discord.gg/uJPtp9Awaj) | [@quants_dev](https://twitter.com/quants_dev) | <a href="mailto:<EMAIL>"><EMAIL></a>
#
# <img src="https://hilpisch.com/aiif_cover_shadow.png" width="300px" align="left">
# ## Reinforcement Learning
import os
import math
import random
import numpy as np
import pandas as pd
from pylab import plt, mpl
plt.style.use('seaborn')
mpl.rcParams['font.family'] = 'serif'
np.set_printoptions(precision=4, suppress=True)
os.environ['PYTHONHASHSEED'] = '0'
# %config InlineBackend.figure_format = 'svg'
import warnings as w
w.simplefilter('ignore')
# + [markdown] tags=[]
# ## `TensorFlow/Keras`
# -
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '4'
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
import tensorflow as tf
tf.__version__
from tensorflow import keras
from keras.layers import Dense
from keras.models import Sequential
from sklearn.metrics import accuracy_score
def set_seeds(seed=100):
    """Seed NumPy, TensorFlow and the (global) environment for reproducibility.

    NOTE(review): Python's own `random` module is deliberately left unseeded
    (commented out) even though the environment's simulation uses it, and the
    action-space seed is hard-coded to 100 regardless of `seed` — confirm
    both are intended.
    """
    #random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)
    env.seed(seed)
    env.action_space.seed(100)
# ## Q-Learning
from collections import deque
class DQLAgent:
    """Deep Q-learning agent with a continuous action (hedge ratio in [0, 1]).

    Instead of an argmax over a discrete action set, the greedy action is
    found by numerically maximizing the Q-network output over the action
    (see `opt_action`).  NOTE(review): relies on the globals `env` (the
    Finance environment) and `minimize` (imported from scipy.optimize
    further down in the notebook — fine at call time, but fragile ordering).
    """
    def __init__(self, gamma=0.95, hu=24, opt=keras.optimizers.Adam, lr=0.001):
        # epsilon-greedy exploration schedule
        self.epsilon = 1.0
        self.epsilon_min = 0.01
        self.epsilon_decay = 0.995
        self.gamma = gamma                 # discount factor for future rewards
        self.batch_size = 32               # replay mini-batch size
        self.averages = list()             # per-episode moving average of total rewards
        self.memory = deque(maxlen=2000)   # bounded experience-replay buffer
        self.osn = env.observation_space.shape[0]
        self.action_space = env.action_space
        self.model = self._build_model(hu, opt, lr)
    def _build_model(self, hu, opt, lr):
        # Q-network: state -> Q-value(s); action_space.n outputs (here 1).
        model = Sequential()
        model.add(Dense(hu, input_dim=self.osn,
                        activation='relu'))
        model.add(Dense(hu, activation='relu'))
        model.add(Dense(hu, activation='relu'))
        model.add(Dense(self.action_space.n, activation='linear'))
        model.compile(loss='mse', optimizer=opt(learning_rate=lr))
        return model
    def opt_action(self, state):
        """Greedy action: hedge ratio a in [0, 1] maximizing Q over the state.

        Mutates state slots 3 (stock position) and 4 (bond position) while
        searching — state is assumed to be [S, ttm, C, stock, bond].
        """
        bnds = [(0, 1)]
        def f(state, a):
            # write candidate action and implied bond position into the state
            state[0, 3] = a
            state[0, 4] = state[0, 2] - a * state[0, 0]
            return self.model.predict(state)[0]
        # maximize Q by minimizing -Q over the bounded scalar action
        action = minimize(lambda a: -f(state, a), 0.5,
                          bounds=bnds, method='Powell')['x'][0]
        return action
    def act(self, state):
        # epsilon-greedy: explore with a random hedge ratio, else greedy
        if random.random() <= self.epsilon:
            return env.action_space.sample()
        action = self.opt_action(state)
        return action
    def replay(self):
        """Train the Q-network on a random mini-batch from the replay buffer."""
        batch = random.sample(self.memory, self.batch_size)
        for state, action, reward, next_state, done in batch:
            if not done:
                # bootstrap the target with the greedy next-state value
                action = self.opt_action(next_state)
                next_state[0, 3] = action
                next_state[0, 4] = next_state[0, 2] - action * next_state[0, 0]
                reward += self.gamma * self.model.predict(next_state)[0]
            reward = np.array(reward).reshape(1, -1)
            self.model.fit(state, reward, epochs=1,
                           verbose=False)
        # decay exploration after each replay pass
        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
    def learn(self, episodes):
        """Run `episodes` training episodes against the global `env`."""
        self.trewards = []
        self.max_treward = -10000
        for e in range(1, episodes + 1):
            state = env.reset()
            state = np.reshape(state, [1, self.osn])
            treward = 0
            for _ in range(5000):
                action = self.act(state)
                next_state, reward, done, info = env.step(action)
                treward += reward
                next_state = np.reshape(next_state,
                                        [1, self.osn])
                self.memory.append([state, action, reward,
                                    next_state, done])
                state = next_state
                if done:
                    # book-keeping + progress line (overwritten in place)
                    self.trewards.append(treward)
                    av = sum(self.trewards[-25:]) / 25
                    self.averages.append(av)
                    self.max_treward = max(self.max_treward, treward)
                    templ = 'episode: {:4d}/{} | treward: {:7.3f} | '
                    templ += 'av: {:6.1f} | best: {:.3f}'
                    print(templ.format(e, episodes, treward, av,
                                       self.max_treward), end='\r')
                    break
            if len(self.memory) > self.batch_size:
                self.replay()
        print()
    def test(self, episodes):
        """Run greedy-only episodes (no exploration); return total rewards."""
        trewards = []
        for e in range(1, episodes + 1):
            treward = 0
            state = env.reset()
            for _ in range(5001):
                state = np.reshape(state, [1, self.osn])
                action = self.opt_action(state)
                next_state, reward, done, info = env.step(action)
                treward += reward
                state = next_state
                if done:
                    trewards.append(treward)
                    print('episode: {:4d}/{} | treward: {:.2f}'
                          .format(e, episodes, treward), end='\r')
                    break
        return trewards
# ## Hedge Environment
from bsm import bsm_call_value
from scipy.optimize import minimize
class observation_space:
    """Minimal stand-in for a gym observation space: exposes only `shape`."""

    def __init__(self, n):
        # store the dimensionality as a 1-tuple, mirroring gym's API
        self.shape = (n,)
class action_space:
    """Minimal stand-in for a gym action space with uniform [0, 1) sampling."""

    def __init__(self, n):
        # number of action dimensions (a single hedge ratio here)
        self.n = n

    def seed(self, seed):
        # delegate to the global RNG so sampling is reproducible
        random.seed(seed)

    def sample(self):
        # uniform draw from [0, 1)
        return random.random()
class Finance:
    """Option-hedging environment with a gym-like API.

    Each episode simulates one geometric-Brownian-motion price path with a
    strike, short rate and volatility drawn at random from the candidate
    lists, and asks the agent for a hedge ratio (stock position) at every
    step.  The reward penalizes the relative deviation of the replicating
    portfolio from the Black-Scholes-Merton call value.
    State vector: [S_t, time-to-maturity, call value, stock pos, bond pos].
    """
    def __init__(self, symbol, S0, K_, T, r_, sigma_, steps):
        self.symbol = symbol
        self.initial_value = S0
        self.strike_ = K_               # candidate strikes (one drawn per episode)
        self.maturity = pd.Timestamp(T)
        self.short_rate_ = r_           # candidate short rates
        self.volatility_ = sigma_       # candidate volatilities
        self.steps = steps
        self.observation_space = observation_space(5)
        self.osn = self.observation_space.shape[0]
        self.action_space = action_space(1)
        self._simulate_data()
        self.portfolios = pd.DataFrame() # per-step hedge book-keeping, all episodes
        self.episode = 0
    def _simulate_data(self):
        """Draw (K, r, sigma) and simulate one GBM path of `steps` increments."""
        s = [self.initial_value]
        self.strike = random.choice(self.strike_)
        self.short_rate = random.choice(self.short_rate_)
        self.volatility = random.choice(self.volatility_)
        self.dt = 1 / self.steps  # "one year" as the default assumption
        for t in range(1, self.steps + 1):
            # exact GBM step: S_t = S_{t-1} * exp((r - sigma^2/2) dt + sigma sqrt(dt) Z)
            s_ = s[t - 1] * math.exp(
                ((self.short_rate - 1 / 2 * self.volatility ** 2) * self.dt +
                 self.volatility * math.sqrt(self.dt) * random.gauss(0, 1))
            )
            s.append(s_)
        self.data = pd.DataFrame(s, columns=[self.symbol],
                                 index=pd.date_range(start='2022-1-1',
                                                     end='2023-1-1',
                                                     periods=self.steps+1))
    def _get_state(self):
        # [spot, time-to-maturity (years), BSM call value, stock pos, bond pos]
        St = self.data[self.symbol].iloc[self.bar]
        ttm = (self.maturity - self.data.index[self.bar]).days / 365
        C = bsm_call_value(St, self.strike, ttm, self.short_rate, self.volatility)
        return np.array([St, ttm, C, self.stock, self.bond])
    def seed(self, seed=None):
        # no-op: randomness is driven by the global `random` module
        pass
    def reset(self):
        """Start a new episode on a freshly simulated path; return initial state."""
        self.bar = 0
        self.bond = 0
        self.stock = 0
        self.treward = 0
        self.episode += 1
        self._simulate_data()
        self.state = self._get_state()
        return self.state
    def step(self, action):
        """Advance one bar; `action` is the new hedge ratio (stock position).

        Returns (state, reward, done, info) in the gym convention.
        """
        if self.bar == 0:
            # first step: set up the initial hedge, no reward yet
            reward = 0
            self.bar += 1
            self.stock = float(action)
            self.bond = self.state[2] - self.stock * self.state[0]
            self.new_state = self._get_state()
        else:
            self.bar += 1
            self.new_state = self._get_state()
            # mark the carried hedge to market (bond accrues at r over dt)
            portfolio = (self.stock * self.new_state[0] +
                         self.bond * math.exp(self.short_rate * self.dt))
            pl = portfolio - self.new_state[2]
            df = pd.DataFrame({'e': self.episode, 's': self.stock, 'b': self.bond,
                               'phi': portfolio, 'V': self.new_state[2],
                               'p&l[$]': pl, 'p&l[%]': pl / self.new_state[2] * 100,
                               'St': self.new_state[0], 'K': self.strike,
                               'r': self.short_rate, 'sigma': self.volatility}, index=[0])
            # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
            # pd.concat is the supported replacement.
            self.portfolios = pd.concat([self.portfolios, df], ignore_index=True)
            # reward = -abs(portfolio - self.new_state[2])
            # reward = -min(abs(portfolio / self.new_state[2] - 1) * 100, 100)
            reward = -min(abs(portfolio / self.new_state[2] - 1), 1)
            # rebalance to the new hedge ratio
            self.stock = float(action)
            self.bond = self.new_state[2] - self.stock * self.new_state[0]
        if self.bar == len(self.data) - 1:
            done = True
        else:
            done = False
        info = {}
        self.state = self.new_state
        return self.state, float(reward), done, info
# ## Learning
# Environment: one simulated GBM path per episode; (K, r, sigma) drawn per episode
env = Finance('stock', S0=1., K_=[0.9, 0.95, 1, 1.05, 1.1],
              T='2023-1-10', r_=[0, 0.01, 0.05],
              sigma_=[0.1, 0.15, 0.2], steps=365)
# set_seeds(100)
agent = DQLAgent(gamma=0.95, hu=24, opt=keras.optimizers.Adam, lr=0.0001)
episodes = 50
# %time agent.learn(episodes)
agent.epsilon
# ## Testing
agent.test(3)
# Inspect the second-to-last test episode's hedge book
n = max(env.portfolios['e'])
n -= 1
p = env.portfolios[env.portfolios['e'] == n].iloc[0][['K', 'r', 'sigma']]
title = f"CALL | K={p['K']} | r={p['r']} | sigma={p['sigma']}"
env.portfolios[env.portfolios['e'] == n].tail().round(6)
env.portfolios[env.portfolios['e'] == n][['phi', 'V', 'St']].plot(
    secondary_y='St', title=title, style=['-', '--', '-'], lw=1);
env.portfolios[env.portfolios['e'] == n][['p&l[$]']].cumsum().plot(
    lw=1, title=title);
env.portfolios[env.portfolios['e'] == n][['p&l[$]', 'p&l[%]']].hist(bins=30);
env.portfolios[env.portfolios['e'] == n][['s', 'St']].plot(
    secondary_y='St', title=title);
# <img src="https://certificate.tpq.io/quantsdev_banner_color.png" alt="quants@dev" width="35%" align="right" border="0"><br>
#
# [quants@dev Discord Server](https://discord.gg/uJPtp9Awaj) | [@quants_dev](https://twitter.com/quants_dev) | <a href="mailto:<EMAIL>"><EMAIL></a>
| 12_q_learn_hedge_1.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# # Association between stroke and psychosis across four nationally representative epidemiological studies
# This is the analysis code for the above named paper with embedded results. This is a Jupyter notebook which can be re-run to reproduce the same results when accompanied by the same data
# Load R packages
library(dplyr)
library(foreign)
library(jtools)
library(table1)
library(epiR)
library(lme4)
library(IRdisplay)
options(warn=-1)
# ## Loading and recoding of national datasets
# ### Adult Psychiatric Morbidity Survey 2007 (England, United Kingdom)
# +
# Load data
#
spssfile <- "./Data/apms2007/apms07arch.sav"
apms_complete <- read.spss(spssfile, to.data.frame = TRUE, use.value.labels = FALSE)
# Select variables
#
dataset_apms <- select(apms_complete,
                       age = ResAge, sex = ResSex,
                       paranoia = PSQ3a, # Felt that people were directly acting to harm you / your interests
                       hearvoices = PSQ5a, # Heard voices saying quite a few words of sentences
                       thoughtpass = PSQ2a, # Ever felt thoughts were directly interfered with by some outside force or person in past year
                       stroke = HDoct9, # by medic
                       highest_ed = EDQUAL5,
                       sample_weight = wt_ints1)
# Label dataset
#
dataset_apms$dataset_name <- 'apms'
# Recode stroke variable
# (source coding 1 = yes, 2 = no; missing values are treated as "no evidence")
#
dataset_apms$stroke[dataset_apms$stroke==1] <- 1 # Yes
dataset_apms$stroke[dataset_apms$stroke==2] <- 0 # No
dataset_apms$stroke[is.na(dataset_apms$stroke)] <- 0 # No evidence
dataset_apms$stroke <- factor(dataset_apms$stroke, levels = c(1,0), labels = c(" yes", " no"))
# Recode psychotic symptom variables
# (the "unsure" response (3) is recoded to "no" before the yes/no recodes)
#
dataset_apms$paranoia[dataset_apms$paranoia==3] <- 0 # unsure = no
dataset_apms$paranoia[dataset_apms$paranoia==1] <- 1 # Yes
dataset_apms$paranoia[dataset_apms$paranoia==2] <- 0 # No
dataset_apms$paranoia[is.na(dataset_apms$paranoia)] <- 0 # No evidence
dataset_apms$hearvoices[dataset_apms$hearvoices==3] <- 0 # unsure = no
dataset_apms$hearvoices[dataset_apms$hearvoices==1] <- 1 # Yes
dataset_apms$hearvoices[dataset_apms$hearvoices==2] <- 0 # No
dataset_apms$hearvoices[is.na(dataset_apms$hearvoices)] <- 0 # No evidence
dataset_apms$thoughtpass[dataset_apms$thoughtpass==3] <- 0 # unsure = no
dataset_apms$thoughtpass[dataset_apms$thoughtpass==1] <- 1 # Yes
dataset_apms$thoughtpass[dataset_apms$thoughtpass==2] <- 0 # No
dataset_apms$thoughtpass[is.na(dataset_apms$thoughtpass)] <- 0 # No evidence
# Calculate probable psychosis variable
# (probable psychosis = two or more of the three symptoms endorsed)
#
dataset_apms$psychosis_tot <- rowSums(dataset_apms[,c("paranoia", "hearvoices", "thoughtpass")], na.rm=TRUE)
dataset_apms$prob_psychosis[dataset_apms$psychosis_tot >= 2] <- 1
dataset_apms$prob_psychosis[dataset_apms$psychosis_tot <= 1] <- 0
dataset_apms$psychosis_tot <- NULL
# Set types for psychosis variables
#
dataset_apms$paranoia <- factor(dataset_apms$paranoia, levels = c(1,0), labels = c(" yes", " no"))
dataset_apms$hearvoices <- factor(dataset_apms$hearvoices, levels = c(1,0), labels = c(" yes", " no"))
dataset_apms$thoughtpass <- factor(dataset_apms$thoughtpass, levels = c(1,0), labels = c(" yes", " no"))
dataset_apms$prob_psychosis <- factor(dataset_apms$prob_psychosis, levels = c(1,0), labels = c(" yes", " no"))
# Recode and set variable type for highest level of education
#
dataset_apms <- dataset_apms %>%
    mutate(highest_ed = recode(highest_ed,
                               `1` = 4, # College / uni
                               `2` = 4, # College / uni
                               `3` = 3, # Late teen high school
                               `4` = 2, # Mid teen high school
                               `5` = 2, # "Foreign / other" coded as Mid teen high school
                               `6` = 1)) # No or primary education
dataset_apms$highest_ed <- factor(dataset_apms$highest_ed,
                                  levels = c(1, 2, 3, 4),
                                  labels = c("No / primary education", "Mid-teen high school", "Late teen high school", "College / university"))
# Set variable type for sex
#
dataset_apms$sex <- factor(dataset_apms$sex, levels = c(1,2), labels = c(" male", " female"))
# Give variables formatted variable names for table
#
label(dataset_apms$sex) <- "Sex"
label(dataset_apms$age) <- "Age"
label(dataset_apms$highest_ed) <- "Highest level of education"
label(dataset_apms$stroke) <- "Stroke"
label(dataset_apms$prob_psychosis) <- "Prob.Psychosis"
# Remove intermediary memory objects
#
rm(apms_complete)
# -
# ### Collaborative Psychiatric Epidemiology Surveys 2001-2003 (United States)
# +
# Load data
#
d1_filename <- "./Data/ICPSR_20240/DS0001/20240-0001-Data.tsv"
d3_filename <- "./Data/ICPSR_20240/DS0003/20240-0003-Data.tsv"
d1_df <- read.csv(d1_filename, sep="\t", header=T)
d3_df <- read.csv(d3_filename, sep="\t", header=T)
# Select variables
#
d1_select_df <- select(d1_df,
                       caseid = CASEID,
                       sex = V09036,
                       age = V07306)
d3_select_df <- select(d3_df,
                       caseid = CASEID,
                       stroke = C10H, # by medic
                       highest_ed = H13,
                       paranoia = PS1F, # Unjust plot to harm you/have people follow-nobody believe
                       hearvoices = PS1B, # Ever hear voices others couldn't hear
                       thoughtpass = PS1C) # Ever have mind control experience
# Merge datasets
#
dataset_cpes <- merge(d1_select_df, d3_select_df, by="caseid")
# Label dataset
#
dataset_cpes$dataset_name <- 'cpes'
# Recode stroke variable
# (CPES coding: 1 = yes, 5 = no; -8/-9 = don't know / refused, treated as no)
#
dataset_cpes$stroke[dataset_cpes$stroke == 1 ] <- 1
dataset_cpes$stroke[dataset_cpes$stroke == 5 ] <- 0
dataset_cpes$stroke[dataset_cpes$stroke == -8 ] <- 0
dataset_cpes$stroke[dataset_cpes$stroke == -9 ] <- 0
dataset_cpes$stroke <- factor(dataset_cpes$stroke, levels = c(1,0), labels = c(" yes", " no"))
# Recode psychotic symptom variables
#
# Note: NAs replaced by zeros because random forest imputation imputed all NAs as zeros.
# See Bell_et_al_StrokePsychosis_Imputation.ipynb for code
#
dataset_cpes$paranoia[dataset_cpes$paranoia == 1 ] <- 1
dataset_cpes$paranoia[dataset_cpes$paranoia == 5 ] <- 0
dataset_cpes$paranoia[dataset_cpes$paranoia == -8 ] <- 0
dataset_cpes$paranoia[dataset_cpes$paranoia == -9 ] <- 0
dataset_cpes$hearvoices[dataset_cpes$hearvoices == 1 ] <- 1
dataset_cpes$hearvoices[dataset_cpes$hearvoices == 5 ] <- 0
dataset_cpes$hearvoices[dataset_cpes$hearvoices == -8 ] <- 0
dataset_cpes$hearvoices[dataset_cpes$hearvoices == -9 ] <- 0
dataset_cpes$thoughtpass[dataset_cpes$thoughtpass == 1 ] <- 1
dataset_cpes$thoughtpass[dataset_cpes$thoughtpass == 5 ] <- 0
dataset_cpes$thoughtpass[dataset_cpes$thoughtpass == -8 ] <- 0
dataset_cpes$thoughtpass[dataset_cpes$thoughtpass == -9 ] <- 0
# Calculate probable psychosis variable
# (probable psychosis = two or more of the three symptoms endorsed)
#
dataset_cpes$psychosis_tot <- rowSums(dataset_cpes[,c("paranoia", "hearvoices", "thoughtpass")], na.rm=TRUE)
dataset_cpes$prob_psychosis[dataset_cpes$psychosis_tot >= 2] <- 1
dataset_cpes$prob_psychosis[dataset_cpes$psychosis_tot <= 1] <- 0
dataset_cpes$psychosis_tot <- NULL
# Set types for psychosis variables
#
dataset_cpes$paranoia <- factor(dataset_cpes$paranoia, levels = c(1,0), labels = c(" yes", " no"))
dataset_cpes$hearvoices <- factor(dataset_cpes$hearvoices, levels = c(1,0), labels = c(" yes", " no"))
dataset_cpes$thoughtpass <- factor(dataset_cpes$thoughtpass, levels = c(1,0), labels = c(" yes", " no"))
dataset_cpes$prob_psychosis <- factor(dataset_cpes$prob_psychosis, levels = c(1,0), labels = c(" yes", " no"))
# Recode and set variable type for highest level of education
# (years of schooling collapsed into the four-level scheme shared by all surveys)
#
dataset_cpes$highest_ed[dataset_cpes$highest_ed <= 9] <- 1 # Basic
dataset_cpes$highest_ed[dataset_cpes$highest_ed==10] <- 2 # Mid teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==11] <- 2 # Mid teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==12] <- 3 # Late teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==13] <- 3 # Late teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==14] <- 3 # Late teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==15] <- 3 # Late teen high school
dataset_cpes$highest_ed[dataset_cpes$highest_ed==16] <- 4 # College / uni
dataset_cpes$highest_ed[dataset_cpes$highest_ed==17] <- 4 # College / uni
dataset_cpes$highest_ed <- factor(dataset_cpes$highest_ed,
                                  levels = c(1, 2, 3, 4),
                                  labels = c("No / primary education", "Mid-teen high school", "Late teen high school", "College / university"))
# Set variable type for sex
#
dataset_cpes$sex <- factor(dataset_cpes$sex,
                           levels = c(1,2),
                           labels = c(" male", " female"))
# Give variables formatted variable names for table
#
label(dataset_cpes$sex) <- "Sex"
label(dataset_cpes$age) <- "Age"
label(dataset_cpes$highest_ed) <- "Highest level of education"
label(dataset_cpes$stroke) <- "Stroke"
label(dataset_cpes$prob_psychosis) <- "Prob.Psychosis"
# Remove intermediary memory objects
#
rm(d1_df, d3_df, d1_select_df, d3_select_df)
# -
# ### National Mental Health Survey 2015 (Colombia)
# +
# Load data
#
personas_datafile = "./Data/ensm2015/Personas.txt"
personas_df <- read.csv(personas_datafile, header = TRUE, sep='|', stringsAsFactors = FALSE, na.strings=c("NULL","900","950"))
adultos1_datafile = "./Data/ensm2015/Adultos_I.txt"
adultos1_df <- read.csv(adultos1_datafile, header = TRUE, sep='|', stringsAsFactors = FALSE, na.strings=c("NULL","900","950"))
adultos2_datafile = "./Data/ensm2015/Adultos_II.txt"
adultos2_df <- read.csv(adultos2_datafile, header = TRUE, sep='|', stringsAsFactors = FALSE, na.strings=c("NULL","900","950"))
# Merge datasets
#
merged_df <- merge(personas_df, adultos1_df, by="identificador_persona")
merged_df <- merge(merged_df, adultos2_df, by="identificador_persona")
# Select variables
#
dataset_ensm <- select(merged_df,
                       identificador_persona,
                       age = age.x,
                       sex = sex.x,
                       paranoia = m7_p21, # ¿Siente que alguien ha tratado de herirlo en alguna forma?
                       hearvoices = m7_p24, # ¿Oye voces sin saber de dónde vienen o que otras personas no pueden oír?
                       thoughtpass = m7_p23, # ¿Ha notado interferencias o algo raro en su pensamiento?
                       highest_ed = NivelEducativo.x,
                       stroke = m5_p38)
# Label dataset
#
dataset_ensm$dataset_name <- 'ensm'
# Recode stroke variable
# NOTE(review): unlike APMS/CPES, NA stroke responses are NOT recoded to 0
# here, so they remain NA in the factor — confirm this is intended.
#
dataset_ensm$stroke[dataset_ensm$stroke==2] <- 0
dataset_ensm$stroke <- factor(dataset_ensm$stroke, levels = c(1,0), labels = c(" yes", " no"))
# Recode psychotic symptom variables
# (1 = yes kept as-is, 2 = no recoded to 0; NAs handled by na.rm in rowSums below)
#
dataset_ensm$paranoia[dataset_ensm$paranoia==2] <- 0
dataset_ensm$hearvoices[dataset_ensm$hearvoices==2] <- 0
dataset_ensm$thoughtpass[dataset_ensm$thoughtpass==2] <- 0
# Calculate probable psychosis variable
# (probable psychosis = two or more of the three symptoms endorsed)
#
dataset_ensm$psychosis_tot <- rowSums(dataset_ensm[,c("paranoia", "hearvoices", "thoughtpass")], na.rm=TRUE)
dataset_ensm$prob_psychosis[dataset_ensm$psychosis_tot >= 2] <- 1
dataset_ensm$prob_psychosis[dataset_ensm$psychosis_tot <= 1] <- 0
dataset_ensm$psychosis_tot <- NULL
# Set types for psychosis variables
#
dataset_ensm$paranoia <- factor(dataset_ensm$paranoia, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensm$hearvoices <- factor(dataset_ensm$hearvoices, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensm$thoughtpass <- factor(dataset_ensm$thoughtpass, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensm$prob_psychosis <- factor(dataset_ensm$prob_psychosis, levels = c(1,0), labels = c(" yes", " no"))
# Recode and set variable type for highest level of education
#
dataset_ensm$highest_ed[dataset_ensm$highest_ed == 1] <- 1 # Basic
dataset_ensm$highest_ed[dataset_ensm$highest_ed == 2] <- 2 # Mid teen high school
dataset_ensm$highest_ed[dataset_ensm$highest_ed == 3] <- 3 # Late teen high school
dataset_ensm$highest_ed[dataset_ensm$highest_ed == 4] <- 4 # College / uni
dataset_ensm$highest_ed <- factor(dataset_ensm$highest_ed,
                                  levels = c(1, 2, 3, 4),
                                  labels = c("No / primary education", "Mid-teen high school", "Late teen high school", "College / university"))
# Set variable type for sex
#
dataset_ensm$sex <- factor(dataset_ensm$sex, levels = c(1,2), labels = c(" male", " female"))
# Give variables formatted variable names for table
#
label(dataset_ensm$sex) <- "Sex"
label(dataset_ensm$age) <- "Age"
label(dataset_ensm$highest_ed) <- "Highest level of education"
label(dataset_ensm$stroke) <- "Stroke"
label(dataset_ensm$prob_psychosis) <- "Prob.Psychosis"
# Remove intermediary memory objects
#
rm(personas_df, adultos1_df, adultos2_df, merged_df)
# -
# ### National Health Survey 2016-2017 (Chile)
# +
# Load data
# (use.value.labels = FALSE keeps the SPSS codes numeric so they can be
# recoded explicitly below)
#
spssfile <- "./Data/ensc_2016-2017/Base de datos Encuesta Nacional de Salud 2016-2017(ENS).Formulario 1_2_EX.MINSAL_EPI. (2).sav"
ensc_f1_complete <- read.spss(spssfile, to.data.frame = TRUE, use.value.labels = FALSE)
spssfile <- "./Data/ensc_2016-2017/diag_main_v8_spss_11102019.sav"
ensc_dx_complete <- read.spss(spssfile, to.data.frame = TRUE, use.value.labels = FALSE)
# Select variables
#
dataset_ensc_f1 <- select(ensc_f1_complete,
                          caseid = IdEncuesta,
                          age = Edad,
                          sex = Sexo,
                          stroke = d4) # by medic
dataset_ensc_dx <- select(ensc_dx_complete,
                          caseid = IdEncuesta,
                          paranoia = PS1f, # Survey item: "Have you ever thought there was a conspiracy to harm or persecute you, and that your family or friends did not believe it was real?"
                          hearvoices = PS1b, # Survey item: "The second experience is hearing voices that other people could not hear. Has anything like this ever happened to you?"
                          thoughtpass = PS1c, # Survey item: "The third experience has to do with mind control."
                          highest_ed = NEDU2_MINSAL_1)
# Merge datasets on the shared case identifier
#
dataset_ensc <- merge(dataset_ensc_f1, dataset_ensc_dx, by="caseid")
# Label dataset
#
dataset_ensc$dataset_name <- 'ensc'
# Recode stroke variable (source coding: 1 = yes, 2 = no)
#
dataset_ensc$stroke[dataset_ensc$stroke == 1 ] <- 1
dataset_ensc$stroke[dataset_ensc$stroke == 2 ] <- 0
dataset_ensc$stroke <- factor(dataset_ensc$stroke, levels = c(1,0), labels = c(" yes", " no"))
# Recode psychotic symptom variables to 1 = present, 0 = absent.
# NOTE(review): codes 5, 8 and 9 are all collapsed to "absent" — presumably
# no / don't know / refused; verify against the ENS 2016-2017 codebook.
#
dataset_ensc$paranoia[dataset_ensc$paranoia == 1 ] <- 1
dataset_ensc$paranoia[dataset_ensc$paranoia == 5 ] <- 0
dataset_ensc$paranoia[dataset_ensc$paranoia == 8 ] <- 0
dataset_ensc$paranoia[dataset_ensc$paranoia == 9 ] <- 0
dataset_ensc$hearvoices[dataset_ensc$hearvoices == 1 ] <- 1
dataset_ensc$hearvoices[dataset_ensc$hearvoices == 5 ] <- 0
dataset_ensc$hearvoices[dataset_ensc$hearvoices == 8 ] <- 0
dataset_ensc$hearvoices[dataset_ensc$hearvoices == 9 ] <- 0
dataset_ensc$thoughtpass[dataset_ensc$thoughtpass == 1 ] <- 1
dataset_ensc$thoughtpass[dataset_ensc$thoughtpass == 5 ] <- 0
dataset_ensc$thoughtpass[dataset_ensc$thoughtpass == 8 ] <- 0
dataset_ensc$thoughtpass[dataset_ensc$thoughtpass == 9 ] <- 0
# Calculate probable psychosis variable: two or more endorsed symptoms.
# (na.rm=TRUE means missing symptoms count as absent in the sum)
#
dataset_ensc$psychosis_tot <- rowSums(dataset_ensc[,c("paranoia", "hearvoices", "thoughtpass")], na.rm=TRUE)
dataset_ensc$prob_psychosis[dataset_ensc$psychosis_tot >= 2] <- 1
dataset_ensc$prob_psychosis[dataset_ensc$psychosis_tot <= 1] <- 0
dataset_ensc$psychosis_tot <- NULL
# Set types for psychosis variables (" yes" first, matching the other surveys)
#
dataset_ensc$paranoia <- factor(dataset_ensc$paranoia, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensc$hearvoices <- factor(dataset_ensc$hearvoices, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensc$thoughtpass <- factor(dataset_ensc$thoughtpass, levels = c(1,0), labels = c(" yes", " no"))
dataset_ensc$prob_psychosis <- factor(dataset_ensc$prob_psychosis, levels = c(1,0), labels = c(" yes", " no"))
# Recode and set variable type for highest level of education
# (collapse the 10-level NEDU2_MINSAL_1 coding into 4 bands)
#
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 1] <- 1 # Basic
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 2] <- 1 # Basic
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 3] <- 2 # Mid teen high school
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 4] <- 2 # Mid teen high school
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 5] <- 3 # Late teen high school
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 6] <- 3 # Late teen high school
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 7] <- 4 # College / uni
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 8] <- 4 # College / uni
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 9] <- 4 # College / uni
dataset_ensc$highest_ed[dataset_ensc$highest_ed == 10] <- 4 # College / uni
dataset_ensc$highest_ed <- factor(dataset_ensc$highest_ed,
                                  levels = c(1, 2, 3, 4),
                                  labels = c("No / primary education", "Mid-teen high school", "Late teen high school", "College / university"))
# Set variable type for sex (source coding: 1 = male, 2 = female)
#
dataset_ensc$sex <- factor(dataset_ensc$sex, levels = c(1,2), labels = c(" male", " female"))
# Give variables formatted variable names for the summary table
#
label(dataset_ensc$sex) <- "Sex"
label(dataset_ensc$age) <- "Age"
label(dataset_ensc$highest_ed) <- "Highest level of education"
label(dataset_ensc$stroke) <- "Stroke"
label(dataset_ensc$prob_psychosis) <- "Prob.Psychosis"
# Remove intermediary memory objects
#
rm(ensc_f1_complete, ensc_dx_complete, dataset_ensc_f1, dataset_ensc_dx)
# -
# ### Create combined countries dataset
# +
# Reduce each national dataset to the common analysis variables and stack
# them into one combined countries dataset.
#
pick_analysis_vars <- function(df) {
  # Keep only the variables shared across all four surveys
  select(df,
         dataset_name,
         sex, age,
         stroke, prob_psychosis,
         paranoia, hearvoices, thoughtpass,
         highest_ed)
}
subset_apms <- pick_analysis_vars(dataset_apms)
subset_cpes <- pick_analysis_vars(dataset_cpes)
subset_ensm <- pick_analysis_vars(dataset_ensm)
subset_ensc <- pick_analysis_vars(dataset_ensc)
# ...and combine into one combined countries dataset
#
dataset_combined <- rbind(subset_apms, subset_cpes, subset_ensm, subset_ensc)
# Remove intermediary memory objects (including the local helper)
#
rm(subset_apms, subset_ensm, subset_cpes, subset_ensc, pick_analysis_vars)
# -
# ## Functions to calculate prevalences
# #### Calculate stroke prevalence in total population
# Prevalence of stroke in the total sample.
# prev_df: data frame with a `stroke` factor whose first level is " yes".
# Returns a single "est% (lower - upper)" string with exact binomial 95% CIs.
calc_stroke_prevalence <- function(prev_df) {
  # First table() cell is the " yes" count because of the factor level order
  stroke_counts <- as.numeric(table(prev_df$stroke))
  cases <- stroke_counts[1]
  total <- sum(stroke_counts)
  # Exact binomial prevalence, scaled to percent
  estimate <- epi.conf(as.matrix(cbind(cases, total)),
                       ctype = "prevalence", method = "exact",
                       N = nrow(prev_df), design = 1,
                       conf.level = 0.95) * 100
  sprintf("%.2f%% (%.2f - %.2f)", estimate$est, estimate$lower, estimate$upper)
}
# #### Calculate probable psychosis prevalence in total population
# Prevalence of probable psychosis in the total sample.
# prev_df: data frame with a `prob_psychosis` factor whose first level is " yes".
# Returns a single "est% (lower - upper)" string with exact binomial 95% CIs.
calc_probpsychosis_prevalence <- function(prev_df) {
  # First table() cell is the " yes" count because of the factor level order
  psychosis_counts <- as.numeric(table(prev_df$prob_psychosis))
  cases <- psychosis_counts[1]
  total <- sum(psychosis_counts)
  # Exact binomial prevalence, scaled to percent
  estimate <- epi.conf(as.matrix(cbind(cases, total)),
                       ctype = "prevalence", method = "exact",
                       N = nrow(prev_df), design = 1,
                       conf.level = 0.95) * 100
  sprintf("%.2f%% (%.2f - %.2f)", estimate$est, estimate$lower, estimate$upper)
}
# #### Calculate prevalence of probable psychosis in stroke population
# Prevalence of probable psychosis among people with stroke.
# prev_df: data frame with `stroke` and `prob_psychosis` factors whose first
#          level is " yes".
# Returns a single "est% (lower - upper)" string with exact binomial 95% CIs.
# FIX: removed a stray bare `prev_est` statement (dead code inside a
# function — evaluated and discarded, never printed or used).
calc_probpsychosis_in_stroke_prevalence <- function(prev_df) {
  # How many people have stroke? (first table() cell is the " yes" count)
  stroke_table <- as.numeric(table(prev_df$stroke))
  stroke_pop <- stroke_table[1]
  # How many people who have stroke have psychosis? The 2x2 table is
  # flattened column-major, so element [1] is stroke " yes" & psychosis " yes".
  stroke_psychosis_table <- as.numeric(table(prev_df$stroke, prev_df$prob_psychosis))
  psychosis_in_stroke_cases <- stroke_psychosis_table[1]
  prev_matrix <- as.matrix(cbind(psychosis_in_stroke_cases, stroke_pop))
  # Calculate prevalence (exact binomial, scaled to percent).
  # NOTE(review): N is the whole sample size, not the stroke subgroup —
  # confirm this is the intended finite-population input to epi.conf.
  prev_est <- epi.conf(prev_matrix, ctype = "prevalence", method = "exact", N = nrow(prev_df), design = 1,
                       conf.level = 0.95) * 100
  est_plus_95CI <- sprintf("%.2f%% (%.2f - %.2f)", prev_est$est, prev_est$lower, prev_est$upper)
  est_plus_95CI
}
# #### Calculate prevalence of stroke in probable psychosis population
# Prevalence of stroke among people with probable psychosis.
# prev_df: data frame with `prob_psychosis` and `stroke` factors whose first
#          level is " yes".
# Returns a single "est% (lower - upper)" string with exact binomial 95% CIs.
# FIX: removed a stray bare `prev_est` statement (dead code inside a
# function — evaluated and discarded, never printed or used).
calc_stroke_in_probpsychosis_prevalence <- function(prev_df) {
  # How many people have probable psychosis? (first table() cell = " yes")
  psychosis_table <- as.numeric(table(prev_df$prob_psychosis))
  psychosis_pop <- psychosis_table[1]
  # How many people who have probable psychosis have stroke? The 2x2 table is
  # flattened column-major, so element [1] is psychosis " yes" & stroke " yes".
  psychosis_stroke_table <- as.numeric(table(prev_df$prob_psychosis, prev_df$stroke))
  stroke_in_psychosis_cases <- psychosis_stroke_table[1]
  prev_matrix <- as.matrix(cbind(stroke_in_psychosis_cases, psychosis_pop))
  # Calculate prevalence (exact binomial, scaled to percent).
  # NOTE(review): N is the whole sample size, not the psychosis subgroup —
  # confirm this is the intended finite-population input to epi.conf.
  prev_est <- epi.conf(prev_matrix, ctype = "prevalence", method = "exact", N = nrow(prev_df), design = 1,
                       conf.level = 0.95) * 100
  est_plus_95CI <- sprintf("%.2f%% (%.2f - %.2f)", prev_est$est, prev_est$lower, prev_est$upper)
  est_plus_95CI
}
# ## Functions to run regression models
# #### Regression models testing unadjusted and adjusted associations between stroke and probable psychosis in national datasets
# Fit unadjusted and sex/age/education-adjusted logistic regressions of
# probable psychosis on stroke within one national dataset.
# funcdf: data frame with prob_psychosis, stroke, sex, age, highest_ed.
# Returns c(unadjusted, adjusted) "OR (lower - upper)" strings for the
# stroke term.
# NOTE(review): outcome and exposure factors both have levels c(" yes", " no"),
# so glm models the probability of the second level — confirm the reported
# OR direction matches the intended interpretation.
run_prob_psychosis_models <- function(funcdf) {
  # Unadjusted
  test_model <- glm(prob_psychosis ~ stroke, data = funcdf, family = binomial(link = "logit"), maxit=500)
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # coeftable is indexed column-major: with 2 coefficient rows, [2]/[4]/[6]
  # are the stroke row's exp(estimate), lower and upper CI — assumes the
  # jtools::summ column layout; verify if jtools changes.
  un_prob_psychosis_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[4], summ_of_model$coeftable[6])
  # Adjusted by sex, age, education
  test_model <- glm(prob_psychosis ~ stroke + sex + age + highest_ed, data = funcdf, family = binomial(link = "logit"), maxit=500)
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # [2]/[9]/[16] assume exactly 7 coefficient rows (intercept, stroke, sex,
  # age, 3 education contrasts) — revisit if model terms change
  adj_ed_prob_psychosis_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[9], summ_of_model$coeftable[16])
  prob_psychosis_results <- c(un_prob_psychosis_OR_CIs,
                              adj_ed_prob_psychosis_OR_CIs)
  prob_psychosis_results
}
# #### Multi-level regression models testing unadjusted and adjusted associations between stroke and probable psychosis in combined countries dataset
# Fit unadjusted and adjusted multi-level logistic regressions of probable
# psychosis on stroke across the combined countries dataset, with a random
# intercept per national dataset.
# funcdf: combined data frame with prob_psychosis, stroke, sex, age,
#         highest_ed and dataset_name.
# Returns c(unadjusted, adjusted) "OR (lower - upper)" strings for the
# stroke term (Wald 95% CIs).
# BUG FIX: both glmer calls previously fitted on the global `dataset_combined`
# instead of the `funcdf` argument, unlike every sibling *_combined_model
# helper; the parameter was silently ignored.
run_prob_psychosis_combined_model <- function(funcdf) {
  # Unadjusted
  test_model <- glmer(prob_psychosis ~ stroke + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # Column-major indexing: with 2 fixed effects, [2]/[4]/[6] are the stroke
  # row's OR, lower and upper Wald 95% CI
  un_prob_psychosis_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[4], test_model_table_OR[6])
  # Adjusted by sex, age, education
  test_model <- glmer(prob_psychosis ~ stroke + sex + age + highest_ed + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # [2]/[9]/[16] assume exactly 7 fixed-effect rows — revisit if terms change
  adj_ed_prob_psychosis_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[9], test_model_table_OR[16])
  prob_psychosis_combined_results <- c(un_prob_psychosis_OR_CIs,
                                       adj_ed_prob_psychosis_OR_CIs)
  prob_psychosis_combined_results
}
# #### Regression models testing unadjusted and adjusted associations between stroke and paranoia in national datasets
# Fit unadjusted and sex/age/education-adjusted logistic regressions of
# paranoia on stroke within one national dataset.
# Returns c(unadjusted, adjusted) "OR (lower - upper)" strings for the
# stroke term.
run_paranoia_models <- function(funcdf) {
  # Unadjusted
  test_model <- glm(paranoia ~ stroke, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # Column-major coeftable indexing: [2]/[4]/[6] = stroke estimate, lower,
  # upper with 2 coefficient rows
  un_paranoia_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[4], summ_of_model$coeftable[6])
  # Adjusted by sex, age, education
  test_model <- glm(paranoia ~ stroke + sex + age + highest_ed, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # [2]/[9]/[16] assume exactly 7 coefficient rows — revisit if terms change
  adj_ed_paranoia_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[9], summ_of_model$coeftable[16])
  paranoia_results <- c(un_paranoia_OR_CIs,
                        adj_ed_paranoia_OR_CIs)
  paranoia_results
}
# #### Multi-level regression models testing unadjusted and adjusted associations between stroke and paranoia in combined countries dataset
# Fit unadjusted and adjusted multi-level logistic regressions of paranoia
# on stroke across the combined countries dataset, with a random intercept
# per national dataset. Returns c(unadjusted, adjusted) "OR (lower - upper)"
# strings for the stroke term (Wald 95% CIs).
run_paranoia_combined_model <- function(funcdf) {
  # Unadjusted
  test_model <- glmer(paranoia ~ stroke + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # Column-major indexing: [2]/[4]/[6] = stroke OR, lower, upper with 2 rows
  un_paranoia_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[4], test_model_table_OR[6])
  # Adjusted by sex, age, education
  test_model <- glmer(paranoia ~ stroke + sex + age + highest_ed + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # [2]/[9]/[16] assume exactly 7 fixed-effect rows — revisit if terms change
  adj_ed_paranoia_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[9], test_model_table_OR[16])
  paranoia_combined_results <- c(un_paranoia_OR_CIs,
                                 adj_ed_paranoia_OR_CIs)
  paranoia_combined_results
}
# #### Regression models testing unadjusted and adjusted association between stroke and hallucinated voices in national datasets
# Fit unadjusted and sex/age/education-adjusted logistic regressions of
# hallucinated voices on stroke within one national dataset.
# Returns c(unadjusted, adjusted) "OR (lower - upper)" strings for the
# stroke term.
run_hearvoices_models <- function(funcdf) {
  # Unadjusted
  test_model <- glm(hearvoices ~ stroke, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # Column-major coeftable indexing: [2]/[4]/[6] = stroke estimate, lower,
  # upper with 2 coefficient rows
  un_hearvoices_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[4], summ_of_model$coeftable[6])
  # Adjusted by sex, age and education
  test_model <- glm(hearvoices ~ stroke + sex + age + highest_ed, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # [2]/[9]/[16] assume exactly 7 coefficient rows — revisit if terms change
  adj_ed_hearvoices_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[9], summ_of_model$coeftable[16])
  hearvoices_results <- c(un_hearvoices_OR_CIs,
                          adj_ed_hearvoices_OR_CIs)
  hearvoices_results
}
# #### Multi-level regression models testing unadjusted and adjusted associations between stroke and hallucinated voices in combined countries dataset
# Fit unadjusted and adjusted multi-level logistic regressions of hallucinated
# voices on stroke across the combined countries dataset, with a random
# intercept per national dataset. Returns c(unadjusted, adjusted)
# "OR (lower - upper)" strings for the stroke term (Wald 95% CIs).
run_hearvoices_combined_model <- function(funcdf) {
  # Unadjusted
  test_model <- glmer(hearvoices ~ stroke + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # Column-major indexing: [2]/[4]/[6] = stroke OR, lower, upper with 2 rows
  un_hearvoices_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[4], test_model_table_OR[6])
  # Adjusted by sex, age, education
  test_model <- glmer(hearvoices ~ stroke + sex + age + highest_ed + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # [2]/[9]/[16] assume exactly 7 fixed-effect rows — revisit if terms change
  adj_ed_hearvoices_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[9], test_model_table_OR[16])
  hearvoices_combined_results <- c(un_hearvoices_OR_CIs,
                                   adj_ed_hearvoices_OR_CIs)
  hearvoices_combined_results
}
# #### Regression models testing unadjusted and adjusted associations between stroke and thought passivity delusions in national datasets
# Fit unadjusted and sex/age/education-adjusted logistic regressions of
# thought passivity delusions on stroke within one national dataset.
# Returns c(unadjusted, adjusted) "OR (lower - upper)" strings for the
# stroke term.
run_thoughtpass_models <- function(funcdf) {
  # Unadjusted
  test_model <- glm(thoughtpass ~ stroke, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # Column-major coeftable indexing: [2]/[4]/[6] = stroke estimate, lower,
  # upper with 2 coefficient rows
  un_thoughtpass_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[4], summ_of_model$coeftable[6])
  # Adjusted by sex, age, education
  test_model <- glm(thoughtpass ~ stroke + sex + age + highest_ed, data = funcdf, family = binomial(link = "logit"))
  summ_of_model <- summ(test_model, exp = TRUE, digits = 4)
  # [2]/[9]/[16] assume exactly 7 coefficient rows — revisit if terms change
  adj_ed_thoughtpass_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", summ_of_model$coeftable[2], summ_of_model$coeftable[9], summ_of_model$coeftable[16])
  thoughtpass_results <- c(un_thoughtpass_OR_CIs,
                           adj_ed_thoughtpass_OR_CIs)
  thoughtpass_results
}
# #### Multi-level regression models testing unadjusted and adjusted associations between stroke and thought passivity delusions in combined countries dataset
# Fit unadjusted and adjusted multi-level logistic regressions of thought
# passivity delusions on stroke across the combined countries dataset, with a
# random intercept per national dataset. Returns c(unadjusted, adjusted)
# "OR (lower - upper)" strings for the stroke term (Wald 95% CIs).
run_thoughtpass_combined_model <- function(funcdf) {
  # Unadjusted
  test_model <- glmer(thoughtpass ~ stroke + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # Column-major indexing: [2]/[4]/[6] = stroke OR, lower, upper with 2 rows
  un_thoughtpass_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[4], test_model_table_OR[6])
  # Adjusted by sex, age, education
  test_model <- glmer(thoughtpass ~ stroke + sex + age + highest_ed + (1|dataset_name),
                      data = funcdf,
                      family = binomial(link = "logit"),
                      control = glmerControl(tolPwrss=1e-3))
  test_model_CIs <- confint(test_model, parm="beta_", method="Wald")
  test_model_table <- cbind(est=fixef(test_model),test_model_CIs)
  test_model_table_OR <- exp(test_model_table)
  # [2]/[9]/[16] assume exactly 7 fixed-effect rows — revisit if terms change
  adj_ed_thoughtpass_OR_CIs <- sprintf("%.2f (%.2f - %.2f)", test_model_table_OR[2], test_model_table_OR[9], test_model_table_OR[16])
  thoughtpass_combined_results <- c(un_thoughtpass_OR_CIs,
                                    adj_ed_thoughtpass_OR_CIs)
  thoughtpass_combined_results
}
# ## Results reported in paper
# ### Descriptive statistics
# +
# Relabel the survey identifier for the descriptive table:
# apms = UK, cpes = US, ensc = Chile, ensm = Colombia
dataset_combined$dataset_name <- factor(dataset_combined$dataset_name,
                                        levels = c("apms", "cpes", "ensc", "ensm"),
                                        labels = c("UK", "US", "Chile", "Colombia"))
# Relabel columns for display
label(dataset_combined$sex) <- "Sex"
label(dataset_combined$age) <- "Age"
label(dataset_combined$highest_ed) <- "Highest level of education"
label(dataset_combined$paranoia) <- "Paranoia"
label(dataset_combined$hearvoices) <- "Hallucinated voices"
label(dataset_combined$thoughtpass) <- "Passivity delusion"
label(dataset_combined$prob_psychosis) <- "Probable Psychosis"
label(dataset_combined$stroke) <- "Stroke"
# Display table with descriptive statistics including missing values for unaltered data
display_html(table1(~ age + sex + highest_ed + stroke + paranoia + hearvoices + thoughtpass + prob_psychosis | dataset_name,
                    data=dataset_combined, overall="Total"))
# -
# #### Insert results of missing data imputation for CPES dataset
# +
# As the missing data imputation did not impute any new symptoms for cases in the CPES dataset
# and replaced all NAs as symptom not present (see CPESVariableImputationCheck.R),
# we replace all NAs with " no" in this dataset
dataset_cpes$paranoia[is.na(dataset_cpes$paranoia)] <- " no"
dataset_cpes$hearvoices[is.na(dataset_cpes$hearvoices)] <- " no"
dataset_cpes$thoughtpass[is.na(dataset_cpes$thoughtpass)] <- " no"
#... and recombine the datasets
subset_apms <- select(dataset_apms,
                      dataset_name,
                      sex, age,
                      stroke, prob_psychosis,
                      paranoia, hearvoices, thoughtpass,
                      highest_ed)
subset_cpes <- select(dataset_cpes,
                      dataset_name,
                      sex, age,
                      stroke, prob_psychosis,
                      paranoia, hearvoices, thoughtpass,
                      highest_ed)
subset_ensm <- select(dataset_ensm,
                      dataset_name,
                      sex, age,
                      stroke, prob_psychosis,
                      paranoia, hearvoices, thoughtpass,
                      highest_ed)
subset_ensc <- select(dataset_ensc,
                      dataset_name,
                      sex, age,
                      stroke, prob_psychosis,
                      paranoia, hearvoices, thoughtpass,
                      highest_ed)
# ...and combine into one combined countries dataset.
# NOTE(review): this rebuild replaces the country-labelled dataset_combined —
# dataset_name reverts to the raw codes ("apms", "cpes", ...), which is what
# the glmer random intercepts then use.
#
dataset_combined <- rbind(subset_apms, subset_cpes)
dataset_combined <- rbind(dataset_combined, subset_ensm)
dataset_combined <- rbind(dataset_combined, subset_ensc)
# Remove intermediary memory objects
#
rm(subset_apms, subset_ensm, subset_cpes, subset_ensc)
# -
# ### Prevalence results
# #### Calculate prevalence results
# +
# Compute each prevalence summary for every survey in the fixed order
# UK (apms), US (cpes), Colombia (ensm), Chile (ensc), then the combined
# dataset; each helper returns one "est% (lower - upper)" string.
#
surveys <- list(dataset_apms, dataset_cpes, dataset_ensm, dataset_ensc, dataset_combined)
# Stroke prevalence
#
strok_prev_vector <- sapply(surveys, calc_stroke_prevalence)
# Probable psychosis prevalence
#
probpsych_prev_vector <- sapply(surveys, calc_probpsychosis_prevalence)
# Probable psychosis in stroke prevalence
#
probpsych_in_stroke_prev_vector <- sapply(surveys, calc_probpsychosis_in_stroke_prevalence)
# Stroke in probable psychosis prevalence
#
stroke_in_probpsychosis_prev_vector <- sapply(surveys, calc_stroke_in_probpsychosis_prevalence)
# Remove intermediary memory objects
#
rm(surveys)
# -
# #### Display results in table
# Display in table
#
# name_vec order must match the order the prevalence vectors were filled:
# apms (UK), cpes (US), ensm (Colombia), ensc (Chile), combined
name_vec <- c("UK", "US", "Colombia", "Chile", "Combined")
prevalence_table_df <- data.frame(Country = name_vec,
                                  "Stroke Prevalence" = strok_prev_vector,
                                  "Prob Psychosis Prevalence" = probpsych_prev_vector,
                                  "Prob Psychosis in Stroke Prevalence" = probpsych_in_stroke_prev_vector,
                                  "Stroke in Prob Psychosis Prevalence" = stroke_in_probpsychosis_prev_vector)
prevalence_table_df
# ### Analysis of directed acyclic graph of major causal relationships between stroke and psychosis
# Download graph from dagitty.net and display
# +
library(dagitty)
# Causal graph hosted on dagitty.net (ID mWB5lCB); requires network access
g <- downloadGraph("dagitty.net/mWB5lCB")
plot(g)
# -
# Test whether the total effect can be estimated by covariate adjustment
adjustmentSets(g, "Stroke", "Psychosis")
# No output, so this reports there are no minimal sets of covariates that will achieve this
# ### Regression results reporting associations between stroke and probable psychosis, and stroke and psychotic symptoms
# #### Unadjusted and adjusted associations between stroke and probable psychosis
# Calculate probable psychosis results from regression models
# (each *_results is c(unadjusted, adjusted) "OR (lower - upper)" strings)
#
uk_results <- run_prob_psychosis_models(dataset_apms)
us_results <- run_prob_psychosis_models(dataset_cpes)
colombia_results <- run_prob_psychosis_models(dataset_ensm)
chile_results <- run_prob_psychosis_models(dataset_ensc)
combined_results <- run_prob_psychosis_combined_model(dataset_combined)
# +
# Collate results into table and display; rows are appended in name_vec
# order (UK, US, Colombia, Chile, Combined)
#
probpsychosis_regression_table_df <- data.frame("Unadjusted" = character(), "Adjusted" = character())
probpsychosis_regression_table_df[nrow(probpsychosis_regression_table_df)+1,] <- uk_results
probpsychosis_regression_table_df[nrow(probpsychosis_regression_table_df)+1,] <- us_results
probpsychosis_regression_table_df[nrow(probpsychosis_regression_table_df)+1,] <- colombia_results
probpsychosis_regression_table_df[nrow(probpsychosis_regression_table_df)+1,] <- chile_results
probpsychosis_regression_table_df[nrow(probpsychosis_regression_table_df)+1,] <- combined_results
rownames(probpsychosis_regression_table_df) <- name_vec
probpsychosis_regression_table_df
# -
# -
# #### Unadjusted and adjusted associations between stroke and paranoia
# Calculate paranoia results from regression models
# (each *_results is c(unadjusted, adjusted) "OR (lower - upper)" strings)
#
uk_results <- run_paranoia_models(dataset_apms)
us_results <- run_paranoia_models(dataset_cpes)
colombia_results <- run_paranoia_models(dataset_ensm)
chile_results <- run_paranoia_models(dataset_ensc)
combined_results <- run_paranoia_combined_model(dataset_combined)
# +
# Collate results into table and display; rows are appended in name_vec
# order (UK, US, Colombia, Chile, Combined)
#
paranoia_regression_table_df <- data.frame("Unadjusted" = character(), "Adjusted" = character())
paranoia_regression_table_df[nrow(paranoia_regression_table_df)+1,] <- uk_results
paranoia_regression_table_df[nrow(paranoia_regression_table_df)+1,] <- us_results
paranoia_regression_table_df[nrow(paranoia_regression_table_df)+1,] <- colombia_results
paranoia_regression_table_df[nrow(paranoia_regression_table_df)+1,] <- chile_results
paranoia_regression_table_df[nrow(paranoia_regression_table_df)+1,] <- combined_results
rownames(paranoia_regression_table_df) <- name_vec
paranoia_regression_table_df
# -
# -
# #### Unadjusted and adjusted associations between stroke and hallucinated voices
# Calculate hallucinated voices results from regression models
# (each *_results is c(unadjusted, adjusted) "OR (lower - upper)" strings)
#
uk_results <- run_hearvoices_models(dataset_apms)
us_results <- run_hearvoices_models(dataset_cpes)
colombia_results <- run_hearvoices_models(dataset_ensm)
chile_results <- run_hearvoices_models(dataset_ensc)
combined_results <- run_hearvoices_combined_model(dataset_combined)
# +
# Collate results into table and display; rows are appended in name_vec
# order (UK, US, Colombia, Chile, Combined)
#
hearvoices_regression_table_df <- data.frame("Unadjusted" = character(), "Adjusted" = character())
hearvoices_regression_table_df[nrow(hearvoices_regression_table_df)+1,] <- uk_results
hearvoices_regression_table_df[nrow(hearvoices_regression_table_df)+1,] <- us_results
hearvoices_regression_table_df[nrow(hearvoices_regression_table_df)+1,] <- colombia_results
hearvoices_regression_table_df[nrow(hearvoices_regression_table_df)+1,] <- chile_results
hearvoices_regression_table_df[nrow(hearvoices_regression_table_df)+1,] <- combined_results
rownames(hearvoices_regression_table_df) <- name_vec
hearvoices_regression_table_df
# -
# -
# #### Unadjusted and adjusted associations between stroke and thought passivity delusions
# Calculate thought passivity delusions results from regression models
# (each *_results is c(unadjusted, adjusted) "OR (lower - upper)" strings)
#
uk_results <- run_thoughtpass_models(dataset_apms)
us_results <- run_thoughtpass_models(dataset_cpes)
colombia_results <- run_thoughtpass_models(dataset_ensm)
chile_results <- run_thoughtpass_models(dataset_ensc)
combined_results <- run_thoughtpass_combined_model(dataset_combined)
# +
# Collate results into table and display; rows are appended in name_vec
# order (UK, US, Colombia, Chile, Combined)
#
thoughtpass_regression_table_df <- data.frame("Unadjusted" = character(), "Adjusted" = character())
thoughtpass_regression_table_df[nrow(thoughtpass_regression_table_df)+1,] <- uk_results
thoughtpass_regression_table_df[nrow(thoughtpass_regression_table_df)+1,] <- us_results
thoughtpass_regression_table_df[nrow(thoughtpass_regression_table_df)+1,] <- colombia_results
thoughtpass_regression_table_df[nrow(thoughtpass_regression_table_df)+1,] <- chile_results
thoughtpass_regression_table_df[nrow(thoughtpass_regression_table_df)+1,] <- combined_results
rownames(thoughtpass_regression_table_df) <- name_vec
thoughtpass_regression_table_df
# -
# -
# ### Analysis platform details and software versions
# Record the R version and the versions of every package used, for
# reproducibility of the published analysis
version
packageVersion("dplyr")
packageVersion("foreign")
packageVersion("table1")
packageVersion("jtools")
packageVersion("epiR")
packageVersion("lme4")
packageVersion("IRdisplay")
| Bell_et_al_StrokePsychosis_Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="IkSguVy8Xv83"
# # **DRMIME (2D)**
#
# ---
#
# <font size = 4> DRMIME is a self-supervised deep-learning method that can be used to register 2D images.
#
# <font size = 4> **This particular notebook enables self-supervised registration of 2D dataset.**
#
# ---
#
# <font size = 4>*Disclaimer*:
#
# <font size = 4>This notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (https://github.com/HenriquesLab/DeepLearning_Collab/wiki). Jointly developed by the Jacquemet (link to https://cellmig.org/) and Henriques (https://henriqueslab.github.io/) laboratories.
#
#
# <font size = 4>While this notebook is part of the Zero-Cost Deep-Learning to Enhance Microscopy project (ZeroCostDL4Mic), this notebook structure substantially deviates from other ZeroCostDL4Mic notebooks and our template. This is because the deep learning method employed here is used to improve the image registration process. No Deep Learning models are actually saved, only the registered images.
#
#
# <font size = 4>This notebook is largely based on the following paper:
#
# <font size = 4>DRMIME: Differentiable Mutual Information and Matrix Exponential for Multi-Resolution Image Registration by <NAME>
# *et al.* published on arXiv in 2020 (https://arxiv.org/abs/2001.09865)
#
# <font size = 4>And source code found in: https://github.com/abnan/DRMIME
#
# <font size = 4>**Please also cite this original paper when using or developing this notebook.**
#
# + [markdown] id="jWAz2i7RdxUV"
# # **How to use this notebook?**
#
# ---
#
# <font size = 4>Video describing how to use our notebooks are available on youtube:
# - [**Video 1**](https://www.youtube.com/watch?v=GzD2gamVNHI&feature=youtu.be): Full run through of the workflow to obtain the notebooks and the provided test datasets as well as a common use of the notebook
# - [**Video 2**](https://www.youtube.com/watch?v=PUuQfP5SsqM&feature=youtu.be): Detailed description of the different sections of the notebook
#
#
# ---
# ###**Structure of a notebook**
#
# <font size = 4>The notebook contains two types of cell:
#
# <font size = 4>**Text cells** provide information and can be modified by double-clicking the cell. You are currently reading a text cell. You can create a new text cell by clicking `+ Text`.
#
# <font size = 4>**Code cells** contain code and the code can be modified by selecting the cell. To execute the cell, move your cursor to the `[ ]`-mark on the left side of the cell (a play button appears). Click to execute the cell. After execution is done the animation of the play button stops. You can create a new coding cell by clicking `+ Code`.
#
# ---
# ###**Table of contents, Code snippets** and **Files**
#
# <font size = 4>On the top left side of the notebook you find three tabs which contain from top to bottom:
#
# <font size = 4>*Table of contents* = contains structure of the notebook. Click the content to move quickly between sections.
#
# <font size = 4>*Code snippets* = contain examples how to code certain tasks. You can ignore this when using this notebook.
#
# <font size = 4>*Files* = contain all available files. After mounting your google drive (see section 1.) you will find your files and folders here.
#
# <font size = 4>**Remember that all uploaded files are purged after changing the runtime.** All files saved in Google Drive will remain. You do not need to use the Mount Drive-button; your Google Drive is connected in section 1.2.
#
# <font size = 4>**Note:** The "sample data" in "Files" contains default files. Do not upload anything in here!
#
# ---
# ###**Making changes to the notebook**
#
# <font size = 4>**You can make a copy** of the notebook and save it to your Google Drive. To do this click file -> save a copy in drive.
#
# <font size = 4>To **edit a cell**, double click on the text. This will show you either the source code (in code cells) or the source text (in text cells).
# You can use the `#`-mark in code cells to comment out parts of the code. This allows you to keep the original code piece in the cell as a comment.
# + [markdown] id="gKDLkLWUd-YX"
# # **0. Before getting started**
# ---
#
# <font size = 4>Before you run the notebook, please ensure that you are logged into your Google account and have the training and/or data to process in your Google Drive.
#
# <font size = 4>For DRMIME to train, it requires at least two images. One **`"Fixed image"`** (template for the registration) and one **`Moving Image`** (image to be registered). Multiple **`Moving Images`** can also be provided if you want to register them to the same **`"Fixed image"`**. If you provide several **`Moving Images`**, multiple DRMIME instances will run one after another.
#
# <font size = 4>The registration can also be applied to other channels. If you wish to apply the registration to other channels, please provide the images in another folder and carefully check your file names. Additional channels need to have the same name as the registered images and a prefix indicating the channel number starting at "C1_". See the example below.
#
# <font size = 4>Here is a common data structure that can work:
#
# * Data
#
# - **Fixed_image_folder**
# - img_1.tif (image used as template for the registration)
# - **Moving_image_folder**
# - img_3.tif, img_4.tif, ... (images to be registered)
# - **Folder_containing_additional_channels** (optional, if you want to apply the registration to other channel(s))
# - C1_img_3.tif, C1_img_4.tif, ...
# - C2_img_3.tif, C2_img_4.tif, ...
# - C3_img_3.tif, C3_img_4.tif, ...
# - **Results**
#
# <font size = 4>The **Results** folder will contain the processed images and PDF reports. Your original images remain unmodified.
#
# ---
#
#
# + [markdown] id="n4yWFoJNnoin"
# # **1. Install DRMIME and dependencies**
# ---
# + id="fq21zJVFNASx" cellView="form"
# Notebook / network identifiers used by the ZeroCostDL4Mic reporting cells
Notebook_version = '1.13'
Network = 'DRMIME'
# Alias the builtin any(); used by filter_files() below
from builtins import any as b_any
def get_requirements_path():
    """Return the relative path of requirements.txt in the 'contents' directory.

    The path climbs one '../' per directory level between the current working
    directory and the root 'contents' directory.
    """
    depth = os.getcwd().count('/') - 1
    return '../' * depth + 'requirements.txt'
def filter_files(file_list, filter_list):
    """Keep requirement entries whose package name appears in filter_list.

    An entry such as 'numpy==1.21.0' is kept when its name part (before
    '==') occurs as a substring of any element of filter_list.
    """
    return [
        entry for entry in file_list
        if b_any(entry.split('==')[0] in wanted for wanted in filter_list)
    ]
def build_requirements_file(before, after):
    """Reduce requirements.txt to the modules this notebook actually imported.

    Parameters
    ----------
    before : list of str
        Module names loaded before the notebook's import cell ran.
    after : list of str
        Module names loaded after the notebook's import cell ran.

    Side effect: rewrites the file at get_requirements_path() in place.
    """
    path = get_requirements_path()
    # Exporting requirements.txt for local run
    # !pip freeze > $path
    # Top-level names of the modules newly imported by this notebook.
    mod_list = [m.split('.')[0] for m in after if m not in before]
    # Read the frozen requirements, one "package==version" entry per line.
    # (Plain file reading; pd.read_csv(delimiter="\n") is rejected by newer pandas.)
    with open(path) as req_file:
        req_list = [line.strip() for line in req_file if line.strip()]
    # Replace import names with their PyPI package names where they differ.
    # Fixed: the original comprehension substituted the *whole* replacement list
    # for a matching module instead of its single mapped name.
    name_map = {'sklearn': 'scikit-learn', 'skimage': 'scikit-image'}
    mod_replace_list = [name_map.get(m, m) for m in mod_list]
    filtered_list = filter_files(req_list, mod_replace_list)
    # Rewrite the requirements file with only the filtered entries.
    with open(path, 'w') as out_file:
        for item in filtered_list:
            out_file.write(item + '\n')
import sys
# Snapshot of the modules loaded BEFORE the notebook's own imports; compared
# with the post-import snapshot to build a minimal requirements.txt.
before = [str(m) for m in sys.modules]
#@markdown ##Install DRMIME and dependencies
# Here we install DRMIME and other required packages
# !pip install wget
from skimage import io
import numpy as np
import math
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torch.optim as optim
from skimage.transform import pyramid_gaussian
from skimage.filters import gaussian
from skimage.filters import threshold_otsu
from skimage.filters import sobel
from skimage.color import rgb2gray
from skimage import feature
from torch.autograd import Function
import cv2
from IPython.display import clear_output
import pandas as pd
from skimage.io import imsave
# Run on the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# ------- Common variable to all ZeroCostDL4Mic notebooks -------
import urllib
import os, random
import shutil
import zipfile
from tifffile import imread, imsave
import time
import sys
import wget
from pathlib import Path
import pandas as pd
import csv
from glob import glob
from scipy import signal
from scipy import ndimage
from skimage import io
from sklearn.linear_model import LinearRegression
from skimage.util import img_as_uint
import matplotlib as mpl
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as psnr
from astropy.visualization import simple_norm
from skimage import img_as_float32
# Colors for the warning messages
class bcolors:
    """ANSI escape sequences used to colour console warning messages."""
    WARNING = '\033[31m'  # red, used to highlight warnings
    W = '\033[0m'         # reset to normal (white)
    R = '\033[31m'        # red
#Disable some of the tensorflow warnings
import warnings
warnings.filterwarnings("ignore")
# Check if this is the latest version of the notebook
# Fetch the table of latest notebook versions from the ZeroCostDL4Mic repository.
All_notebook_versions = pd.read_csv("https://raw.githubusercontent.com/HenriquesLab/ZeroCostDL4Mic/master/Colab_notebooks/Latest_Notebook_versions.csv", dtype=str)
print('Notebook version: '+Notebook_version)
# Look up this notebook's entry and compare versions.
Latest_Notebook_version = All_notebook_versions[All_notebook_versions["Notebook"] == Network]['Version'].iloc[0]
print('Latest notebook version: '+Latest_Notebook_version)
if Notebook_version == Latest_Notebook_version:
    print("This notebook is up-to-date.")
else:
    print(bcolors.WARNING +"A new version of this notebook has been released. We recommend that you download it at https://github.com/HenriquesLab/ZeroCostDL4Mic/wiki")
# !pip freeze > requirements.txt
print("Libraries installed")
# Build requirements file for local run
after = [str(m) for m in sys.modules]
build_requirements_file(before, after)
# + [markdown] id="cbTknRcviyT7"
# # **2. Initialise the Colab session**
#
#
#
#
# ---
#
#
#
#
#
# + [markdown] id="DMNHVZfHmbKb"
# ## **2.1. Check for GPU access**
# ---
#
# By default, the session should be using Python 3 and GPU acceleration, but it is possible to ensure that these are set properly by doing the following:
#
# <font size = 4>Go to **Runtime -> Change the Runtime type**
#
# <font size = 4>**Runtime type: Python 3** *(Python 3 is the programming language in which this program is written)*
#
# <font size = 4>**Accelerator: GPU** *(Graphics processing unit)*
#
# + cellView="form" id="h5i5CS2bSmZr"
#@markdown ##Run this cell to check if you have GPU access
# #%tensorflow_version 1.x
import tensorflow as tf
# An empty device name means Colab did not allocate a GPU to this session.
if tf.test.gpu_device_name()=='':
    print('You do not have GPU access.')
    print('Did you change your runtime ?')
    print('If the runtime setting is correct then Google did not allocate a GPU for your session')
    print('Expect slow performance. To access GPU try reconnecting later')
else:
    print('You have GPU access')
    # !nvidia-smi
# + [markdown] id="n3B3meGTbYVi"
# ## **2.2. Mount your Google Drive**
# ---
# <font size = 4> To use this notebook on the data present in your Google Drive, you need to mount your Google Drive to this notebook.
#
# <font size = 4> Play the cell below to mount your Google Drive and follow the link. In the new browser window, select your drive and select 'Allow', copy the code, paste into the cell and press enter. This will give Colab access to the data on the drive.
#
# <font size = 4> Once this is done, your data are available in the **Files** tab on the top left of notebook.
# + cellView="form" id="01Djr8v-5pPk"
#@markdown ##Play the cell to connect your Google Drive to Colab
#@markdown * Click on the URL.
#@markdown * Sign in your Google Account.
#@markdown * Copy the authorization code.
#@markdown * Enter the authorization code.
#@markdown * Click on "Files" site on the right. Refresh the site. Your Google Drive folder should now be available here as "drive".
# mount user's Google Drive to Google Colab.
# After mounting, the Drive contents are available under /content/gdrive.
from google.colab import drive
drive.mount('/content/gdrive')
# + [markdown] id="HLYcZR9gMv42"
# # **3. Select your parameters and paths**
# ---
# + [markdown] id="Kbn9_JdqnNnK"
# ## **3.1. Setting main training parameters**
# ---
# <font size = 4>
# + [markdown] id="CB6acvUFtWqd"
# <font size = 4> **Paths for training, predictions and results**
# These are the paths to your folders containing the images you want to register. To find the path of the folder containing your datasets, go to your Files on the left of the notebook, navigate to the folder containing your files and copy the path by right-clicking on the folder, **Copy path** and pasting it into the right box below.
#
# <font size = 4>**`Fixed_image_folder`:** This is the folder containing your "Fixed image".
#
# <font size = 4>**`Moving_image_folder`:** This is the folder containing your "Moving Image(s)".
#
# <font size = 4>**`Result_folder`:** This is the folder where your results will be saved.
#
#
# <font size = 5>**Training Parameters**
#
# <font size = 4>**`model_name`:** Choose a name for your model.
#
# <font size = 4>**`number_of_iteration`:** Input how many iterations (rounds) the network will be trained for. Preliminary results can already be observed after 200 iterations, but a full training should run for 500-1000 iterations. **Default value: 500**
#
# <font size = 4>**`Registration_mode`:** Choose which registration method you would like to use.
#
# <font size = 5>**Additional channels**
#
# <font size = 4> This option enables you to apply the registration to other images (for instance other channels). Place these images in the **`Additional_channels_folder`**. Additional channels need to have the same name as the images you want to register (found in **`Moving_image_folder`**) and a prefix indicating the channel number starting at "C1_".
#
#
# <font size = 5>**Advanced Parameters - experienced users only**
#
# <font size = 4>**`n_neurons`:** Number of neurons (elementary constituents) that will assemble your model. **Default value: 100**.
#
# <font size = 4>**`mine_initial_learning_rate`:** Input the initial value to be used as learning rate for MINE. **Default value: 0.001**
# <font size = 4>**`homography_net_vL_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_vL. **Default value: 0.001**
#
# <font size = 4>**`homography_net_v1_initial_learning_rate`:** Input the initial value to be used as learning rate for homography_net_v1. **Default value: 0.0001**
#
# + id="ewpNJ_I0Mv47" cellView="form"
#@markdown ###Path to the Fixed and Moving image folders:
Fixed_image_folder = "" #@param {type:"string"}
import os.path
from os import path
# The fixed-image path may point directly to an image file, or to a folder,
# in which case the first listed file of the folder is used as template.
if path.isfile(Fixed_image_folder):
    I = imread(Fixed_image_folder).astype(np.float32) # fixed image
if path.isdir(Fixed_image_folder):
    Fixed_image = os.listdir(Fixed_image_folder)
    I = imread(Fixed_image_folder+"/"+Fixed_image[0]).astype(np.float32) # fixed image
Moving_image_folder = "" #@param {type:"string"}
#@markdown ### Provide the path to the folder where the predictions are to be saved
Result_folder = "" #@param {type:"string"}
#@markdown ###Training Parameters
model_name = "" #@param {type:"string"}
number_of_iteration = 500#@param {type:"number"}
Registration_mode = "Affine" #@param ["Affine", "Perspective"]
#@markdown ###Do you want to apply the registration to other channel(s)?
Apply_registration_to_other_channels = False#@param {type:"boolean"}
Additional_channels_folder = "" #@param {type:"string"}
#@markdown ###Advanced Parameters
Use_Default_Advanced_Parameters = True#@param {type:"boolean"}
#@markdown ###If not, please input:
n_neurons = 100 #@param {type:"number"}
mine_initial_learning_rate = 0.001 #@param {type:"number"}
homography_net_vL_initial_learning_rate = 0.001 #@param {type:"number"}
homography_net_v1_initial_learning_rate = 0.0001 #@param {type:"number"}
# Reset every advanced parameter to its documented default when requested.
if (Use_Default_Advanced_Parameters):
    print("Default advanced parameters enabled")
    n_neurons = 100
    mine_initial_learning_rate = 0.001
    homography_net_vL_initial_learning_rate = 0.001
    homography_net_v1_initial_learning_rate = 0.0001
#failsafe for downscale could be useful
#to be added
#Load a random moving image to visualise and test the settings
random_choice = random.choice(os.listdir(Moving_image_folder))
J = imread(Moving_image_folder+"/"+random_choice).astype(np.float32)
# Check if additional channel(s) need to be registered and if so how many
print(str(len(os.listdir(Moving_image_folder)))+" image(s) will be registered.")
if Apply_registration_to_other_channels:
    other_channel_images = os.listdir(Additional_channels_folder)
    # There must be a whole number of additional-channel images per moving image.
    Number_of_other_channels = len(other_channel_images)/len(os.listdir(Moving_image_folder))
    if Number_of_other_channels.is_integer():
        print("The registration(s) will be propagated to "+str(Number_of_other_channels)+" other channel(s)")
    else:
        # Fixed: bare 'W' is an undefined name at module level (NameError);
        # the ANSI reset code lives on the bcolors class.
        print(bcolors.WARNING +"!! WARNING: Incorrect number of images in Folder_containing_additional_channels"+bcolors.W)
#here we check that no model with the same name already exist, if so print a warning
if os.path.exists(Result_folder+'/'+model_name):
    print(bcolors.WARNING +"!! WARNING: "+model_name+" already exists and will be deleted in the following cell !!")
    # Fixed: same undefined-'W' NameError as above.
    print(bcolors.WARNING +"To continue training "+model_name+", choose a new model_name here, and load "+model_name+" in section 3.3"+bcolors.W)
print("Example of two images to be registered")
#Here we display one image
f=plt.figure(figsize=(10,10))
plt.subplot(1,2,1)
plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')
plt.title('Fixed image')
plt.axis('off');
plt.subplot(1,2,2)
plt.imshow(J, norm=simple_norm(J, percent = 99), interpolation='nearest')
plt.title('Moving image')
plt.axis('off');
plt.savefig('/content/TrainingDataExample_DRMIME2D.png',bbox_inches='tight',pad_inches=0)
plt.show()
# + [markdown] id="QpKgUER3y9tn"
# ## **3.2. Choose and test the image pre-processing settings**
# ---
# <font size = 4> DRMIME makes use of multi-resolution image pyramids to perform registration. Unlike a conventional method where computation starts at the highest level of the image pyramid and gradually proceeds to the lower levels, DRMIME simultaneously uses all the levels in gradient descent-based optimization using automatic differentiation. Here, you can choose the parameters that define the multi-resolution image pyramids that will be used.
#
# <font size = 4>**`nb_images_pyramid`:** Choose the number of images to use to assemble the pyramid. **Default value: 10**.
#
# <font size = 4>**`Level_downscaling`:** Choose the level of downscaling that will be used to create the images of the pyramid **Default value: 1.8**.
#
# <font size = 4>**`sampling`:** amount of sampling used for the perspective registration. **Default value: 0.1**.
#
#
# + cellView="form" id="MoNXLwG6yd76"
#@markdown ##Image pre-processing settings
nb_images_pyramid = 10#@param {type:"number"} # where registration starts (at the coarsest resolution)
L = nb_images_pyramid
Level_downscaling = 1.8#@param {type:"number"}
downscale = Level_downscaling
sampling = 0.1#@param {type:"number"} # 10% sampling used only for perspective registration
ifplot=True
# Build Gaussian pyramids for the fixed (I) and moving (J) images; the
# multichannel flag follows the image rank (3-D -> colour, 2-D -> grayscale).
if np.ndim(I) == 3:
    nChannel=I.shape[2]
    pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
    pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
elif np.ndim(I) == 2:
    nChannel=1
    pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
    pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
else:
    print("Unknown rank for an image")
# Control the display
width=5
height=5
rows = int(L/5)+1
cols = 5
axes=[]
fig=plt.figure(figsize=(16,16))
if Registration_mode == "Affine":
    print("Affine registration selected")
    # create a list of necessary objects you will need and commit to GPU
    I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]
    for s in range(L):
        # Normalise each pyramid level to [0, 1] and move it to the device.
        I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
        J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
        if nChannel>1:
            # Channels-first layout for torch (C, H, W).
            I_lst.append(I_.permute(2,0,1))
            J_lst.append(J_.permute(2,0,1))
            h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]
            # Dilated Canny edge mask; only edge pixels feed the MI estimate.
            edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),
                                         np.ones((5,5),np.uint8),
                                         iterations = 1)
            ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]
            ind_lst.append(ind_)
        else:
            I_lst.append(I_)
            J_lst.append(J_)
            h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]
            edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),
                                         np.ones((5,5),np.uint8),
                                         iterations = 1)
            ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]
            ind_lst.append(ind_)
        # Preview the edge mask of this pyramid level.
        axes.append( fig.add_subplot(rows, cols, s+1) )
        subplot_title=(str(s))
        axes[-1].set_title(subplot_title)
        plt.imshow(edges_grayscale)
        plt.axis('off');
        h_lst.append(h_)
        w_lst.append(w_)
        # Normalised pixel grid in [-1, 1], the convention grid_sample expects.
        y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])
        y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0
        xy_ = torch.stack([x_,y_],2)
        xy_lst.append(xy_)
    fig.tight_layout()
    plt.show()
if Registration_mode == "Perspective":
    print("Perspective registration selected")
    # create a list of necessary objects you will need and commit to GPU
    I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]
    for s in range(L):
        I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
        J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
        if nChannel>1:
            I_lst.append(I_.permute(2,0,1))
            J_lst.append(J_.permute(2,0,1))
            h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]
            # Perspective mode samples a random subset of pixels instead of edges.
            ind_ = torch.randperm(int(h_*w_*sampling))
            ind_lst.append(ind_)
        else:
            I_lst.append(I_)
            J_lst.append(J_)
            h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]
            edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),
                                         np.ones((5,5),np.uint8),
                                         iterations = 1)
            ind_ = torch.randperm(int(h_*w_*sampling))
            ind_lst.append(ind_)
        axes.append( fig.add_subplot(rows, cols, s+1) )
        subplot_title=(str(s))
        axes[-1].set_title(subplot_title)
        # NOTE(review): in the multichannel branch edges_grayscale is never
        # computed, so this preview shows the previous level's mask — confirm.
        plt.imshow(edges_grayscale)
        plt.axis('off');
        h_lst.append(h_)
        w_lst.append(w_)
        y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])
        y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0
        xy_ = torch.stack([x_,y_],2)
        xy_lst.append(xy_)
    fig.tight_layout()
    plt.show()
# + [markdown] id="keIQhCmOMv5S"
# # **4. Train the network**
# ---
# + [markdown] id="Ovu0ESxivcxx"
# ## **4.1. Prepare for training**
# ---
# <font size = 4>Here, we use the information from 3. to load the correct dependencies.
# + id="t4QTv4vQvbnS" cellView="form"
#@markdown ##Load the dependencies required for training
print("--------------------------------------------------")
# Remove the model name folder if exists, then recreate it empty.
if os.path.exists(Result_folder+'/'+model_name):
    # Fixed: bare 'W' is an undefined name (NameError); the ANSI reset code
    # lives on the bcolors class.
    print(bcolors.WARNING +"!! WARNING: Model folder already exists and has been removed !!"+bcolors.W)
    shutil.rmtree(Result_folder+'/'+model_name)
os.makedirs(Result_folder+'/'+model_name)
if Registration_mode == "Affine":
    class HomographyNet(nn.Module):
        """Learnable affine transform parameterised on 6 basis generators.

        The 3x3 matrix returned by forward() is the matrix exponential of a
        linear combination of the basis matrices B, with coefficients vL
        (used at every pyramid level) and v1 (added at the finest level only).
        """
        def __init__(self):
            super(HomographyNet, self).__init__()
            # affine transform basis matrices
            self.B = torch.zeros(6,3,3).to(device)
            self.B[0,0,2] = 1.0
            self.B[1,1,2] = 1.0
            self.B[2,0,1] = 1.0
            self.B[3,1,0] = 1.0
            self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0
            self.B[5,1,1], self.B[5,2,2] = -1.0, 1.0
            # Learnable coefficient vectors: v1 for the finest level, vL shared.
            self.v1 = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)
            self.vL = torch.nn.Parameter(torch.zeros(6,1,1).to(device), requires_grad=True)
        def forward(self, s):
            # Combine coefficients: vL always, plus v1 at the finest level (s==0).
            C = torch.sum(self.B*self.vL,0)
            if s==0:
                C += torch.sum(self.B*self.v1,0)
            # Matrix exponential exp(C) via a 10-term Taylor series.
            A = torch.eye(3).to(device)
            H = A
            for i in torch.arange(1,10):
                A = torch.mm(A/i,C)
                H = H + A
            return H
    class MINE(nn.Module): #https://arxiv.org/abs/1801.04062
        """MINE network: a small MLP estimating a lower bound on mutual information."""
        def __init__(self):
            super(MINE, self).__init__()
            self.fc1 = nn.Linear(2*nChannel, n_neurons)
            self.fc2 = nn.Linear(n_neurons, n_neurons)
            self.fc3 = nn.Linear(n_neurons, 1)
            self.bsize = 1 # 1 may be sufficient
        def forward(self, x, ind):
            # Flatten spatial dimensions: (H, W, 2*nChannel) -> (H*W, 2*nChannel).
            x = x.view(x.size()[0]*x.size()[1],x.size()[2])
            MI_lb=0.0
            for i in range(self.bsize):
                # z1: joint samples; z2: 'independent' samples obtained by
                # permuting the second half of the channels.
                ind_perm = ind[torch.randperm(len(ind))]
                z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))
                z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))
                # Donsker-Varadhan lower bound on MI.
                MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))
            return MI_lb/self.bsize
    def AffineTransform(I, H, xv, yv):
        # apply affine transform
        # Map the normalised grid through H, then resample I with grid_sample.
        xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])
        yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])
        J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()
        return J
    def multi_resolution_loss():
        """Return the negative MI averaged over all pyramid levels (coarse to fine)."""
        loss=0.0
        for s in np.arange(L-1,-1,-1):
            if nChannel>1:
                Jw_ = AffineTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()
                mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])
                loss = loss - (1./L)*mi
            else:
                Jw_ = AffineTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()
                mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])
                loss = loss - (1./L)*mi
        return loss
if Registration_mode == "Perspective":
    class HomographyNet(nn.Module):
        """Learnable homography parameterised on 8 basis generators.

        Same construction as the affine variant, extended with two projective
        generators (B[6], B[7]); forward() returns exp(C) for the combined
        coefficient matrix C.
        """
        def __init__(self):
            super(HomographyNet, self).__init__()
            # affine transform basis matrices
            self.B = torch.zeros(8,3,3).to(device)
            self.B[0,0,2] = 1.0
            self.B[1,1,2] = 1.0
            self.B[2,0,1] = 1.0
            self.B[3,1,0] = 1.0
            self.B[4,0,0], self.B[4,1,1] = 1.0, -1.0
            self.B[5,1,1], self.B[5,2,2] = -1.0, 1.0
            self.B[6,2,0] = 1.0
            self.B[7,2,1] = 1.0
            # Learnable coefficient vectors: v1 for the finest level, vL shared.
            self.v1 = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)
            self.vL = torch.nn.Parameter(torch.zeros(8,1,1).to(device), requires_grad=True)
        def forward(self, s):
            # Combine coefficients: vL always, plus v1 at the finest level (s==0).
            C = torch.sum(self.B*self.vL,0)
            if s==0:
                C += torch.sum(self.B*self.v1,0)
            # Matrix exponential exp(C) via a 10-term Taylor series.
            A = torch.eye(3).to(device)
            H = A
            for i in torch.arange(1,10):
                A = torch.mm(A/i,C)
                H = H + A
            return H
    class MINE(nn.Module): #https://arxiv.org/abs/1801.04062
        """MINE network: a small MLP estimating a lower bound on mutual information."""
        def __init__(self):
            super(MINE, self).__init__()
            self.fc1 = nn.Linear(2*nChannel, n_neurons)
            self.fc2 = nn.Linear(n_neurons, n_neurons)
            self.fc3 = nn.Linear(n_neurons, 1)
            self.bsize = 1 # 1 may be sufficient
        def forward(self, x, ind):
            # Flatten spatial dimensions: (H, W, 2*nChannel) -> (H*W, 2*nChannel).
            x = x.view(x.size()[0]*x.size()[1],x.size()[2])
            MI_lb=0.0
            for i in range(self.bsize):
                # z1: joint samples; z2: 'independent' samples via permutation.
                ind_perm = ind[torch.randperm(len(ind))]
                z1 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(x[ind,:])))))
                z2 = self.fc3(F.relu(self.fc2(F.relu(self.fc1(torch.cat((x[ind,0:nChannel],x[ind_perm,nChannel:2*nChannel]),1))))))
                # Donsker-Varadhan lower bound on MI.
                MI_lb += torch.mean(z1) - torch.log(torch.mean(torch.exp(z2)))
            return MI_lb/self.bsize
    def PerspectiveTransform(I, H, xv, yv):
        # apply homography
        # Map the normalised grid through H, then resample I with grid_sample.
        xvt = (xv*H[0,0]+yv*H[0,1]+H[0,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])
        yvt = (xv*H[1,0]+yv*H[1,1]+H[1,2])/(xv*H[2,0]+yv*H[2,1]+H[2,2])
        J = F.grid_sample(I,torch.stack([xvt,yvt],2).unsqueeze(0)).squeeze()
        return J
    def multi_resolution_loss():
        """Return the negative MI averaged over all pyramid levels (coarse to fine)."""
        loss=0.0
        for s in np.arange(L-1,-1,-1):
            if nChannel>1:
                Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()
                mi = mine_net(torch.cat([I_lst[s],Jw_],0).permute(1,2,0),ind_lst[s])
                loss = loss - (1./L)*mi
            else:
                Jw_ = PerspectiveTransform(J_lst[s].unsqueeze(0).unsqueeze(0), homography_net(s), xy_lst[s][:,:,0], xy_lst[s][:,:,1]).squeeze()
                mi = mine_net(torch.stack([I_lst[s],Jw_],2),ind_lst[s])
                loss = loss - (1./L)*mi
        return loss
def histogram_mutual_information(image1, image2):
    """Estimate the mutual information between two images.

    The joint distribution is approximated with a 100-bin 2-D histogram of
    the flattened pixel values; MI is the KL divergence between the joint
    distribution and the product of its marginals (natural log, in nats).
    """
    joint_hist, _, _ = np.histogram2d(image1.ravel(), image2.ravel(), bins=100)
    joint_p = joint_hist / float(np.sum(joint_hist))
    marginal_x = np.sum(joint_p, axis=1)
    marginal_y = np.sum(joint_p, axis=0)
    independent_p = marginal_x[:, None] * marginal_y[None, :]
    support = joint_p > 0  # restrict to non-empty bins so log() is defined
    return np.sum(joint_p[support] * np.log(joint_p[support] / independent_p[support]))
# Signal that the registration helpers for the selected mode are defined.
print("Done")
# + [markdown] id="0Dfn8ZsEMv5d"
# ## **4.2. Start Training**
# ---
# <font size = 4>When playing the cell below you should see updates after each iteration (round). A new network will be trained for each image that needs to be registered.
#
# <font size = 4>* **CRITICAL NOTE:** Google Colab has a time limit for processing (to prevent using GPU power for datamining). Training time must be less than 12 hours! If training takes longer than 12 hours, please decrease the number of epochs or number of patches. Another way to circumvent this is to save the parameters of the model after training and start training again from this point.
#
#
# + id="fisJmA13Mv5e" cellView="form"
#@markdown ##Start training and the registration process
start = time.time()
loop_number = 1
if Registration_mode == "Affine":
    print("Affine registration.....")
    # One full training run per moving image.
    for image in os.listdir(Moving_image_folder):
        # Reload the fixed image so every registration starts from the template.
        if path.isfile(Fixed_image_folder):
            I = imread(Fixed_image_folder).astype(np.float32) # fixed image
        if path.isdir(Fixed_image_folder):
            Fixed_image = os.listdir(Fixed_image_folder)
            I = imread(Fixed_image_folder+"/"+Fixed_image[0]).astype(np.float32) # fixed image
        J = imread(Moving_image_folder+"/"+image).astype(np.float32)
        # Here we generate the pyramidal images
        ifplot=True
        if np.ndim(I) == 3:
            nChannel=I.shape[2]
            pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
            pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
        elif np.ndim(I) == 2:
            nChannel=1
            pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
            pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
        else:
            print("Unknown rank for an image")
        # create a list of necessary objects you will need and commit to GPU
        I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]
        for s in range(L):
            I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
            J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
            if nChannel>1:
                I_lst.append(I_.permute(2,0,1))
                J_lst.append(J_.permute(2,0,1))
                h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]
                # Edge pixels (dilated Canny mask) drive the MI estimation.
                edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),
                                             np.ones((5,5),np.uint8),
                                             iterations = 1)
                ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]
                ind_lst.append(ind_)
            else:
                I_lst.append(I_)
                J_lst.append(J_)
                h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]
                edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 30),
                                             np.ones((5,5),np.uint8),
                                             iterations = 1)
                ind_ = torch.nonzero(torch.tensor(edges_grayscale).view(h_*w_)).squeeze().to(device)[:1000000]
                ind_lst.append(ind_)
            h_lst.append(h_)
            w_lst.append(w_)
            # Normalised pixel grid in [-1, 1] for grid_sample.
            y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])
            y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0
            xy_ = torch.stack([x_,y_],2)
            xy_lst.append(xy_)
        homography_net = HomographyNet().to(device)
        mine_net = MINE().to(device)
        # NOTE(review): learning rates are hard-coded here; the advanced
        # parameters *_initial_learning_rate from section 3.1 are never used.
        optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},
                                {'params': homography_net.vL, 'lr': 5e-3},
                                {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)
        mi_list = []
        # Gradient descent: minimising the negative multi-resolution MI
        # maximises the mutual information between fixed and warped image.
        for itr in range(number_of_iteration):
            optimizer.zero_grad()
            loss = multi_resolution_loss()
            mi_list.append(-loss.item())
            loss.backward()
            optimizer.step()
            # Live plot of the MI curve for this registration.
            clear_output(wait=True)
            plt.plot(mi_list)
            plt.xlabel('Iteration number')
            plt.ylabel('MI')
            plt.title(image+". Image registration "+str(loop_number)+" out of "+str(len(os.listdir(Moving_image_folder)))+".")
            plt.show()
        # Warp the original (non-blurred) moving image with the final transform.
        I_t = torch.tensor(I).to(device) # without Gaussian
        J_t = torch.tensor(J).to(device) # without Gaussian
        H = homography_net(0)
        if nChannel>1:
            J_w = AffineTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)
        else:
            J_w = AffineTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()
        #Apply registration to other channels
        if Apply_registration_to_other_channels:
            for n_channel in range(1, int(Number_of_other_channels)+1):
                channel = imread(Additional_channels_folder+"/C"+str(n_channel)+"_"+image).astype(np.float32)
                channel_t = torch.tensor(channel).to(device)
                channel_w = AffineTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()
                channel_registered = channel_w.cpu().data.numpy()
                io.imsave(Result_folder+'/'+model_name+"/"+"C"+str(n_channel)+"_"+image+"_"+Registration_mode+"_registered.tif", channel_registered)
        # Export results to numpy array
        registered = J_w.cpu().data.numpy()
        # Save results
        io.imsave(Result_folder+'/'+model_name+"/"+image+"_"+Registration_mode+"_registered.tif", registered)
        loop_number = loop_number + 1
    print("Your images have been registered and saved in your result_folder")
#Perspective registration
if Registration_mode == "Perspective":
    print("Perspective registration.....")
    # One full training run per moving image.
    for image in os.listdir(Moving_image_folder):
        # Reload the fixed image so every registration starts from the template.
        if path.isfile(Fixed_image_folder):
            I = imread(Fixed_image_folder).astype(np.float32) # fixed image
        if path.isdir(Fixed_image_folder):
            Fixed_image = os.listdir(Fixed_image_folder)
            # Fixed: imread() was called on the whole listdir() result (a list);
            # it must receive the full path of the first file, as in the Affine branch.
            I = imread(Fixed_image_folder+"/"+Fixed_image[0]).astype(np.float32) # fixed image
        J = imread(Moving_image_folder+"/"+image).astype(np.float32)
        # Here we generate the pyramidal images
        ifplot=True
        if np.ndim(I) == 3:
            nChannel=I.shape[2]
            pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
            pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=True), downscale=downscale, multichannel=True))
        elif np.ndim(I) == 2:
            nChannel=1
            pyramid_I = tuple(pyramid_gaussian(gaussian(I, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
            pyramid_J = tuple(pyramid_gaussian(gaussian(J, sigma=1, multichannel=False), downscale=downscale, multichannel=False))
        else:
            print("Unknown rank for an image")
        # create a list of necessary objects you will need and commit to GPU
        I_lst,J_lst,h_lst,w_lst,xy_lst,ind_lst=[],[],[],[],[],[]
        for s in range(L):
            I_ = torch.tensor(cv2.normalize(pyramid_I[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
            J_ = torch.tensor(cv2.normalize(pyramid_J[s].astype(np.float32), None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)).to(device)
            if nChannel>1:
                I_lst.append(I_.permute(2,0,1))
                J_lst.append(J_.permute(2,0,1))
                h_, w_ = I_lst[s].shape[1], I_lst[s].shape[2]
                # Perspective mode uses a random pixel subset for MI estimation.
                ind_ = torch.randperm(int(h_*w_*sampling))
                ind_lst.append(ind_)
            else:
                I_lst.append(I_)
                J_lst.append(J_)
                h_, w_ = I_lst[s].shape[0], I_lst[s].shape[1]
                edges_grayscale = cv2.dilate(cv2.Canny(cv2.GaussianBlur(rgb2gray(pyramid_I[s]),(21,21),0).astype(np.uint8), 0, 10),
                                             np.ones((5,5),np.uint8),
                                             iterations = 1)
                ind_ = torch.randperm(int(h_*w_*sampling))
                ind_lst.append(ind_)
            h_lst.append(h_)
            w_lst.append(w_)
            # Normalised pixel grid in [-1, 1] for grid_sample.
            y_, x_ = torch.meshgrid([torch.arange(0,h_).float().to(device), torch.arange(0,w_).float().to(device)])
            y_, x_ = 2.0*y_/(h_-1) - 1.0, 2.0*x_/(w_-1) - 1.0
            xy_ = torch.stack([x_,y_],2)
            xy_lst.append(xy_)
        homography_net = HomographyNet().to(device)
        mine_net = MINE().to(device)
        # NOTE(review): learning rates are hard-coded here; the advanced
        # parameters *_initial_learning_rate from section 3.1 are never used.
        optimizer = optim.Adam([{'params': mine_net.parameters(), 'lr': 1e-3},
                                {'params': homography_net.vL, 'lr': 1e-3},
                                {'params': homography_net.v1, 'lr': 1e-4}], amsgrad=True)
        mi_list = []
        # Minimising the negative multi-resolution MI maximises the mutual
        # information between fixed and warped moving image.
        for itr in range(number_of_iteration):
            optimizer.zero_grad()
            loss = multi_resolution_loss()
            mi_list.append(-loss.item())
            loss.backward()
            optimizer.step()
            # Live plot of the MI curve for this registration.
            clear_output(wait=True)
            plt.plot(mi_list)
            plt.xlabel('Iteration number')
            plt.ylabel('MI')
            plt.title(image+". Image registration "+str(loop_number)+" out of "+str(len(os.listdir(Moving_image_folder)))+".")
            plt.show()
        # Warp the original (non-blurred) moving image with the final homography.
        I_t = torch.tensor(I).to(device) # without Gaussian
        J_t = torch.tensor(J).to(device) # without Gaussian
        H = homography_net(0)
        if nChannel>1:
            J_w = PerspectiveTransform(J_t.permute(2,0,1).unsqueeze(0), H, xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze().permute(1,2,0)
        else:
            J_w = PerspectiveTransform(J_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()
        #Apply registration to other channels
        if Apply_registration_to_other_channels:
            for n_channel in range(1, int(Number_of_other_channels)+1):
                channel = imread(Additional_channels_folder+"/C"+str(n_channel)+"_"+image).astype(np.float32)
                channel_t = torch.tensor(channel).to(device)
                channel_w = PerspectiveTransform(channel_t.unsqueeze(0).unsqueeze(0), H , xy_lst[0][:,:,0], xy_lst[0][:,:,1]).squeeze()
                channel_registered = channel_w.cpu().data.numpy()
                io.imsave(Result_folder+'/'+model_name+"/"+"C"+str(n_channel)+"_"+image+"_Perspective_registered.tif", channel_registered)
        # Export results to numpy array
        registered = J_w.cpu().data.numpy()
        # Save results
        io.imsave(Result_folder+'/'+model_name+"/"+image+"_Perspective_registered.tif", registered)
        loop_number = loop_number + 1
    print("Your images have been registered and saved in your result_folder")
# PDF export missing
#pdf_export(trained = True, augmentation = Use_Data_augmentation, pretrained_model = Use_pretrained_model)
# + [markdown] id="PfTw_pQUUAqB"
# ## **4.3. Assess the registration**
# ---
#
#
#
# + id="SrArBvqwYvc9" cellView="form"
# @markdown ##Run this cell to display a randomly chosen input and its corresponding predicted output.
# For sliders and dropdown menu and progress bar
from ipywidgets import interact
import ipywidgets as widgets
print('--------------------------------------------------------------')
# Interactive viewer: choose a moving image and compare it against the fixed
# image and its registered result (plus overlay panels).
@interact
def show_QC_results(file = os.listdir(Moving_image_folder)):
    moving_image = imread(Moving_image_folder+"/"+file).astype(np.float32)
    # The registered file name follows the naming used when saving in section 4.2.
    registered_image = imread(Result_folder+"/"+model_name+"/"+file+"_"+Registration_mode+"_registered.tif").astype(np.float32)
    #Here we display one image
    f=plt.figure(figsize=(20,20))
    plt.subplot(1,5,1)
    plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest')
    plt.title('Fixed image')
    plt.axis('off');
    plt.subplot(1,5,2)
    plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest')
    plt.title('Moving image')
    plt.axis('off');
    plt.subplot(1,5,3)
    plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest')
    plt.title("Registered image")
    plt.axis('off');
    # Overlays: green = fixed, orange = moving/registered.
    plt.subplot(1,5,4)
    plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap="Greens")
    plt.imshow(moving_image, norm=simple_norm(moving_image, percent = 99), interpolation='nearest', cmap="Oranges", alpha=0.5)
    plt.title("Fixed and moving images")
    plt.axis('off');
    plt.subplot(1,5,5)
    plt.imshow(I, norm=simple_norm(I, percent = 99), interpolation='nearest', cmap="Greens")
    plt.imshow(registered_image, norm=simple_norm(registered_image, percent = 99), interpolation='nearest', cmap="Oranges", alpha=0.5)
    plt.title("Fixed and Registered images")
    plt.axis('off');
    plt.show()
# + [markdown] id="wgO7Ok1PBFQj"
# ## **4.4. Download your predictions**
# ---
#
# <font size = 4>**Store your data** and ALL its results elsewhere by downloading it from Google Drive and after that clean the original folder tree (datasets, results, etc.) if you plan to train or use new networks. Please note that the notebook will otherwise **OVERWRITE** all files which have the same name.
# + [markdown] id="XXsUh88HqYay"
# # **5. Version log**
# ---
# <font size = 4>**v1.13**:
#
# * This version now includes built-in version check and the version log that you're reading now.
# + [markdown] id="nlyPYwZu4VVS"
# #**Thank you for using DRMIME 2D!**
| Colab_notebooks/Beta notebooks/DRMIME_2D_ZeroCostDL4Mic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%%\n"}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
from tqdm.notebook import tqdm
import numpy
from io_utils import DataBaseReader
from time_series_utils import extract_time_series, train_test_split
from plots import get_lengths_plot, get_prices_plots
from arima import evaluate_with_arima, grid_search_arima
# + [markdown] pycharm={"name": "#%% md\n"}
# # Data
#
# First, load the data. The tables are merged into one right away via `session_id`. The `date` and `time` columns are combined into a proper `timestamp`.
# -
reader = DataBaseReader("trade_info.sqlite3")
# + pycharm={"name": "#%%\n"}
all_data = reader.get_union_data(remove_duplicates=True)
all_data.head()
# + pycharm={"name": "#%%\n"}
reader.close()
# + [markdown] pycharm={"name": "#%% md\n"}
# # Time series
#
# Extract time series from the data:
# - Group the data by `session_id`
# - Convert each group into a list of events
# + pycharm={"name": "#%%\n"}
time_series = extract_time_series(all_data)
# + [markdown] pycharm={"name": "#%% md\n"}
# The series differ a lot; let's look at how the lengths of these sequences are distributed.
# + pycharm={"name": "#%%\n"}
get_lengths_plot(time_series)
# + [markdown] pycharm={"name": "#%% md\n"}
# There are very many short sequences. Predicting prices for them is quite hard -- too little data to find any patterns. Keep only the sequences longer than 10 events; there are still plenty of those, and at least some information can be extracted.
# + pycharm={"name": "#%%\n"}
time_series_long = {k: v for k, v in time_series.items() if len(v) > 10}
len(time_series_long)
# + [markdown] pycharm={"name": "#%% md\n"}
# Let's look at a few random plots -- maybe something interesting will stand out.
# + pycharm={"name": "#%%\n"}
get_prices_plots(time_series_long, 6)
# + [markdown] pycharm={"name": "#%% md\n"}
# # Modeling
#
# Let's try to model the price within each session. First we need hold-out sets: split every session into `train` and `test` by ordering the events within the group by time, then cutting off the last 30% for testing.
# + pycharm={"name": "#%%\n"}
split_ts = {k: train_test_split(v) for k, v in time_series_long.items()}
# + [markdown] pycharm={"name": "#%% md\n"}
# As the starting model we use ARIMA. As the metric we use the cosine distance:
# $$
# d(u; v) = 1 - \frac{u \cdot v}{||u|| \cdot ||v||}
# $$
# A separate model is trained for each session. The overall metric is the mean of the per-session metrics.
#
# Find the optimal parameters via grid search.
# + pycharm={"name": "#%%\n"}
grid_search_arima(split_ts, 0, 5)
# -
# Optimal parameters: `(p=0, d=1, q=1)`. Let's see what result a model with these parameters shows.
# + pycharm={"name": "#%%\n"}
metrics = []
for train, test in tqdm(split_ts.values()):
    metrics.append(evaluate_with_arima(train, test, (0, 1, 1)))
print(f"{numpy.mean(metrics)} +/- {numpy.std(metrics)}")
# -
# We obtained a good baseline model. Going forward, new models (for example, gradient boosting) can be built and compared against this value. Large models likely capture global patterns well, so an ideal pipeline would be: train a boosting model on the whole corpus, train ARIMA per session, and blend the two predictions for the final estimate.
| main.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %load_ext autoreload
# %autoreload 2
# +
from sklearn import set_config
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn_turbostart.estimators import create_estimators
from sklearn_turbostart.train import (
load_data,
plot_permutation_importance,
plot_prediction,
train_estimators,
)
set_config(display="diagram")
# -
# # Load data
X_train, y_train, X_test, y_test = load_data("../data/data.csv")
# # Preprocessing pipeline
# +
# Numeric features to z-score and categorical features to one-hot encode.
numeric_features = [
    "Feature 1",
    "Feature 2",
    "Feature 4",
    "Feature 7",
    "Feature 9",
]
categorical_features = ["Feature 5", "Feature 6", "Feature 8"]

preprocessing = ColumnTransformer(
    transformers=[
        ("numeric_standard_scalar", StandardScaler(), numeric_features),
        ("cat", OneHotEncoder(), categorical_features),
    ]
)

estimators = create_estimators(preprocessing)
# -
# Visualize preprocessing pipeline
preprocessing
# Visualize model pipeline before fitting
estimators["RandomForest"]
# # Fit models
train_estimators(estimators, X_train, y_train, X_test, y_test)
# Visualize the fitted model pipeline and inspect the best-fit parameters
estimators["RandomForest"].best_estimator_
# # Plot feature importance
plot_permutation_importance(estimators, X_test, y_test)
| notebooks/Quickstarter.ipynb |
# ---
# title: "Indexing and selecting DataFrames"
# date: 2020-04-12T14:41:32+02:00
# author: "<NAME>"
# type: technical_note
# draft: false
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### DataFrames
# store tabular data where you can label the rows and the columns.
# #### Generate some data
# +
import pandas as pd

# Build the cars DataFrame: one list per column plus a list of row labels.
names = ['United States', 'Australia', 'Japan', 'India', 'Russia', 'Morocco', 'Egypt']
dr = [True, False, False, False, True, True, True]
cpc = [809, 731, 588, 18, 200, 70, 45]
row_labels = ['US', 'AUS', 'JPN', 'IN', 'RU', 'MOR', 'EG']

# Assemble the columns into a dict and hand the labels straight to the
# constructor instead of assigning `.index` afterwards.
cars_dict = {'country': names, 'drives_right': dr, 'cars_per_cap': cpc}
cars = pd.DataFrame(cars_dict, index=row_labels)
print(cars)
# -
# ### Square brackets
# The single bracket version gives a Pandas Series, the double bracket version gives a Pandas DataFrame.
# #### Columns
# Print out country column as Pandas Series
cars['country']
# Print out country column as Pandas DataFrame
cars[['country']]
# Print out DataFrame with country and drives_right columns
cars[['country','drives_right']]
# #### Rows
# Print out first 3 observations (integer row slicing is end-exclusive)
cars[0:3]
# Print out fourth, fifth and sixth observation
print(cars[3:6])
# ### Selecting rows and columns with `loc` and `iloc`
# `loc` is label-based, which means that you have to specify rows and columns based on their row and column labels
# `iloc` is integer index based, so you have to specify rows and columns by their integer index like you did in the previous exercise
# Selecting rows based on index column
# One bracket -> Series
cars.loc['RU']
cars.iloc[4]  # same row as above, selected by integer position
# Two bracket -> DataFrame
cars.loc[['RU']]
cars.loc[['RU', 'AUS']]
# All rows, one column, kept as a DataFrame
cars.loc[:,['drives_right']]
# Row subset and column subset, both by label
cars.loc[['US', 'MOR'],['drives_right']]
# Rows 0-5 by position, third column (cars_per_cap) as a DataFrame
cars.iloc[0:6,[2]]
# Label slices include BOTH endpoints, unlike integer slices
cars.loc['US':'MOR',['cars_per_cap']]
| courses/datacamp/notes/python/pandas/indexselectingdata.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# <a href="https://github.com/timeseriesAI/tsai/blob/main/tutorial_nbs/12_Experiment_tracking_with_W%26B.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# created by <NAME> - email: <EMAIL>
# # Purpose 😇
# This brief notebook will demonstrate how you can easily track your experiments using Weights & Biases (W&B).
#
# You can find much more information on the W&B [Experiment tracking](https://docs.wandb.ai/guides/track) page.
# # Import libraries 📚
# +
# # **************** UNCOMMENT AND RUN THIS CELL IF YOU NEED TO INSTALL/ UPGRADE TSAI ****************
# stable = True # Set to True for latest pip version or False for main branch in GitHub
# # !pip install {"tsai -U" if stable else "git+https://github.com/timeseriesAI/tsai.git"} >> /dev/null
# # !pip install wandb -U >> /dev/null
# -
from tsai.all import *
from fastai.callback.wandb import *
import wandb
# my_setup presumably prints environment/version details for the passed
# package along with python/tsai/torch -- confirm against tsai docs
my_setup(wandb)
# # Login to W&B 🔎
# Prompts for a W&B API key on first use in a fresh environment
wandb.login()
# # Create a configurable training script 🏋️♂️
# In this notebook we'll run experiments with TSiT which is a new model developed by timeseriesAI inspired by ViT.
#
# We'll first define a baseline we'll then try to improve:
# Baseline: train a TSiTPlus classifier on the LSST UCR dataset.
X, y, splits = get_UCR_data('LSST', split_data=False)
tfms = [None, TSClassification()]  # per-(input, target) transforms: none for X, label encoding for y (presumably)
batch_tfms = TSStandardize(by_sample=True)  # standardize each sample independently
cbs = [ShowGraph()]  # live training graph callback
learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=batch_tfms, arch=TSiTPlus, arch_config={}, metrics=accuracy, cbs=cbs)
learn.fit_one_cycle(10, 1e-3)
# We'll now define which elements we'd like to test. These will be part of the config.
#
# Later we'll be able to modify the training script or the config.
# +
# Same training script, but every tunable element is gathered into a
# config object so experiments can be varied from a single place.
config = AttrDict (
    batch_tfms = TSStandardize(by_sample=True),
    arch_config = {},
    architecture = TSiTPlus,  # NOTE(review): later cells name this key `arch` instead
    lr = 1e-3,
    n_epoch = 10,
)

X, y, splits = get_UCR_data('LSST', split_data=False)
tfms = [None, TSClassification()]
cbs = [ShowGraph()]
learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=config["batch_tfms"], arch=config["architecture"],
                     arch_config=config["arch_config"], metrics=accuracy, cbs=cbs)
learn.fit_one_cycle(config["n_epoch"], config["lr"])
# -
# # Perform experiments with W&B 🛫
# We just need to add 2 elements to have a configurable training script that can be tracked by W&B:
#
# - A context manager:
#
# ```
# with wandb.init(project="LSST_v01", reinit=True, config=config):
# ```
# It may be useful to pass a group, job type, tags, name, notes, etc. You can see the available options [here](https://docs.wandb.ai/ref/python/init).
# - A callback:
#
# ```
# WandbCallback()
# ```
#
#
# ⚠️ If you also want to save your best models, and set log_model=True in the WandbCallback, you'll need to add the SaveModelCallback as well.
#
# There's currently a small bug in the integration between wandb and tsai that doesn't allow to log_preds. This can be used to show predictions in W&B. We recommend setting log_preds=False.
# +
# YOU CAN MODIFY YOUR CONFIG AND/OR TRAINING SCRIPT IN THIS CELL AND RE-RUN MANUAL EXPERIMENTS THAT WILL BE TRACKED BY W&B
config = AttrDict (
    batch_tfms = TSStandardize(by_sample=True),
    arch = TSiTPlus,
    arch_config = {},
    lr = 1e-3,
    n_epoch = 10,
)

# wandb.init opens a tracked run; everything trained inside the context
# manager (including WandbCallback logs) is attached to that run.
with wandb.init(project="LSST_v01", config=config, name='baseline'):
    X, y, splits = get_UCR_data('LSST', split_data=False)
    tfms = [None, TSClassification()]
    # log_preds=False works around the tsai/wandb integration bug described above
    cbs = [ShowGraph(), WandbCallback(log_preds=False, log_model=False, dataset_name='LSST')]
    learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=config.batch_tfms, arch=config.arch,
                         arch_config=config.arch_config, metrics=accuracy, cbs=cbs)
    learn.fit_one_cycle(config.n_epoch, config.lr)
# -
# # Visualize results 🕸
# You will be able to see your experiment results in the W&B website.
#
# The links are displayed with the run details like this:
#
# ```
# Project page: https://wandb.ai/timeseriesai/LSST_v01
# Run page: https://wandb.ai/timeseriesai/LSST_v01/runs/34lacjyd
# ```
# # How to test multiple values: use loops? 🌀
# We can even run loops. In this case we'll test if adding a convolution/s with different kernel sizes improves performance. You'll be able to check progress on the W&B website during the test, so the inline ShowGraph callback is not strictly needed.
# Sweep over kernel-size settings for the optional convolution (None
# presumably disables it -- see tsai docs); each setting becomes one
# tracked W&B run, grouped under 'kss' for easy comparison in the UI.
ks_candidates = [None, 1, 3, 5, 7, [1, 3, 5, 7]]
for ks in ks_candidates:
    config = AttrDict (
        batch_tfms = TSStandardize(by_sample=True),
        arch = TSiTPlus,
        arch_config = dict(ks=ks),
        lr = 1e-3,
        n_epoch = 10,
    )
    with wandb.init(project="LSST_v01", config=config, group='kss'):
        X, y, splits = get_UCR_data('LSST', split_data=False)
        tfms = [None, TSClassification()]
        cbs = [ShowGraph(), WandbCallback(log_preds=False, log_model=False, dataset_name='LSST')]
        learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=config.batch_tfms, arch=config.arch,
                             arch_config=config.arch_config, metrics=accuracy, cbs=cbs)
        learn.fit_one_cycle(config.n_epoch, config.lr)
# In this case we've learned that setting a ks parameter significantly improves performance. That's a great finding!
# # Conclusion ✅
# `wandb` is a great tool that allows you to easily track your experiments.
#
# It's super-flexible. It allows you to track your dataset, data preprocessing, data transforms, architectures, architecture configurations, training loop, etc.
#
# And you can group runs in projects to easily compare them.
#
# Here's all the code you need to start running experiments with W&B and `tsai`:
#
# ```
# from tsai.all import *
# from fastai.callback.wandb import *
# import wandb
#
# config = AttrDict (
# batch_tfms = TSStandardize(by_sample=True),
# arch = TSiTPlus,
# arch_config = {},
# lr = 1e-3,
# n_epoch = 10,
# )
#
# with wandb.init(project="LSST_v01", config=config, name='baseline'):
# X, y, splits = get_UCR_data('LSST', split_data=False)
# tfms = [None, TSClassification()]
# cbs = [ShowGraph(), WandbCallback(log_preds=False, log_model=False, dataset_name='LSST')]
# learn = TSClassifier(X, y, splits=splits, tfms=tfms, batch_tfms=config.batch_tfms, arch=config.arch,
# arch_config=config.arch_config, metrics=accuracy, cbs=cbs)
# learn.fit_one_cycle(config.n_epoch, config.lr)
# ```
#
# We've seen here just a small amount of everything W&B has to offer. I hope you'll start benefiting from it!
| tutorial_nbs/12_Experiment_tracking_with_W&B.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Simple Example
#
# [Motivation](./index.ipynb#Simple-Example)
import ipywidgets as W
import traitlets as T
from wxyz.dvcs import Git, HeadPicker, Committer
import importnb
with importnb.Notebook():
    import wxyz.notebooks.Design.DVCS.Tools as tools
# Make a repository backing store (a directory called `_design_repo_`)
repo = Git("_design_repo_")
# The objective widgets whose state will be version-controlled
text = W.Textarea(description="text")
number = W.FloatSlider(description="number")
# Ready-made widget templates for version control activities
committer = Committer(repo=repo)
head = HeadPicker(repo=repo, dom_classes="widget-vbox")
restore = tools.make_commit_switcher(repo)
# Bind each widget's traits to an on-disk file; the serialization format
# is inferred from `path`
for tracked_widget, path in ((text, "text.yaml"), (number, "number.yaml")):
    repo.track(
        tracked_widget=tracked_widget,
        tracked_traits=["value"],
        path=path,
    )
# Show the widget!
W.VBox([W.HBox([number, text]), committer, W.HBox([head, restore])])
| src/wxyz_notebooks/src/wxyz/notebooks/Design/DVCS/Simple Example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + tags=["parameters"]
# papermill parameters -- the values below can be overridden at execution time
output_folder = '../output/'
# -
import requests
import zipfile
import geopandas
# +
# Download the minified Municipalities shapefile bundle and unpack it
dataset_folder = '../datasets'
url = 'https://media.githubusercontent.com/media/altcoder/philippines-psgc-shapefiles/master/datasets/SHAPEFILES/MunicipalitiesMin.zip'
shp_zip = '{}/MunicipalitiesMin.zip'.format(dataset_folder)

response = requests.get(url)
with open(shp_zip, 'wb') as archive_file:
    archive_file.write(response.content)

with zipfile.ZipFile(shp_zip, 'r') as archive:
    archive.extractall(dataset_folder)
# -
# Read the shapefile, keep only the administrative ID/name columns, and
# drop duplicate attribute rows (the source stores one row per geometry)
municities = geopandas.read_file('{}/MunicipalitiesMin.shp'.format(dataset_folder))
keep_cols = ['ADM_ID', 'ADM1_PCODE', 'ADM1_EN', 'ADM2_PCODE', 'ADM2_EN', 'ADM3_PCODE', 'ADM3_EN']
municities = municities[keep_cols].drop_duplicates()
municities
| notebooks/philippines_psgc_shapefiles.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="xgFQ3iF3F5x-" executionInfo={"status": "ok", "timestamp": 1635696238491, "user_tz": -330, "elapsed": 1968, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="b5354a23-fd8a-4f18-e853-2acb36de9379"
# !git clone https://github.com/hoyeoplee/MeLU.git
# + colab={"base_uri": "https://localhost:8080/"} id="LMHcqcJnSA_Y" executionInfo={"status": "ok", "timestamp": 1635699535138, "user_tz": -330, "elapsed": 1022, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="0949a542-346f-4236-908a-f0c2d49c61c8"
# %cd MeLU
# + [markdown] id="Q1XFYBBgF8iv"
# ## Preparing dataset
# + id="28aqQZT3SrKq"
import os
from data_generation import generate
# + id="dlgHj1TNSnz1"
master_path= "./ml"  # root folder for the generated MovieLens task files
# + colab={"base_uri": "https://localhost:8080/"} id="GxWV9w_MGAMs" executionInfo={"status": "ok", "timestamp": 1635696737504, "user_tz": -330, "elapsed": 477065, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="40a4950b-bbc6-4e82-dedb-c0adda85194b"
# Generate the dataset only on the first run: the directory's existence is
# used as an "already generated" marker.
if not os.path.exists("{}/".format(master_path)):
    os.mkdir("{}/".format(master_path))
    generate(master_path)
# + [markdown] id="QX7T2hB7GDGs"
# ## Training a model
#
# Our model needs support and query sets. The support set is for local update, and the query set is for global update.
# + id="qJZYKG-aR6Sh"
import torch
import pickle
from MeLU import MeLU
from options import config
from model_training import training
# + colab={"base_uri": "https://localhost:8080/"} id="xtCnvZ14R7JP" executionInfo={"status": "ok", "timestamp": 1635699539016, "user_tz": -330, "elapsed": 14, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="c7fec823-73e8-4588-de1b-fb4d87d1ef70"
# Display the default hyper-parameters shipped with the repo
config
# + id="x3wtFGO2R8RT"
# Shrink the model and training budget so the demo runs quickly on CPU
config['use_cuda'] = False
config['num_epoch'] = 1
config['embedding_dim'] = 8
config['first_fc_hidden_dim'] = 16
# + colab={"base_uri": "https://localhost:8080/", "height": 442} id="ePCLKoL_GIwn" executionInfo={"status": "error", "timestamp": 1635699763819, "user_tz": -330, "elapsed": 186033, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="f357be3c-e0fa-4a13-9b4b-4dd7acdc2947"
melu = MeLU(config)
model_filename = "{}/models.pkl".format(master_path)


def _load_pickle(path):
    # Load one pickled object, closing the file handle promptly (the
    # original called pickle.load(open(...)) and leaked the handles).
    with open(path, "rb") as f:
        return pickle.load(f)


if not os.path.exists(model_filename):
    # Load training dataset: each task idx owns four pickles (support x/y
    # and query x/y), hence the division by 4.
    training_set_size = int(len(os.listdir("{}/warm_state".format(master_path))) / 4)
    supp_xs_s = []
    supp_ys_s = []
    query_xs_s = []
    query_ys_s = []
    for idx in range(training_set_size):
        supp_xs_s.append(_load_pickle("{}/warm_state/supp_x_{}.pkl".format(master_path, idx)))
        supp_ys_s.append(_load_pickle("{}/warm_state/supp_y_{}.pkl".format(master_path, idx)))
        query_xs_s.append(_load_pickle("{}/warm_state/query_x_{}.pkl".format(master_path, idx)))
        query_ys_s.append(_load_pickle("{}/warm_state/query_y_{}.pkl".format(master_path, idx)))
    # One meta-learning task per (support, query) tuple; drop the per-list
    # references once zipped.
    total_dataset = list(zip(supp_xs_s, supp_ys_s, query_xs_s, query_ys_s))
    del(supp_xs_s, supp_ys_s, query_xs_s, query_ys_s)
    training(melu, total_dataset, batch_size=config['batch_size'], num_epoch=config['num_epoch'], model_save=True, model_filename=model_filename)
else:
    # A trained model already exists -- just restore its weights.
    trained_state_dict = torch.load(model_filename)
    melu.load_state_dict(trained_state_dict)
# + id="-BgtY3yIRBIp"
# !apt-get install tree
# + id="dNKhNvdxRt-K" colab={"base_uri": "https://localhost:8080/"} executionInfo={"status": "ok", "timestamp": 1635699856717, "user_tz": -330, "elapsed": 539, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a/default-user=s64", "userId": "13037694610922482904"}} outputId="f5e0e912-7eb5-488a-d94e-3bcd615f01f2"
# !tree --du -h -C -L 2 .
# + [markdown] id="vfLfolS6GOwg"
# ## Extracting evidence candidates
#
# We extract evidence candidate list based on the MeLU.
# + id="467NiQOLGQJG"
from evidence_candidate import selection
# Rank evidence candidates with the trained MeLU model and print each
# candidate movie alongside its score.
evidence_candidate_list = selection(melu, master_path, config['num_candidate'])
for movie, score in evidence_candidate_list:
    print(movie, score)
| _docs/nbs/T871238-MeLU-on-ML-1m.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/yhatpub/yhatpub/blob/main/notebooks/fastai/lesson6_multicat.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="brjCYxrw34vF"
# # Fastai Lesson 6 Multicat on YHat.pub
#
# This notebook picks up from [Fastai Fastbook 6 multicat](https://github.com/fastai/fastbook/blob/master/06_multicat.ipynb) and deploys the trained model to [YHat.pub](https://yhat.pub)
#
# To save your model, you'll need to save just the weights and biases of the model, the `pth` file for your learner. A really nice and easy to follow tutorial on `pth` files is here [inference-with-fastai](https://benjaminwarner.dev/2021/10/01/inference-with-fastai)
#
# This is because `load_learner` from lesson 6 relies on the serialized `get_x` `get_y` methods, which when unserialzied, need to be on the `__main__` module. If that doesn't make sense, don't worry about it. Just follow the steps below and you'll be fine.
#
#
# On your lesson 6 notebook, after fine tune your learner, do the following to save and download your `pth` file, and labels.
# ```
# learn.save('lesson_6_multi_saved_model', with_opt=False)
# from google.colab import files
# files.download('models/lesson_6_multi_saved_model.pth')
# ```
#
# And do the following to save and download your labels into a file.
# ```
# df = pd.DataFrame(dls.vocab)
# df.to_csv('lesson_6_multi_saved_labels.csv', index=False, header=False)
# files.download('lesson_6_multi_saved_labels.csv')
# ```
# + [markdown] id="lGehttOd6g-I"
# ### Installs
# The following cell installs pytorch, fastai and yhat_params, which is used to decorate your `predict` function.
# + id="7eckIQIo39vS" outputId="74452b43-45a3-4f80-96fb-063aecae8ddd" colab={"base_uri": "https://localhost:8080/"}
# !pip install -q --upgrade --no-cache-dir fastai
# !pip install -q --no-cache-dir git+https://github.com/yhatpub/yhat_params.git@main
# + [markdown] id="RV2jxp1b6o4H"
# Add the following since matplotlib needs to know where to write it's temp files
# + id="Yf4aE1466p2g"
import os
import tempfile
# Matplotlib needs a writable directory for its config/cache files (see the
# note above); point MPLCONFIGDIR at the system temp dir.
os.environ["MPLCONFIGDIR"] = tempfile.gettempdir()
# + [markdown] id="vXbb-xMt4F10"
# ### Imports
# **Warning** don't place `pip installs` and `imports` in the same cell. The imports might not work correctly if done that way.
# + id="BEou7Z444C0K"
from fastai.vision.all import *
from yhat_params.yhat_tools import FieldType, inference_predict
# + [markdown] id="m8T-fAxr4HCO"
# ### Download Model
# Google drive does not allow direct downloads for files over 100MB, so you'll need to follow the snippet below to get the download url.
# + id="JdBGoC__4GXd" outputId="50fceb3c-df37-4070-a572-d5c38a879ee2" colab={"base_uri": "https://localhost:8080/"}
#cleanup from previous download
# !rm uc*
#file copied from google drive
#file copied from google drive
google_drive_url = "https://drive.google.com/file/d/1lzqJyV1bf7RE3C2Ix_sFDr0bFAM62QJt/view?usp=sharing"
import os
# A Google Drive share link embeds the file id as the 6th path segment;
# build a direct-download url from it for wget.
os.environ['GOOGLE_FILE_ID'] = google_drive_url.split('/')[5]
os.environ['GDRIVE_URL'] = f'https://docs.google.com/uc?export=download&id={os.environ["GOOGLE_FILE_ID"]}'
# !echo "This is the Google drive download url $GDRIVE_URL"
# + [markdown] id="q5zE1GY04J10"
# `wget` it from google drive. This script places the model in a `model` folder
# + id="DmoHbybY4JTy"
# !wget -q --no-check-certificate $GDRIVE_URL -r -A 'uc*' -e robots=off -nd
# !mkdir -p models
# !mv $(ls -S uc* | head -1) ./models/export.pth
# + [markdown] id="P_l3UuJJ-OZF"
# Now let's do the same for the labels csv
# + id="ILKW4anz9mho" outputId="f63de61a-d66f-4452-c0a4-8d37c9d7ab7f" colab={"base_uri": "https://localhost:8080/"}
#cleanup from previous download
# !rm uc*
#file copied from google drive
google_drive_url = "https://drive.google.com/file/d/1p6gRb0v8jaBiDSGRKsYPpdnEi4IaJrcw/view?usp=sharing"
import os
# Same file-id extraction as the model download above, this time for the
# labels csv.
os.environ['GOOGLE_FILE_ID'] = google_drive_url.split('/')[5]
os.environ['GDRIVE_URL'] = f'https://docs.google.com/uc?export=download&id={os.environ["GOOGLE_FILE_ID"]}'
# + id="EBB1cCqC9mpX"
# !wget -q --no-check-certificate $GDRIVE_URL -r -A 'uc*' -e robots=off -nd
# !mkdir -p models
# !mv $(ls -S uc* | head -1) ./models/vocab.csv
# + [markdown] id="uJO6xPA14OJY"
# verify the model exists. **Warning** YHat is pretty finicky about where you place your models. Make sure you create a `model` directory and download your model(s) there
# + id="CgUQvJsDgbFD" outputId="eee793bc-1e91-40f9-8d0a-ae82b27bf4c7" colab={"base_uri": "https://localhost:8080/"}
# !ls -l models
# + [markdown] id="pGcPiGP3hZqp"
# ### Recreate dataloader and learner
#
# Let's start by creating a dummy image as well as set up our labels for multicategory classification. These are going to be used for our dataloader. One thing to note, our labels are an array of numpy arrays, since we can have multiple classifications for our prediction.
# + id="tJNuCutShnbl"
from PIL import Image
import os

# A 1x1 placeholder image: the dummy dataloader only needs a valid file path.
if not os.path.exists('data'):
    os.mkdir('data')
Image.new('RGB', (1, 1)).save('data/dummyimage.jpg')

# One label per line; each label is wrapped in its own numpy array because
# multi-category samples carry arrays of classifications.
with open("models/vocab.csv") as f:
    vocab_lines = f.read().rstrip().split('\n')
labels = [np.array([label]) for label in vocab_lines]
# + [markdown] id="O2z3yntdh315"
# And now, we can make a lightweight `DataBlock`, passing in the single image and labels. We are multiplying the arrays to oversample the dataloader, to ensure the dataloader sees all the possible classes.
# + id="KlOcLmUmgiLA" outputId="006fc7bb-4aa1-4ff4-bad7-0808d9c220ca" colab={"base_uri": "https://localhost:8080/"}
# DataBlock describing the (image, multi-label) pipeline; image paths and
# label arrays are read from the dataframe columns named below.
dblock = DataBlock(blocks=(ImageBlock, MultiCategoryBlock),
                   get_x=ColReader('images'),
                   get_y=ColReader('labels'),
                   item_tfms=Resize(192),
                   batch_tfms=Normalize.from_stats(*imagenet_stats))
# Lightweight dummy dataframe: 100 rows all pointing at the same 1x1
# placeholder image, with the vocab labels oversampled so the dataloader
# sees every class.
# NOTE(review): labels*5 must yield 100 entries, i.e. this assumes a
# 20-entry vocab -- confirm against models/vocab.csv.
df = pd.DataFrame(
    {
        'images': [
            'data/dummyimage.jpg',
        ]*100,
        'labels': labels*5,
        'valid': [True] *100
    },
)
dls = dblock.dataloaders(df, bs=64, num_workers=1)
# Rebuild the architecture (pretrained=False: weights come from disk) and
# load the fine-tuned weights saved earlier as models/export.pth.
learn_inf = cnn_learner(dls, resnet50, metrics=partial(accuracy_multi, thresh=0.2), pretrained=False)
learn_inf.load('export')
learn_inf.model.eval();
# + [markdown] id="kPKXMFUA4PI8"
# ### Load your learner
# The following is the equivalent of torch `torch.load` or ts `model.load_weights`
# + [markdown] id="sZ8WiWaf4Sz2"
# And write your predict function. Note, you will need to decorate your function with <a href="https://github.com/yhatpub/yhat_params">inference_predict</a> which takes 2 parameters, a `dic` for input and output.
#
# **Info** These parameters are how YHat.pub maps your predict functions input/output of the web interface. The `dic` key is how you access the variable and the value is it's type. You can use autocomplete to see all the input/output types and more documentation on `inference_predict` is available at the link.
# + id="Rm0ji7TG4TLD"
# Field mappings consumed by the @inference_predict decorator; renamed from
# `input`/`output` so the Python builtins are no longer shadowed at module
# level.
predict_input = {"image": FieldType.PIL}
predict_output = {"text": FieldType.Text}
@inference_predict(input=predict_input, output=predict_output)
def predict(params):
    """Classify one uploaded image and return the predicted label(s) as text.

    params["image"] is the PIL image supplied by YHat.pub; it is converted
    to RGB first because uploads may be RGBA or grayscale.
    """
    img = PILImage.create(np.array(params["image"].convert("RGB")))
    result = learn_inf.predict(img)
    return {"text": str(result[0])}
# + [markdown] id="7tjb1Hac4V1L"
# ### Test
# First, import `in_colab` since you only want to run this test in colab. YHat will use this colab in a callable API, so you don't want your test to run every time `predict` is called. Next, import `inference_test` which is a function to make sure your `predict` will run with YHat.
#
# Now, inside a `in_colab` boolean, first get whatever test data you'll need, in this case, an image. Then you'll call your predict function, wrapped inside `inference_test`, passing in the same params you defined above. If something is missing, you should see an informative error. Otherwise, you'll see something like
# `Please take a look and verify the results`
# + id="XhNKiYae4WL1" outputId="14d81d18-6219-4c85-86c5-0b9a9a384c1c" colab={"base_uri": "https://localhost:8080/", "height": 102}
from yhat_params.yhat_tools import in_colab, inference_test
# Run the smoke test only inside Colab: in deployment this notebook is
# imported and predict() is invoked per request, so the test must not run.
if in_colab():
    import urllib.request
    from PIL import Image
    # Fetch a sample image and push it through predict() via inference_test,
    # which validates the declared input/output contract.
    urllib.request.urlretrieve("https://s3.amazonaws.com/cdn-origin-etr.akc.org/wp-content/uploads/2017/11/11234019/Bulldog-standing-in-the-grass.jpg", "input_image.jpg")
    img = Image.open("input_image.jpg")
    inference_test(predict_func=predict, params={'image': img})
# + [markdown] id="lr0wnsJc4XJA"
# ### That's it
#
# If you run into errors, feel free to hop into Discord.
#
# Otherwise, you'll now want to clear your outputs and save a public repo on Github
| notebooks/fastai/lesson6_multicat.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# ### Analysis
# * As expected, the weather becomes significantly warmer as one approaches the equator (0 deg. latitude). More interesting, however, is that the southern hemisphere tends to be warmer than the northern hemisphere. This may be due to the tilt of the earth.
# * There is no strong correlation between either humidity or cloudiness and latitude. However, the northern hemisphere was a little more humid than the southern hemisphere at the time of this analysis in March. At any given latitude, cloudiness levels are spread fairly evenly.
# * There is no strong relationship between latitude and wind speed. However, the northern hemisphere has a few more cities with higher wind speeds than the southern hemisphere.
#
# ---
#
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
import urllib

# Import API key (the local module file is named `confg` -- sic)
from confg import api_key

# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy

# Output File (CSV)
output_data_file = "output_data/cities.csv"

# Range of latitudes and longitudes covering the whole globe
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# +
# Lists holding the random coordinates and the de-duplicated city names.
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations spanning the whole globe.
lats = np.random.uniform(low=-90.000, high=90.000, size=1500)
lngs = np.random.uniform(low=-180.000, high=180.000, size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination.
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # If the city is unique, then add it to our cities list (duplicates
    # occur because many coordinates map to the same nearest city).
    if city not in cities:
        cities.append(city)
# Print the city count to confirm a sufficient sample
# (the bare expression is the cell's displayed output).
len(cities)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Query current weather for every candidate city and collect the results.
# Base URL for the OpenWeatherMap current-weather endpoint (imperial units).
url = "http://api.openweathermap.org/data/2.5/weather?units=Imperial&APPID=" + api_key

# Accumulates one dict of weather fields per successfully retrieved city.
city_data = []

print("Beginning Data Retrieval")
print("------------------------")
city_count = 1

# Loop through all cities
for city in cities:
    city_url = url + "&q=" + city
    # Run an API request for each of the cities and check for missing data.
    try:
        # Parse the JSON response and pull out the fields we plot later.
        city_weather = requests.get(city_url).json()
        city_lat = city_weather["coord"]["lat"]
        city_lng = city_weather["coord"]["lon"]
        city_max_temp = city_weather["main"]["temp_max"]
        city_humidity = city_weather["main"]["humidity"]
        city_clouds = city_weather["clouds"]["all"]
        city_wind = city_weather["wind"]["speed"]
        city_country = city_weather["sys"]["country"]
        city_date = city_weather["dt"]
        # Append to city_data list
        city_data.append({"City": city,
                          "Lat": city_lat,
                          "Lng": city_lng,
                          "Max Temp": city_max_temp,
                          "Humidity": city_humidity,
                          "Cloudiness": city_clouds,
                          "Wind Speed": city_wind,
                          "Country": city_country,
                          "Date": city_date})
        print(f"Processing Record {city_count}|{city}")
        # NOTE(security): the request URL is deliberately NOT printed here --
        # it embeds the API key and would be persisted in the saved notebook.
        city_count += 1
    # Only swallow the failures this loop expects: a city absent from the
    # response (KeyError), a non-JSON body (ValueError), or a network error.
    # The previous bare `except:` also hid bugs and KeyboardInterrupt.
    except (KeyError, ValueError, requests.exceptions.RequestException):
        print("City not found. Skipping... ")
        continue

print("--------------------")
print("Data Ended")
# -
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
# Build a DataFrame from the retrieved records and pull out the columns
# used by the scatter plots below.
city_data_pd = pd.DataFrame(city_data)
lats = city_data_pd["Lat"]
max_temps = city_data_pd["Max Temp"]
humidity = city_data_pd["Humidity"]
cloudiness = city_data_pd["Cloudiness"]
wind_speed = city_data_pd["Wind Speed"]
# Export City_Data into a csv file
city_data_pd.to_csv(output_data_file, index_label="City_ID")
# Non-null counts per column (displayed as the cell's output).
city_data_pd.count()
# -
city_data_pd.head()
# ### Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# #### Latitude vs. Temperature Plot
# +
# Scatter plot for latitude vs temperature
plt.scatter(lats,
max_temps,
edgecolor="black", linewidths=1, marker="o",
alpha=0.8, label="Cities")
plt.title("City Latitude vs. Max Temperature (03/26/19)")
plt.ylabel("Max Temperature (F)")
plt.xlabel("Latitude")
plt.grid(True)
# Save the figure
plt.savefig("output_data/plot1.png")
plt.show()
# -
# #### Latitude vs. Humidity Plot
# +
# Scatter plot for latitude vs Humidity
plt.scatter(lats,
humidity,
edgecolor="black", linewidths=1, marker="o",
alpha=0.8, label="Cities")
plt.title("City Latitude vs. Humidity (03/26/19)")
plt.ylabel("Humidity (%)")
plt.xlabel("Latitude")
plt.grid(True)
# Save the figure
plt.savefig("output_data/plot2.png")
plt.show()
# -
# #### Latitude vs. Cloudiness Plot
# +
# Scatter plots for Latitude vs Cloudiness
plt.scatter(lats,
cloudiness,
edgecolor="black", linewidths=1, marker="o",
alpha=0.8, label="Cities")
plt.title("City Latitude vs. Cloudiness (03/26/19)")
plt.ylabel("Cloudiness (%)")
plt.xlabel("Latitude")
plt.grid(True)
plt.savefig("output_data/plot3.png")
# Show plot
plt.show()
# -
# #### Latitude vs. Wind Speed Plot
# +
# Scatter plots for Latitude vs Windspeed
plt.scatter(lats,
wind_speed,
edgecolor="black", linewidths=1, marker="o",
alpha=0.8, label="Cities")
plt.title("City Latitude vs. Wind Speed (03/26/19)")
plt.ylabel("Wind Speed (mph)")
plt.xlabel("Latitude")
plt.grid(True)
# Save the figure
plt.savefig("output_data/plot4.png")
plt.show()
# -
| WeatherPy2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import re

import numpy as np

import vocabulary
# FREQ/DUR lookup tables are used by the helpers below.  The original
# relative import (`from .vocabulary import ...`) fails in a notebook, so
# import from the plainly-importable module instead.
from vocabulary import FREQ, DUR
def music_str_parser(semantic):
    """Split a semantic-notation string into note names and duration tokens.

    Parameters
    ----------
    semantic : str
        Whitespace-separated tokens such as ``note-C4_quarter`` or
        ``rest-half``.

    Returns
    -------
    tuple[list[str], list[list[str]]]
        ``notes``: one pitch name (e.g. ``'C4'``) or ``'rest'`` per token;
        ``durs``: the underscore-separated duration parts of each token.
    """
    # Compile each pattern once instead of once per token (the original
    # also relied on an `import re` that had been commented out).
    token_re = re.compile(r'((note|gracenote|rest|multirest)(\-)(\S)*)')
    pitch_re = re.compile(r'(([A-G](b|#)?[1-6])|rest)')
    dur_re = re.compile(r'((\_|\-)([a-z]|[0-9])+(\S)*)+')

    # Full musical tokens, e.g. 'note-C4_quarter'.
    music_str = [m[0] for m in token_re.findall(semantic)]
    # First pitch-or-rest match inside each token.
    notes = [pitch_re.findall(tok)[0][0] for tok in music_str]
    # Duration suffix of each token, leading '_'/'-' dropped, split on '_'.
    durs = [dur_re.findall(tok)[0][0][1:].split('_') for tok in music_str]
    return notes, durs
# +
#FERMATA
# -
def dur_evaluator(durations):
    """Convert parsed duration token lists into numeric note lengths.

    Parameters
    ----------
    durations : list[list[str]]
        Duration parts per note, as produced by ``music_str_parser``.

    Returns
    -------
    list[float]
        One numeric duration per note.
    """
    note_dur_computed = []
    for dur in durations:
        # Base length of each part with dots stripped; parts not in DUR
        # (e.g. 'fermata') resolve to None and are excluded from the sum.
        dur_len = [DUR.get(i.replace('.', ''), None) for i in dur]
        dur_len_actual = sum(v for v in dur_len if v is not None)
        if 'quadruple' in dur:
            # Quadruple modifier: four times the base duration.
            dur_len_actual = dur_len_actual * 4
        elif 'fermata' in dur:
            # Fermata: hold for twice the base duration.
            dur_len_actual = dur_len_actual * 2
        elif '..' in ''.join(dur):
            # BUG FIX: '..' must be tested before '.', otherwise the single
            # dot branch always matches first and this one is unreachable.
            # NOTE(review): musically a double dot means 1.75x; this formula
            # yields 2x -- confirm the intended semantics.
            dur_len_actual = dur_len_actual + (2 * (dur_len_actual * 1/2))
        elif '.' in ''.join(dur):
            # Single dot: duration plus half the duration.
            dur_len_actual = dur_len_actual + (dur_len_actual * 1/2)
        elif dur[0].isnumeric():
            # Bare numeric token (e.g. multirest bar count): half each.
            dur_len_actual = float(dur[0]) * .5
        note_dur_computed.append(dur_len_actual)
    return note_dur_computed
# +
def get_music_note(semantic):
    """Return per-note sample time grids and pitch frequencies.

    For each parsed note, builds a 44.1 kHz time axis over the note's
    duration and looks the pitch frequency up in FREQ.
    """
    notes, durations = music_str_parser(semantic)
    sample_rate = 44100
    # One linspace per note: int(duration * rate) samples, endpoint excluded.
    timestep = [
        np.linspace(0, length, int(length * sample_rate), False)
        for length in dur_evaluator(durations)
    ]
    # Frequency lookup for every pitch name (or rest), in order.
    pitch_freq = [FREQ[name] for name in notes]
    return timestep, pitch_freq
def get_sinewave_audio(semantic):
    """Render each parsed note as a sine wave sampled over its duration."""
    timesteps, freqs = get_music_note(semantic)
    # Pair every frequency with its time grid and synthesize the waveform.
    return [np.sin(f * t * 2 * np.pi) for t, f in zip(timesteps, freqs)]
# -
| midi/.ipynb_checkpoints/player-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 (base)
# language: python
# name: python3.7-base
# ---
# Given a list of unique random intergers:
# - create a heap.
# - sort the list using a heap.
# ### PREQUISITES
# +
import random
def make(n):
    """Return the integers 0..n-1 in uniformly random order.

    The original swap-with-random-index loop produces a biased shuffle
    (it maps n**n equally likely outcomes onto n! permutations, which do
    not divide evenly); random.shuffle implements the unbiased
    Fisher-Yates algorithm.
    """
    nums = list(range(n))
    random.shuffle(nums)
    return nums
# -
# ### ALGORITHM
# +
def heapify(nums, n, idx):
    """Sift nums[idx] down until the subtree rooted at idx is a max-heap.

    Assumes both child subtrees of idx already satisfy the heap property;
    n is the number of elements in the heap prefix of nums.
    """
    while True:
        largest = idx
        left = 2 * idx + 1
        right = left + 1
        # Pick the largest of the node and its in-range children.
        if left < n and nums[left] > nums[largest]:
            largest = left
        if right < n and nums[right] > nums[largest]:
            largest = right
        if largest == idx:
            # Heap property restored at this node -- stop.
            return
        nums[idx], nums[largest] = nums[largest], nums[idx]
        idx = largest
def heapSort(nums):
    """Sort nums in place in ascending order via an in-place max-heap."""
    size = len(nums)
    # Build the heap: sift down every internal node, deepest first.
    for root in reversed(range(size // 2)):
        heapify(nums, size, root)
    # Repeatedly move the current maximum to the end and shrink the heap.
    for end in reversed(range(size)):
        nums[0], nums[end] = nums[end], nums[0]
        heapify(nums, end, 0)
# -
# ### TEST
nums = make(26)
heapSort(nums)
print(nums)
| heapsort.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Application: A Face Detection Pipeline
# %matplotlib inline
import matplotlib.pyplot as plt
import seaborn as sns; sns.set()
import numpy as np
# +
from skimage import data, color, feature
import skimage.data
image = color.rgb2gray(data.chelsea())
hog_vec, hog_vis = feature.hog(image, visualise=True)
fig, ax = plt.subplots(1, 2, figsize=(12, 6),
subplot_kw=dict(xticks=[], yticks=[]))
ax[0].imshow(image, cmap='gray')
ax[0].set_title('input image')
ax[1].imshow(hog_vis)
ax[1].set_title('visualization of HOG features');
# -
from sklearn.datasets import fetch_lfw_people
faces = fetch_lfw_people()
positive_patches = faces.images
positive_patches.shape
# +
from skimage import data, transform
imgs_to_use = ['camera', 'text', 'coins', 'moon',
'page', 'clock', 'immunohistochemistry',
'chelsea', 'coffee', 'hubble_deep_field']
images = [color.rgb2gray(getattr(data, name)())
for name in imgs_to_use]
# +
from sklearn.feature_extraction.image import PatchExtractor
def extract_patches(img, N, scale=1.0, patch_size=positive_patches[0].shape):
    """Sample N random patches from img, optionally at a scaled window size.

    Patches are drawn at ``scale * patch_size`` and, when scale != 1,
    resized back to ``patch_size`` so every returned patch has one shape.
    """
    target = tuple((scale * np.array(patch_size)).astype(int))
    sampler = PatchExtractor(patch_size=target,
                             max_patches=N, random_state=0)
    # PatchExtractor expects a batch axis, hence img[np.newaxis].
    batch = sampler.transform(img[np.newaxis])
    if scale != 1:
        batch = np.array([transform.resize(p, patch_size) for p in batch])
    return batch
negative_patches = np.vstack([extract_patches(im, 1000, scale)
for im in images for scale in [0.5, 1.0, 2.0]])
negative_patches.shape
# -
fig, ax = plt.subplots(6, 10)
for i, axi in enumerate(ax.flat):
axi.imshow(negative_patches[500 * i], cmap='gray')
axi.axis('off')
from itertools import chain
X_train = np.array([feature.hog(im)
for im in chain(positive_patches,
negative_patches)])
y_train = np.zeros(X_train.shape[0])
y_train[:positive_patches.shape[0]] = 1
X_train.shape
# +
from sklearn.naive_bayes import GaussianNB
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code should use `from sklearn.model_selection import cross_val_score`.
from sklearn.cross_validation import cross_val_score
# Baseline: cross-validated accuracy of Gaussian naive Bayes on the HOG features.
cross_val_score(GaussianNB(), X_train, y_train)
# -
from sklearn.svm import LinearSVC
from sklearn.grid_search import GridSearchCV
grid = GridSearchCV(LinearSVC(), {'C': [1.0, 2.0, 4.0, 8.0]})
grid.fit(X_train, y_train)
grid.best_score_
grid.best_params_
model = grid.best_estimator_
model.fit(X_train, y_train)
# +
test_image = skimage.data.astronaut()
test_image = skimage.color.rgb2gray(test_image)
test_image = skimage.transform.rescale(test_image, 0.5)
test_image = test_image[:160, 40:180]
plt.imshow(test_image, cmap='gray')
plt.axis('off');
# +
def sliding_window(img, patch_size=None,
                   istep=2, jstep=2, scale=1.0):
    """Yield ``((i, j), patch)`` windows slid across ``img``.

    Parameters
    ----------
    img : 2-D array
        Image to scan.
    patch_size : tuple of int, optional
        (height, width) of the window.  Defaults to the positive-patch
        shape; resolved lazily so defining this function never touches
        ``positive_patches``.
    istep, jstep : int
        Row / column stride of the window.
    scale : float
        Windows are drawn at ``scale * patch_size`` and resized back to
        ``patch_size`` when scale != 1.
    """
    if patch_size is None:
        patch_size = positive_patches[0].shape
    Ni, Nj = (int(scale * s) for s in patch_size)
    for i in range(0, img.shape[0] - Ni, istep):
        # BUG FIX: the column bound must use the window *width* Nj, not Ni;
        # with Ni != Nj the original truncated or over-scanned columns.
        for j in range(0, img.shape[1] - Nj, jstep):
            patch = img[i:i + Ni, j:j + Nj]
            if scale != 1:
                patch = transform.resize(patch, patch_size)
            yield (i, j), patch
indices, patches = zip(*sliding_window(test_image))
patches_hog = np.array([feature.hog(patch) for patch in patches])
patches_hog.shape
# -
labels = model.predict(patches_hog)
labels.sum()
# +
fig, ax = plt.subplots()
ax.imshow(test_image, cmap='gray')
ax.axis('off')
Ni, Nj = positive_patches[0].shape
indices = np.array(indices)
for i, j in indices[labels == 1]:
ax.add_patch(plt.Rectangle((j, i), Nj, Ni, edgecolor='red',
alpha=0.3, lw=2, facecolor='none'))
| code_listings/05.14-Image-Features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import matplotlib.pyplot as plt
# %matplotlib inline
import random
import numpy as np
import pandas as pd
from sklearn import datasets, svm, cross_validation, tree, preprocessing, metrics
import sklearn.ensemble as ske
import tensorflow as tf
import tensorflow.contrib.learn as skflow
titanic_df = pd.read_excel('titanic3.xls', 'titanic3', index_col=None, na_values=['NA'])
titanic_df.head()
titanic_df['survived'].mean()
titanic_df.groupby('pclass').mean()
class_sex_grouping = titanic_df.groupby(['pclass','sex']).mean()
class_sex_grouping
class_sex_grouping['survived'].plot.bar()
group_by_age = pd.cut(titanic_df["age"], np.arange(0, 90, 10))
age_grouping = titanic_df.groupby(group_by_age).mean()
age_grouping['survived'].plot.bar()
titanic_df.count()
titanic_df = titanic_df.drop(['body','cabin','boat'], axis=1)
titanic_df["home.dest"] = titanic_df["home.dest"].fillna("NA")
titanic_df = titanic_df.dropna()
titanic_df.count()
def preprocess_titanic_df(df):
    """Return a model-ready copy of df: categorical columns label-encoded,
    free-text columns dropped.  The input frame is left untouched."""
    encoder = preprocessing.LabelEncoder()
    out = df.copy()
    # Encode the two categorical columns as integer codes.
    for column in ('sex', 'embarked'):
        out[column] = encoder.fit_transform(out[column])
    # Drop free-text identifier columns.
    return out.drop(['name', 'ticket', 'home.dest'], axis=1)
processed_df = preprocess_titanic_df(titanic_df)
processed_df.count()
processed_df.head()
X = processed_df.drop(['survived'], axis=1).values
y = processed_df['survived'].values
X_train, X_test, y_train, y_test = cross_validation.train_test_split(X,y,test_size=0.2)
clf_dt = tree.DecisionTreeClassifier(max_depth=10)
clf_dt.fit (X_train, y_train)
clf_dt.score (X_test, y_test)
shuffle_validator = cross_validation.ShuffleSplit(len(X), n_iter=20, test_size=0.2, random_state=0)
def test_classifier(clf):
    """Print clf's mean +/- std accuracy over the module-level shuffle split.

    Uses the module-level X, y and shuffle_validator (20 random 80/20
    splits defined above).  NOTE(review): sklearn.cross_validation was
    removed in scikit-learn 0.20; modern code should use
    sklearn.model_selection instead.
    """
    scores = cross_validation.cross_val_score(clf, X, y, cv=shuffle_validator)
    print("Accuracy: %0.4f (+/- %0.2f)" % (scores.mean(), scores.std()))
test_classifier(clf_dt)
clf_rf = ske.RandomForestClassifier(n_estimators=50)
test_classifier(clf_rf)
clf_gb = ske.GradientBoostingClassifier(n_estimators=50)
test_classifier(clf_gb)
| titanic/Titanic.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# Create a validation set of 20 images randomly selected from the holdout set
import ujson, gzip, random
SATURATION_THRESHOLD = 0.1
dataset_name = "imagenet_train_256"
with gzip.open('../dataset_indexes/{}_saturation_values.json.gz'.format(dataset_name), 'rt') as f:
saturation_indexes = ujson.load(f)
saturated_filepaths = [(path, value) for path, value in saturation_indexes.items() if value > SATURATION_THRESHOLD]
# Sort the images by category
category_index = {}
for (path, value) in saturated_filepaths:
category = path.split("/")[0] if dataset_name != "places_2_train_256" else path.split("/")[1]
filelist = category_index.get(category, [])
filelist.append(path)
category_index[category] = filelist
# print(category_index["n01807496"])
# save the category index
# with gzip.open('../dataset_indexes/{}_category_paths.json.gz'.format(dataset_name), 'wt') as f:
# ujson.dump(category_index, f)
# +
# Check the index
with gzip.open('../dataset_indexes/{}_category_paths.json.gz'.format(dataset_name), 'rt') as f:
category_index = ujson.load(f)
print(category_index["house"])
# +
# Check the distribution of the index
import matplotlib.pyplot as plt
# %matplotlib inline
distribution_counts = [len(items) for category,items in category_index.items()]
# print(distribution_counts[:25])
plt.hist(distribution_counts, bins='auto')
plt.title("Distribution of training data")
plt.show()
# +
# Make the training distribution more even by oversampling: pad every
# category with random duplicates of its own original files until each
# reaches the size of the largest category.
biggest_category = max(distribution_counts)
for category, filenames in category_index.items():
    original_number_of_files = len(filenames)
    # Sample indices only from the original files, not the appended copies.
    while len(filenames) < biggest_category:
        filenames.append(filenames[int(random.random()*original_number_of_files)])
# Check the new training set's distribution -- every bar should now be equal.
distribution_counts = [len(items) for category,items in category_index.items()]
plt.hist(distribution_counts, bins='auto')
plt.title("Distribution of reweighted training data")
plt.show()
# -
# Save the new distribution.
with gzip.open('../dataset_indexes/{}_category_paths_reweighted.json.gz'.format(dataset_name), 'wt') as f:
ujson.dump(category_index, f)
| notebooks/Create category index.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from FUNCTIONS import *
# %matplotlib inline
import sklearn.discriminant_analysis
random.seed(42)
# +
# MAIN
data_s = np.array(pd.read_csv('data_s_180k').iloc[:, 1:])
data_b = np.array(pd.read_csv('data_b_180k').iloc[:, 1:])
# Slice & Save
events_no = int(50000)
data_s = data_s[0:events_no*40, 0:40]
data_b = data_b[0:events_no*40, 0:40]
# # Save Datasets
# pd.DataFrame(data_s).to_csv('data_s_1000')
# pd.DataFrame(data_b).to_csv('data_b_1000')
train_examples, train_labels, val_examples, val_labels, test_examples, test_labels = preprocess_ML_sklearn(data_s, data_b)
# -
naive_bayes = MultinomialNB()
random_forest = RandomForestClassifier(random_state=42, n_jobs=-1)
SVM = SVC(random_state=42)
KNN = sklearn.neighbors.KNeighborsClassifier(n_jobs=-1)
bagging = BaggingClassifier(random_state=42, n_jobs=-1)
# +
from sklearn.metrics import accuracy_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer
import time

# Candidate hyperparameter values for each model family.

# Naive Bayes
alpha = [0, 0.001, 0.01, 0.1, 0.5, 1, 1.5, 2, 5, 10, 15, 20, 30, 40, 50, 80, 100, 150]

# Random Forest
criterion = ['gini', 'entropy']
n_estimators_rf = [10, 50, 100, 200]
max_depth_rf = [5, 20, 50, None]
min_samples_leaf_rf = [1, 2, 15, 40]
min_samples_split_rf = [2, 5, 10, 50]

# KNN
n_neighbors = [1, 3, 5, 12]
weights_knn = ['uniform', 'distance']
leaf_size_knn = [10, 30, 60]
p = [1, 2]

# Bagging
n_estimators_bagging = [10, 20, 50, 100, 200]
# NOTE(review): BaggingClassifier treats float max_samples/max_features as
# fractions and requires them in (0, 1]; the values above 1.0 below will be
# rejected by recent scikit-learn releases -- confirm the intended values.
max_samples_bagging = [0.1, 0.2, 1.0, 2.0, 3.0]
max_features_bagging = [0.5, 1.0, 2.0, 50.0]

# SVM
c_parameter = [0.1, 1, 10]
gamma = [0.001, 0.01, 0.1, 1, 5]

# Hyperparameter grids, keyed by each estimator's constructor argument names.
naive_bayes_parameters = {'alpha': alpha}
random_forest_parameters = {'criterion': criterion, 'n_estimators': n_estimators_rf, 'max_depth': max_depth_rf, 'min_samples_leaf': min_samples_leaf_rf, 'min_samples_split': min_samples_split_rf}
KNN_parameters = {'n_neighbors': n_neighbors, 'weights': weights_knn, 'leaf_size': leaf_size_knn, 'p': p}
bagging_parameters = {'n_estimators': n_estimators_bagging, 'max_samples': max_samples_bagging, 'max_features': max_features_bagging}
# BUG FIX: SVC's regularization argument is named 'C'; the previous key
# 'c_parameter' would make GridSearchCV raise "Invalid parameter".
SVM_parameters = {'C': c_parameter, 'gamma': gamma}

# Scoring object using accuracy
scorer = make_scorer(accuracy_score)

# Only the bagging classifier is tuned in this notebook run.
clfs_param = [(bagging, bagging_parameters)]
# +
# Create list to store models
models = []
unopt_accuracy = []
accuracies = []
# Perform grid search
for clf, parameter in clfs_param:
print('\n{}\n'.format(clf.__class__.__name__))
grid_obj = GridSearchCV(clf, parameter, scoring=scorer, n_jobs = -1)
# Perform grid search
start = time.time()
grid_fit = grid_obj.fit(train_examples, train_labels)
end = time.time()
print('Time to tune: {}s'.format(round(end - start), 2))
# Get best estimator
best_clf = grid_fit.best_estimator_
models.append(best_clf)
# Make predictions using the unoptimized and model
start = time.time()
predictions = (clf.fit(train_examples, train_labels)).predict(val_examples)
best_predictions = best_clf.predict(val_examples)
predictions_train = (clf.fit(train_examples, train_labels)).predict(train_examples)
best_predictions_train = best_clf.predict(train_examples)
end = time.time()
print('Time to fit-predict: {}s\n'.format(round(end - start), 2))
# Check hyperparameters
print('Unoptimised: {}\n'.format(clf.get_params(deep = True)))
print('Optimised: {}\n'.format(best_clf.get_params(deep = True)))
# Print Results
print("\nUnoptimised-accuracy-training: {:.4f}".format(accuracy_score(train_labels, predictions_train)))
print("Optimised-accuracy-training: {:.4f}".format(accuracy_score(train_labels, best_predictions_train)))
print("\nUnoptimised-accuracy-validation: {:.4f}".format(accuracy_score(val_labels, predictions)))
print("Optimised-accuracy-validation: {:.4f}".format(accuracy_score(val_labels, best_predictions)))
print('\n\n=============================================================================================')
unopt_accuracy.append(accuracy_score(val_labels, predictions))
accuracies.append(accuracy_score(val_labels, best_predictions))
print('All unoptimised accuracy (validation): {}'.format(unopt_accuracy))
print('Best unoptimised accuracy (validation): {}\n'.format(max(unopt_accuracy)))
print('All optimised accuracy (validation): {}'.format(accuracies))
print('Best optimised accuracy (validation): {}'.format(max(accuracies)))
# -
print(models)
cmx_sklearn(models, test_examples, test_labels, dim=4)
| 2Second_Elimination/2elimRAW-Baggingg.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Importing the required libraries
import numpy as np
import pandas as pd
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import BayesianRidge
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error as mse
from sklearn.metrics import mean_absolute_error as mae
import joblib
filename='1885.csv'
#reading the splitted datasets
df= pd.read_csv(filename)
df.head()
df.drop(columns=['meal_id', 'week','diff'], inplace=True)
df.head()
df['checkout_price'], df['base_price'], df['op_area']=df['checkout_price']/200, df['base_price']/200, df['op_area']/4
X= df.drop(columns=['num_orders']).values
Y= df.num_orders.values
X.reshape(-1,16)
Y.reshape(-1)
# ### Splitting data into train and validation sets
X_train, X_val, y_train, y_val= train_test_split( X, Y, test_size=0.15, random_state=101)
# ### Creating the first pipeline
pipe1= Pipeline([('poly', PolynomialFeatures(degree=1, include_bias=True)),('ext', RandomForestRegressor( random_state=101, max_depth=10))])
t=pipe1.fit(X_train, y_train)
pred1= pipe1.predict(X_val)
mse(y_val, pred1)
#evaluating the results of pipe1
print(mae(y_train,pipe1.predict(X_train)))
mae(y_val, pred1)
# ### Visualizing the performance of model
#train visual
from matplotlib import pyplot
pyplot.figure(figsize=(14, 16))
pyplot.plot(pred1[:200], color= 'red', label='predicted value')
pyplot.plot(y_val[:200], color='green', label='Actual Data')
pyplot.title("Forecasting food Sales")
pyplot.xlabel("Time")
pyplot.ylabel("Number of Orders")
pyplot.legend()
pyplot.show()
# ### Adding the predictions of pipe1 as a extra parameter for pipe2
df['y1']= pipe1.predict(X)
df.tail()
# ### Splitting the modified dataset
X= df.drop(columns=['num_orders']).values
Y= df.num_orders.values
X.reshape(-1,17)
Y.reshape(-1)
X_train1, X_val1, y_train1, y_val1 = train_test_split( X, Y, test_size=0.2, random_state=70)
# ### Creating the second pipeline
# +
pipe2= Pipeline([('poly', PolynomialFeatures(degree=3, include_bias=True)),('lin', BayesianRidge(tol= 1e-8, n_iter=300, normalize=True))])
# -
t=pipe2.fit(X_train1, y_train1)
pred2= pipe2.predict(X_val1)
mse(y_val1, pred2)
#evaluating the predictions of pipe2
print(mae(y_train1,pipe2.predict(X_train1)))
mae(y_val1, pred2)
pred1= pipe2.predict(X_val1)
print(mae(y_val1, pred1))
mae(y_train1, pipe2.predict(X_train1))
# Validation visual: compare pipe2's predictions on the validation set
# against the matching validation targets.
from matplotlib import pyplot
pyplot.figure(figsize=(14, 16))
# BUG FIX: the original plotted training-set predictions against
# validation-set actuals, so the two series did not correspond
# sample-for-sample; plot the validation predictions (pred2) instead.
pyplot.plot(pred2[:200], color='red', label='predicted value')
pyplot.plot(y_val1[:200], color='green', label='Actual Data')
pyplot.title("Forecasting food Sales")
pyplot.xlabel("Time")
pyplot.ylabel("Number of Orders")
pyplot.legend()
pyplot.show()
| Training and Hyperparameter Tuning.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="bvxSPAKQj9gd" colab_type="code" colab={}
# %tensorflow_version 1.x
# %cd /content
# ! rm -rf gan-tools
# !git clone --single-branch --depth=1 --branch master https://github.com/hannesdm/gan-tools.git
# %cd gan-tools
from keras.datasets import mnist
import impl
from impl import *
from core import vis
from core import gan
from core import constraint
import matplotlib.pyplot as plt
plt.rcParams['image.cmap'] = 'gray'
plt.rcParams['axes.grid'] = False
# + [markdown] id="RqQjkSRHseDK" colab_type="text"
# ## Load the cifar10 data
# The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. <br/>
# **Exercise** We will select a single class of this dataset to model. This can be done by setting the **model_class** variable to the corresponding class. <br/>
# One cell lower, a few images of the selected class are shown.
# + id="XbdA5WBEkf05" colab_type="code" colab={}
# CIFAR-10 class index to model (set per the exercise in the markdown above).
model_class = 1
(X_train_original, Y_train), (_, _) = cifar10.load_data()
# Keep only the images whose label matches the chosen class.
X_train_single_class = X_train_original[np.where(np.squeeze(Y_train) == model_class)]
# Scale uint8 pixels from [0, 255] to [-1, 1] (standard GAN preprocessing).
X_train = X_train_single_class / 127.5 - 1.
# + id="kHRNCEkA-VA6" colab_type="code" colab={}
grid = vis.image_grid(X_train_single_class[0:20], 5)
plt.imshow(grid)
# + [markdown] id="edP72fs0v6bs" colab_type="text"
# ## Train the DCGAN
# <img src="https://i.imgur.com/NFUiEf5.png" width="450"> <br/>
# The following code will train a GAN with a working DCGAN architecture. This training can be controlled by the following parameters:
#
#
# * **batches**: The number of batches the GAN should train on.
# * **batch_size**: The size of each batch.
# * **plot_interval**: After how many batches the generator should be sampled and the images shown.
#
# The default parameters may be kept. <br/>
# Make sure to train the GAN for a sufficient amount of time in order to see realistic samples. At any point, the training may be stopped by clicking on the stop button or on 'interrupt execution' in the runtime menu at the top of the page.<br/> In the same menu, the runtime type may also be changed to 'GPU'. This will speed up the training of the models. <br/>
# **Exercise** Comment on the loss and accuracy of the generator and discriminator, shown during training and discuss its stability.
#
#
#
#
# + id="yWgM6KsDmVxU" colab_type="code" colab={}
gan = cifar10_dcgan()
gan.train_random_batches(X_train, batches = 20000, batch_size=32, plot_interval = 50)
vis.show_gan_image_predictions(gan, 32)
# + [markdown] id="c_s7JNgnDNMq" colab_type="text"
# ## Stability in GANs
# Sadly, training a GAN is not always this easy. <br/>
# Stability during training is important for both discriminator and generator to learn. <br/>
# Below is a short video (50s) showing the intermediate results of a GAN being trained on mnist. The final result is a phenomenon known as 'mode collapse'. <br/>
# <img src='https://i.imgur.com/lG35xDP.gif'>
#
| assignment 4/exercise session 4/ANN notebooks/DCGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Examples of Using Pymdwizard library to batch-update XML Metadata
#
# * Change all instances of an author's name
# * Update project webpage & publication date
# * Reset title to match that stored in "Citation" field
#
#
import glob
import pandas as pd
from lxml import etree
import sys
import os
from pymdwizard.core.xml_utils import XMLRecord, XMLNode #import in this manner requires adding a path file to your site-packages directory
# ** List the existing XML files you wish to edit **
# +
#folder with xml metadata
xml_fldr=r"..\_XMLMetadata"
os.chdir(xml_fldr)
#Pattern of the XML files you wish to select
xml_pattern="*RawGPR.xml"
#List RawGPR xmls
xmls=glob.glob(xml_pattern)
# -
# ** Update Project Webpage **
for fl in xmls:
metd=XMLRecord(fl)
new_website="https://staging-www.usgs.gov/climate_landuse/clu_rd/glacierstudies/data.asp"
metd.metadata.idinfo.citation.citeinfo.lworkcit.citeinfo.onlink.text=new_website
metd.save()
# ** Change Person's Name (e.g. add middle initial) **
#Edit Name for given person
replace_name="<NAME>"
new_name="<NAME>"
for fl in xmls:
metd=XMLRecord(fl)
for name in metd.metadata.idinfo.citation.citeinfo.origin:
if name.text==replace_name: #replace old name with new name
name.text=new_name
metd.save()
# ** Update Publication Date **
# Date string exactly as it should appear in the <pubdate> element.
pubdate="201707" #set date as you wish it to appear in publication node
for fl in xmls:
    metd=XMLRecord(fl)
    # NOTE(review): other cells in this notebook set element text via
    # `.text` (see the webpage update above), while this assigns to the
    # node attribute itself -- confirm pymdwizard accepts a plain string here.
    metd.metadata.idinfo.citation.citeinfo.pubdate=pubdate
    metd.save()
# ** Change List of Authors in Larger Work Citation **
# Author list can be either read in from an XMl fragment:
all_author_path=r"../people.xml"
all_author=XMLNode(open(all_author_path, 'r').read()) #Must read in text file as string to convert to node
# Or, created from text entered directly in the notebook:
all_author=XMLNode(r'''
<people>
<origin><NAME> (ORCID 0000-0002-9185-0144)</origin>
<origin><NAME> (ORCID 0000-0002-9462-6842)</origin>
<origin><NAME></origin>
<origin><NAME></origin>
<origin><NAME> (ORCID 0000-0003-4677-029X)</origin>
<origin><NAME> (ORCID 0000-0003-4170-0428)</origin>
<origin><NAME> (OrcID 0000-0002-0938-3496)</origin>
<origin><NAME> (ORCID 0000-0001-7665-7795)</origin>
<origin><NAME></origin>
<origin><NAME></origin>
<origin><NAME></origin>
<origin><NAME></origin>
<origin><NAME></origin>
</people>)''')
# Then, this list of authors can be added to each record, after initial contents is deleted
for fl in xmls:
metd=XMLRecord(fl) #read xml
metd.metadata.idinfo.citation.citeinfo.lworkcit.citeinfo.clear_children('origin') #Clear contents of current author list
for author in all_author.people.origin: #add authors from xml list back in to the larger work cited 'origin' tag
metd.metadata.idinfo.citation.citeinfo.lworkcit.citeinfo.add_child(author)
metd.save()
# ** Reset title of dataset to match that in the Citation section**
#
# The two should match, but this may not always be the case (e.g. date ranges)
for fl in xmls:
metd=XMLRecord(fl)
#this will need to be re-written depending on how it is possible to split your desired "Title" from the larger citation.
title_from_citation=metd.metadata.idinfo.citation.citeinfo.othercit.text.split("2017, ")[1].split(".")[0]
#the above line splits "Title" from the citation, in format of "Author. et. al, 2017, Title."
metd.metadata.idinfo.citation.citeinfo.title.text=title_from_citation
metd.save()
| examples/BatchUpdateAuthorsDatesEtc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + pycharm={"name": "#%% imports\n"}
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# %matplotlib inline
# + pycharm={"name": "#%% load mnist\n"}
# Load the MNIST handwritten-digit dataset and scale pixel values from
# [0, 255] to [0, 1] so the network trains on normalized inputs.
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# + pycharm={"name": "#%% model definition\n"}
# Simple feed-forward classifier: flatten the 28x28 image, one hidden layer
# with dropout for regularization, 10 output logits (one per digit class).
model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10)
])

# + pycharm={"name": "#%% setup model\n"}
# from_logits=True because the final Dense layer has no softmax activation.
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
model.compile(optimizer='adam',
              loss=loss_fn,
              metrics=['accuracy'])

# + pycharm={"name": "#%% fit model\n"}
model.fit(x_train, y_train, epochs=5)

# + pycharm={"name": "#%% evaluate model\n"}
model.evaluate(x_test, y_test, verbose=2)
# Wrap the trained model with a Softmax layer so predictions come out as
# probabilities rather than raw logits.
probability_model = tf.keras.Sequential([
    model,
    tf.keras.layers.Softmax()
])

# + pycharm={"name": "#%% plot a sample and model prediction\n"}
# Show one test image and print the class the model predicts for it.
image = x_test[70]
fig = plt.figure()
plt.imshow(image, cmap='gray')
plt.show()
# Add a leading batch dimension (shape becomes (1, 28, 28)); predict()
# expects a batch of images.
image = np.expand_dims(image,0)
print("Model predicted " + str(np.argmax(probability_model.predict(image))))
#print(probability_model.predict(image))

# + pycharm={"name": "#%%\n"}
# NOTE(review): this prints y_train[70] although the image above is
# x_test[70] — presumably y_test[70] was intended; verify.
print(y_train[70])
# -
# -
| spring2021/6-tensorflow/mnist.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part I. ETL Pipeline for Pre-Processing the Files
# ## PLEASE RUN THE FOLLOWING CODE FOR PRE-PROCESSING THE FILES
# #### Import Python packages
# Import Python packages
import pandas as pd
import cassandra
import re
import os
import glob
import numpy as np
import json
import csv
# #### Creating list of filepaths to process original event csv data files
# +
# Checking current working directory
print(os.getcwd())

# Get your current folder and subfolder event data
filepath = os.getcwd() + '/event_data'

# Get the file paths with the subdirectories using glob
# (matches every CSV directly inside the event_data folder).
file_path_list = glob.glob(os.path.join(filepath,'*.csv'))
#print(file_path_list)
# -
# #### Processing the files to create the data file csv that will be used for Apache Casssandra tables
# +
# Initiating an empty list of rows
full_data_rows_list = []

# Read every per-day event CSV and accumulate its data rows into one flat
# list (each file's header row is skipped with next()).
for f in file_path_list:
    # Read csv file
    with open(f, 'r', encoding = 'utf8', newline='') as csvfile:
        # Create a CSV reader object
        csvreader = csv.reader(csvfile)
        next(csvreader)
        # Extract each data row one by one and append it
        for line in csvreader:
            #print(line)
            full_data_rows_list.append(line)

# Uncomment the code below to print the total number of rows
print(len(full_data_rows_list))
# Uncomment the code below to print the contents of event data rows
#print(full_data_rows_list)

# Creating a smaller event data CSV file called event_datafile_full csv that will be used to insert data into the
# Apache Cassandra tables
csv.register_dialect('myDialect', quoting=csv.QUOTE_ALL, skipinitialspace=True)

with open('event_datafile_new.csv', 'w', encoding = 'utf8', newline='') as f:
    writer = csv.writer(f, dialect='myDialect')
    writer.writerow(['artist','firstName','gender','itemInSession','lastName','length',\
                     'level','location','sessionId','song','userId'])
    for row in full_data_rows_list:
        # Rows with an empty artist field carry no song-play data; skip them.
        if (row[0] == ''):
            continue
        # Project only the 11 columns needed by the Cassandra tables.
        writer.writerow((row[0], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[12], row[13], row[16]))
# -

# Print the number of rows in the CSV file
with open('event_datafile_new.csv', 'r', encoding = 'utf8') as f:
    print(sum(1 for line in f))
# # Part II. Complete the Apache Cassandra coding portion of your project.
#
# ## Now you are ready to work with the CSV file titled <font color=red>event_datafile_new.csv</font>, located within the Workspace directory. The event_datafile_new.csv contains the following columns:
# - artist
# - firstName of user
# - gender of user
# - item number in session
# - last name of user
# - length of the song
# - level (paid or free song)
# - location of the user
# - sessionId
# - song title
# - userId
#
# The image below is a screenshot of what the denormalized data should appear like in the <font color=red>**event_datafile_new.csv**</font> after the code above is run:<br>
#
# <img src="images/image_event_datafile_new.jpg">
# ## Begin writing your Apache Cassandra code in the cells below
# #### Creating a Cluster
# +
# Connect to a Cassandra instance in local machine
from cassandra.cluster import Cluster
cluster = Cluster(['127.0.0.1'])

# Connect to a session
session = cluster.connect()
# -

# #### Create Keyspace
# Create a keyspace to store the tables.
# SimpleStrategy with replication_factor 1 is fine for a single local node.
try:
    session.execute("""
CREATE KEYSPACE IF NOT EXISTS sparkify
WITH REPLICATION =
{ 'class' : 'SimpleStrategy', 'replication_factor' : 1}
""")
except Exception as exep:
    print(exep)

# #### Set Keyspace
# Set the keyspace to sparkify so later queries need no keyspace prefix.
try:
    session.set_keyspace('sparkify')
except Exception as exep:
    print(exep)
# ### Now we need to create tables to run the following queries. Remember, with Apache Cassandra you model the database tables on the queries you want to run.
# ## Create queries to ask the following three questions of the data
#
# ### 1. Give me the artist, song title and song's length in the music app history that was heard during sessionId = 338, and itemInSession = 4
#
#
# ### 2. Give me only the following: name of artist, song (sorted by itemInSession) and user (first and last name) for userid = 10, sessionid = 182
#
#
# ### 3. Give me every user name (first and last) in my music app history who listened to the song 'All Hands Against His Own'
#
#
#
# ## Cassandra Data Modeling
# ### Query 1 - Retrieve the artist, song title and song's length in the music app history that was heard during a given session ID and itemInSession
# +
# A table specific to the query has to be created
# Here the PRIMARY KEY is chosen to be ((sessionId), item_in_session) and PARTITION KEY to be sessionId
# We are querying on these two attributes and sessionId will be the better choice as PARTITION KEY as it will be hashed to find the cluster
# Since we need to query on itemInSession, it is chosen to be a clustering column and is added in the PRIMARY KEY

# Create table
query = "CREATE TABLE IF NOT EXISTS music_library "
query = query + "(session_id INT, item_in_session INT, artist_name VARCHAR, song_title text, song_duration DECIMAL, PRIMARY KEY ((session_id), item_in_session))"
try:
    session.execute(query)
except Exception as exep:
    print(exep)
# -

# #### Insert data into the tables
# +
# Load the data from the CSV file and insert into the Cassandra table
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip the header row
    for line in csvreader:
        # Insert into the table
        query = "INSERT INTO music_library (session_id, item_in_session, artist_name, song_title, song_duration) "
        query = query + "VALUES (%s, %s, %s, %s, %s)"
        # CSV columns: 0=artist, 3=itemInSession, 5=length, 8=sessionId, 9=song
        try:
            session.execute(query, (int(line[8]), int(line[3]), line[0], line[9], float(line[5])))
        except Exception as exep:
            print(exep)
# -

# #### Verifying that the data have been inserted into table
# +
# SELECT statement to verify whether the data was properly entered into the table
select_query = "SELECT artist_name, song_title, song_duration FROM music_library WHERE session_id=338 AND item_in_session=4"
try:
    rows = session.execute(select_query)
except Exception as exep:
    print(exep)

# Print the results
for row in rows:
    print(row.artist_name, row.song_title, row.song_duration)
# -
# ### COPY AND REPEAT THE ABOVE THREE CELLS FOR EACH OF THE THREE QUESTIONS
# ### Query 2 - Retrieve the name of artist, song (sorted by itemInSession) and user name (first and last name) given an user ID and session ID
# +
# A table specific to the query has to be created
# Here the PRIMARY KEY is chosen to be ((user_id, sessionId), item_in_session) and PARTITION KEY to be (user_id, sessionId)
# We are querying on these two attributes and we want the results to be sorted in order of itemInSession
# If only userId was used as PARTITION KEY, then there exists a high chance of data aggregating in a single node (depends on data)
# Hence using (user_id, sessionId) as PARTITION KEY, it is more likely to be distributed across nodes as sessionId changes periodically
# Since the output needs to be sorted in itemInSession, it is chosen to be a clustering column and is added in the PRIMARY KEY

# Create table
query = "CREATE TABLE IF NOT EXISTS artist_library "
query = query + "(user_id INT, session_id INT, item_in_session INT, artist_name VARCHAR, song_title TEXT, first_name VARCHAR, last_name VARCHAR, \
PRIMARY KEY ((user_id, session_id), item_in_session))"
try:
    session.execute(query)
except Exception as exep:
    print(exep)
# -

# #### Insert data into the tables
# +
# Load the data from the CSV file and insert into the Cassandra table
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip the header row
    for line in csvreader:
        # Insert into the table
        query = "INSERT INTO artist_library (user_id, session_id, item_in_session, artist_name, song_title, first_name, last_name) "
        query = query + "VALUES (%s, %s, %s, %s, %s, %s, %s)"
        # CSV columns: 0=artist, 1=firstName, 3=itemInSession, 4=lastName,
        # 8=sessionId, 9=song, 10=userId
        try:
            session.execute(query, (int(line[10]), int(line[8]), int(line[3]), line[0], line[9], line[1], line[4]))
        except Exception as exep:
            print(exep)
# -

# #### Verifying that the data have been inserted into table
# +
# SELECT statement to verify whether the data was properly entered into the table
select_query = "SELECT artist_name, song_title, first_name, last_name FROM artist_library WHERE user_id=10 AND session_id=182"
try:
    rows = session.execute(select_query)
except Exception as exep:
    print(exep)

# Print the results
for row in rows:
    print(row.artist_name, row.song_title, row.first_name, row.last_name)
# -
# ### Query 3 - Retrieve the user name (first and last) in the music app history who listened to a particular song
# +
# A table specific to the query has to be created
# Here the PRIMARY KEY is chosen to be ((song_title), user_id) and PARTITION KEY to be song_title
# We are querying on the song_title and hence it should be the PARTITION KEY
# If only song_title was the PRIMARY KEY, since Cassandra does not allow duplicate entries, the entries would be overwritten
# Hence using (song_title, user_id) as PRIMARY KEY, we can overcome this issue without violating the query constraints
# user_id is the clustering column here: it keeps rows for different users
# of the same song distinct (it is not needed for sort order).

# Create table
query = "CREATE TABLE IF NOT EXISTS play_history "
query = query + "(song_title TEXT, user_id INT, first_name VARCHAR, last_name VARCHAR, PRIMARY KEY ((song_title), user_id))"
try:
    session.execute(query)
except Exception as exep:
    print(exep)
# -
# #### Insert data into the tables
# +
# Load the data from the CSV file and insert each song play into play_history.
file = 'event_datafile_new.csv'

with open(file, encoding = 'utf8') as f:
    csvreader = csv.reader(f)
    next(csvreader)  # skip header
    for line in csvreader:
        # Insert into the table
        query = "INSERT INTO play_history (song_title, user_id, first_name, last_name) "
        query = query + "VALUES (%s, %s, %s, %s)"
        # Fix: map CSV columns to the correct table attributes.
        # CSV columns: 1=firstName, 4=lastName, 9=song, 10=userId —
        # the original passed (firstName, userId, lastName, song), which
        # stored the user's first name as the song title.
        try:
            session.execute(query, (line[9], int(line[10]), line[1], line[4]))
        except Exception as exep:
            print(exep)
# -
# #### Verifying that the data have been inserted into table
# +
# SELECT statement to verify whether the data was properly entered into the table
select_query = "SELECT first_name, last_name FROM play_history WHERE song_title='All Hands Against His Own'"
try:
    rows = session.execute(select_query)
except Exception as exep:
    print(exep)

# Print the results.
# Fix: the query only returns first_name and last_name, so those are the
# only attributes on each row — the original referenced row.artist_name /
# row.song_title / row.song_duration, which would raise AttributeError.
for row in rows:
    print(row.first_name, row.last_name)
# -
# ### Drop the tables before closing out the sessions
# +
# Drop all three query-specific tables.
# Fix: the original caught `Execption` (typo), which would raise a
# NameError the moment any DROP actually failed; the three identical
# try/except stanzas are also collapsed into one loop.
for table_name in ('music_library', 'artist_library', 'play_history'):
    try:
        session.execute("DROP TABLE IF EXISTS " + table_name)
    except Exception as exep:
        print(exep)
# -
# ### Close the session and cluster connection
# Release the driver session and all cluster connection resources.
session.shutdown()
cluster.shutdown()
| Project 2 Data Modeling with Apache Cassandra/Project 2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# 
#
# <a href="https://hub.callysto.ca/jupyter/hub/user-redirect/git-pull?repo=https%3A%2F%2Fgithub.com%2Fcallysto%2Fcurriculum-notebooks&branch=master&subPath=Science/LightOpticalSystems/light-optical-systems.ipynb&depth=1" target="_parent"><img src="https://raw.githubusercontent.com/callysto/curriculum-notebooks/master/open-in-callysto-button.svg?sanitize=true" width="123" height="24" alt="Open in Callysto"/></a>
# + tags=["hide-input"]
import matplotlib.pyplot as plt
import plotly as py
import plotly.graph_objs as go
import numpy as np
import math
import ipywidgets as widgets
from IPython.display import display, Math, Latex, HTML, IFrame
from astropy.table import Table, Column
from ipywidgets import interact, interactive
# Enable offline plotly rendering inside the notebook.
py.offline.init_notebook_mode(connected=True)
# %matplotlib inline

# Global matplotlib font settings applied to every figure in the notebook.
font = {'family' : 'sans-serif',
        'weight' : 'normal',
        'size' : 14}
plt.rc('font', **font)

'''Above, we are importing all the necessary modules in order to run the notebook.
Numpy allows us to define arrays of values for our variables to plot them
matplotlib is what we use to create the figures
the display and widgets are to make the notebook look neat
'''

# Inject a show/hide-code toggle button into the rendered notebook output.
HTML('''<script>
function code_toggle() {
if (code_shown){
$('div.input').hide('500');
$('#toggleButton').val('Show Code')
} else {
$('div.input').show('500');
$('#toggleButton').val('Hide Code')
}
code_shown = !code_shown
}
$( document ).ready(function(){
code_shown=false;
$('div.input').hide()
});
</script>
<form action="javascript:code_toggle()"><input type="submit" id="toggleButton" value="Show Code"></form>''')
# -
# ***
# # Light and Optics
# ***
#
# <img src="https://media.giphy.com/media/3o7OsM9vKFH2ESl0KA/giphy.gif" alt="Drawing" style="width: 600px;"/>
# <center>Gif taken from https://giphy.com/gifs/fandor-sun-eclipse-3o7OsM9vKFH2ESl0KA/links, August 1st, 2018.</center>
# <center> Figure 1: For hundreds of years, scientists have tried to understand the nature of light. With advances in technology, and inventions like telescopes, we have been able to see farther than ever before. </center>
#
# ***
#
# ## Introduction
#
# Throughout most of history, humans did not understand light as we do today. As science and technology have progressed over time, so too has our knowledge of the nature of light.
#
# In this lesson, when we say the word "light", we will be referring to visible light (light that comes from the sun, lightbulbs etc.). We will go over how a few key experiments kickstarted a new way of thinking, and a few of the ways that we are able to manipulate light. We will also talk about how our eyes enable us to see.
#
# ## Background
#
# If you had to describe to someone what light is, you may have a hard time. Some people think of light as the absence of darkness, but even that doesn't say much about light itself.
#
# Our understanding of light truly began around the 17th century, when a few individuals started to realize that light was not a mystical substance. Scientists (or "natural philosophers", as they were called during that time) recognized that certain properties of light were measurable, and that some properties could be manipulated. Sir <NAME> and <NAME> were among the first scientists to take a step in this direction.
#
#
# > ### <NAME>'s Prism Experiment
#
# Sir <NAME> has made contributions to many fields of science and mathematics. In 1666, while spending time at his childhood home in Lincolnshire, England, Newton began experimenting with light.
#
# Using a small slit in his window shutters, Newton passed a narrow beam of sunlight through a glass prism. The light travelled through the prism, and projected a rainbow of color on the other side!
#
# <img src="http://media.web.britannica.com/eb-media/10/7710-050-36C066AC.jpg" >
#
# <center>Picture taken from http://media.web.britannica.com/eb-media/10/7710-050-36C066AC.jpg.</center>
# <center> Figure 2: This picture shows how a prism can create a spectrum of color. This is what Newton would have seen in 1666.</center>
#
# Later on, scientists determined that the prism was actually splitting light into its component parts. This phenomenon is called **dispersion**.
#
# Through this experiment, Newton demonstrated that white light was actually made up of all the individual colors of the rainbow!
#
# > ### <NAME> and the Speed of Light
#
# For many years, people thought that if somebody lit a match, the light from that match would be instantly visible to everyone, no matter how far away they were. However, in 1676 <NAME> proved that this is not the case.
#
# Rømer spent a long time studying the orbit of Io, one of Jupiter's moons. As part of his study, he began predicting the times when Io should be hidden behind Jupiter's shadow (these periods are called eclipses). However, Rømer saw that his predictions for when these eclipses should occur were not always accurate.
# <img src="https://media.giphy.com/media/DXIa1beDspYRy/giphy.gif" alt="Drawing" style="width: 300px;"/>
# <center>Gif taken from https://giphy.com/gifs/timelapse-DXIa1beDspYRy, August 1st, 2018.</center>
# <center> Figure 3: Here we can see Jupiter as it looks through a telescope. You might be able to see a black spot move from the left to the right across Jupiter's surface. This is actually one of Jupiter's many moons!</center>
# Rømer then realized that these errors may be because the distance between Io and the Earth was always changing. Rømer thought that when the distance between Io and the Earth increased, it might take a longer time for light coming from Io to reach Earth. If this were the case, then the light must be travelling at a finite speed!
#
# After taking many measurements and using some clever mathematics, Rømer calculated the speed of light to be roughly 220,000,000 m/s, or 792,000,000 km/h.
#
# Today, we have measured the speed of light to be 299,792,458 m/s. Although he was not exactly right, Rømer provided one of the first mathematical calculations for the speed of light.
#
# ***
# Since the time of Rømer and Newton, scientists have made many new discoveries about the nature of light. While not all of these discoveries agree with one another, here are two things we know for sure:
# - Light is made up of a spectrum of color
# - Light travels at a speed of 299,792,458 m/s
#
# Now let's talk about some of the ways we can manipulate light.
# ***
#
# ## Reflection
#
# We are all familiar with reflection; chances are, you look at your reflection more than once a day. But have you ever stopped to wonder what is really going on?
#
# Reflection is the term used to describe how light can change direction when it comes into contact with certain surfaces.
#
# When incoming light rays encounter a reflective surface, they bounce off the surface and continue moving in a new direction. The new direction in which it moves is determined by the **law of reflection**.
#
# \begin{equation}
# \rm Law\: of\: Reflection: Angle\: of\: Incidence = Angle\: of\: Reflection
# \end{equation}
#
# On the animation below, click on the flashlight to turn it on, and move your mouse to change the angle of incidence.
# + tags=["hide-input"]
#IFrame('Animations/reflect.html',width=500,height=320)
# + tags=["hide-input"] language="html"
# <iframe src='Animations/reflect.html' width=500 height=350></iframe>
#
# -
# As seen above, the **normal** is what we call the line that forms a 90$^{\circ}$ angle with the surface. The **angle of incidence** is what we call the angle between the flashlight's beam and the normal. Similarly, the **angle of reflection** is the angle that the newly reflected light beam makes with the normal. The law of reflection states that these two angles will always be equal.
#
#
#
# ## Refraction
#
# Have you ever tried to reach down and grab an object sitting at the bottom of a pool of water? If you have, you may have noticed that the object isn't actually in the location that you thought it was.
# <img src="http://legacy.sciencelearn.org.nz/var/sciencelearn/storage/images/contexts/light-and-sight/sci-media/video/refraction/668954-1-eng-NZ/Refraction.jpg" alt="Drawing" style="width: 450px;"/>
# <center> Image taken from http://legacy.sciencelearn.org.nz/Contexts/Light-and-Sight/Sci-Media/Video/Refraction/(quality)/hi on August 3rd, 2018.</center>
# <center> Figure 4: When you are looking into a body of water from above, the objects you see beneath the surface are not actually where they appear to be. </center>
# This phenomenon occurs because the light travelling to your eyes from the bottom of the pool **refracts**, or changes its direction of travel, when it transitions from water to air.
#
# The **index of refraction** is a value that we use to show how much light will bend when travelling through a substance. For example, the index of refraction for air is approximately 1.00, and the index of refraction for water is about 1.33. Because these indexes are different, light will bend when passing from water to air, or vice versa.
#
# Use the animation below to see how light refracts when passing from air to water. Click on the flashlight to turn it on.
# + tags=["hide-input"]
#IFrame('Animations/refract.html',width=520,height=320)
# + tags=["hide-input"] language="html"
# <iframe src='Animations/refract.html' width=520 height=320></iframe>
#
# -
# Mathematically, reflection can be described using the following equation, known as Snell's Law:
#
# \begin{equation}
# \textrm{Snells Law:}\: n_1\sin(\theta_1) = n_2\sin(\theta_2)
# \end{equation}
#
# where $n_1$ is the index of refraction for the first medium, $\theta_1$ is the incident angle, $n_2$ is the index of refraction for the second medium, and $\theta_2$ is the angle of refraction.
#
# Light will bend *towards* the normal when travelling from a medium with a *lower* index of refraction to one with a *higher* index of refraction, and vice versa.
#
# ***
# Some of the most beautiful sights in nature are caused by reflection and refraction. Here are a couple of examples:
#
# ### Rainbows
#
# Rainbows are a result of both reflection and refraction. As it's raining, each water droplet acts like a tiny prism, just like the one we saw in Figure 2. The water droplets split visible light into colors, and these colors are then reflected back towards our eyes.
#
# <img src="http://waterstories.nestle-waters.com/wp-content/uploads/2015/04/How-rainbow-forms-waterstories.jpg" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://waterstories.nestle-waters.com/environment/how-does-a-rainbow-form/ on August 3rd, 2018.</center>
# <center> Figure 5: Water droplets use reflection and refraction to create the beautiful rainbows that we see while it is raining.</center>
#
#
#
# ### Mirages
#
# Have you ever been driving on a sunny day, and up ahead it looks as though a stream of water is running across the road? You are really seeing a mirage.
# Mirages also occur because of refraction, but they do not result in a display of color like a rainbow. This type of refraction occurs due to a difference in temperature between separate layers of air.
#
# As we were describing before, refraction occurs when light travels from one substance to another. Well, it turns out that hot air and cold air are actually different enough to act as different substances. Therefore, light will refract when passing through one to the other.
#
# <img src="http://edex.s3-us-west-2.amazonaws.com/styles/kraken_optimized/s3/banner-mirage.jpg?itok=YXSTIo8_" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://edexcellence.net/articles/what-the-mirage-gets-wrong-on-teacher-development on August 3rd, 2018.</center>
# <center> Figure 6: Although it may look like water running across the road, it is actually a mirage. These commonly occur in desert areas, where the road can become very hot.</center>
#
# When you are looking at a mirage, it can look as though the air is wavy and fluid, which is why it is common to think that you are looking at water. This appearance occurs when layers of hot and cold air are mixing together, and light passing through these layers is constantly being refracted in different directions.
#
# You may see a mirage appear on top of a hot roadway, behind the exhaust pipe of a plane or car, or around any other source of heat.
#
# ## Applications of Reflection and Refraction
#
# ### Lenses
#
# If you have glasses, or contact lenses, then you are constantly using refraction in order to help you see! Lenses use refraction to point light in specific directions.
#
# Generally speaking, there are two types of lenses: **convex** and **concave**.
#
# To see how each type of lens affects light, use the following animation.
# + tags=["hide-input"] active=""
# #IFrame('Animations/convex.html',width=520,height=420)
# + tags=["hide-input"] language="html"
# <iframe src='Animations/convex.html' width=520 height=430></iframe>
#
# -
# As seen above, a convex lens focuses light towards a specific point, while a concave lens will spread light away from a point. These lenses can be combined in many ways in order to produce different effects. For example, a camera lens uses a series of both convex and concave lenses in order to direct incoming light towards the back of the camera.
#
# <img src="http://i.imgur.com/IH2ymaj.jpg" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://www.reddit.com/r/pic/comments/3o3b7w/camera_lens_cut_in_half/ on August 3rd, 2018.</center>
# <center> Figure 5: This is what the inside of a camera lens looks like. The photographer can adjust how they want the picture to look by changing the distance between the individual lenses.</center>
#
#
#
#
# ## Vision
#
# Our eyes are very complex organs, but the process that enables us to see is actually pretty simple. The basic steps are as follows:
#
# 1. Light enters the eye through the **pupil**
# 2. The convex **lens** behind the pupil directs incoming light towards the **retina**, which is like a screen at the back of our eye.
# 3. The retina then sends this image to the brain.
# 4. The brain then interprets the image.
#
# <img src="https://openclipart.org/image/2400px/svg_to_png/261647/EyeDiagram.png" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://openclipart.org/detail/261647/eye-diagram on August 3rd, 2018. </center>
# <center> Figure 6: This diagram shows some of the key components of the eye that enable us to see.</center>
#
# However, the image that is projected onto the retina is actually upside down!
#
# <img src="https://m.eet.com/media/1077748/max-hfield-01.gif" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://www.eetimes.com/author.asp?section_id=14&doc_id=1282795 on August 3rd, 2018.</center>
# <center> Figure 7: The convex lens at the front of our eye actually flips images upside down.</center>
#
# So the retina actually sends an upside down image to the brain, and the brain automatically flips the image rightside up.
#
# Use the following link to see an animation showing how a convex lens flips images upside down:https://phet.colorado.edu/sims/geometric-optics/geometric-optics_en.html.
#
#
# ## Technology & Inventions
#
# ### The Telescope
#
# The first telescope was made by <NAME> in 1608, but it was Galileo Galilee who became famous by using it for astronomy. There are many different types of telescopes, but they all use reflection and refraction to make far away objects appear closer.
#
# A telescope uses a large opening to collect incoming light, and then directs this light towards your eye by using mirrors and lenses.
#
# <img src="https://www.skyandtelescope.com/wp-content/uploads/three-scopes.jpg" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://www.skyandtelescope.com/press-releases/tips-for-first-time-telescope-buyers/ on August 3rd, 2018.</center>
# <center> Figure 8: Telescopes come in many different shapes and sizes.</center>
#
# The reason why things look bigger when looking through a telescope is because of the lenses.
#
#
# ### The Microscope
#
#
# - talk about invention of microscope
# - why it works
#
#
# ## Conclusion
#
# Our understanding of light is the result of hundreds of years of research and innovation. Along the way, we have created incredible new technologies that have allowed us to look further than ever before.
#
# <img src="https://xenlife.com.au/wp-content/uploads/Hubble-Space-Telescope-650x250-1078x516.jpg" alt="Drawing" style="width: 400px;"/>
# <center> Image taken from https://xenlife.com.au/hubble-space-telescope-important/ on August 3rd, 2018.</center>
# <center> Figure 9: The Hubble Space Telescope has shown us pictures of galaxies that are billions of light years away.</center>
#
#
#
# [](https://github.com/callysto/curriculum-notebooks/blob/master/LICENSE.md)
| _build/html/_sources/curriculum-notebooks/Science/LightOpticalSystems/light-optical-systems.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# +
# #!/usr/bin/python2
""" Linear Regression Example """

from __future__ import absolute_import, division, print_function

import tflearn

# Regression data
X = [3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167, 7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1]
Y = [1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221, 2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3]

# Linear Regression graph: a single linear unit y = W*x + b, trained with
# SGD on mean-squared-error loss; R2 is reported as the fit metric.
# (Stray trailing semicolon and inconsistent keyword spacing removed.)
input_ = tflearn.input_data(shape=[None])
linear = tflearn.single_unit(input_)
regression = tflearn.regression(linear, optimizer='sgd', loss='mean_square',
                                metric='R2', learning_rate=0.01)
m = tflearn.DNN(regression)
m.fit(X, Y, n_epoch=1000, show_metric=True, snapshot_epoch=False)

print("\n Regression result:")
print("Y = " + str(m.get_weights(linear.W)) +
      "*X + " + str(m.get_weights(linear.b)))

print("\n Test Prediction for x = 3.2, 3.3, 3.4")
print(m.predict([3.2, 3.3, 3.4]))
# +
# Training logical operators.
# Fix: `from __future__ import ...` must be the first statement in a
# module/cell (only comments and a docstring may precede it); placing it
# after the other imports, as the original did, raises a SyntaxError.
from __future__ import absolute_import, division, print_function

import tensorflow as tf
import tflearn as t

# Truth table for logical NOT: f(0) = 1, f(1) = 0.
X = [[0.], [1.]]
Y = [[1.], [0.]]
| Python/Game AI/learn_TFLearn.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # (VAD) Velocity Azimuth Display
#
# Argonne National Laboratory
#
# Original code by <NAME>
import glob
from matplotlib import animation
import matplotlib.pyplot as plt
import numpy.ma as ma
import numpy as np
import pyart
# import warnings
# warnings.filterwarnings("ignore")
# %matplotlib inline
# +
"""
pyart.retrieve.velocity_azimuth_display
=======================================
Retrieval of VADs from a radar object.
.. autosummary::
    :toctree: generated/
:template: dev_template.rst
velocity_azimuth_display
_vad_calculation
_inverse_dist_squared
_Average1D
"""
import numpy as np
from pyart.core import HorizontalWindProfile
def velocity_azimuth_display(
        radar, velocity, z_want=None,
        valid_ray_min=16, gatefilter=None, window=2,
        weight='equal'):
    """
    Velocity azimuth display.

    Note: If a specific sweep is desired, before using the
    velocity_azimuth_display function, use, for example:
    one_sweep_radar = radar.extract_sweeps([0])

    Parameters
    ----------
    radar : Radar
        Radar object used.
    velocity : string
        Velocity field to use for VAD calculation.

    Other Parameters
    ----------------
    z_want : array
        Array of desired heights to be sampled for the VAD
        calculation. Defaults to 100 evenly spaced heights
        between 0 and 1000 m.
    valid_ray_min : int
        Amount of rays required to include that level in
        the VAD calculation.
    gatefilter : GateFilter
        A GateFilter indicating radar gates that should be excluded
        from the VAD calculation.
    window : int
        Value to use for the window when determining new values in the
        _Average1D function.
    weight : string
        A string to indicate weighting method to use. 'equal' for
        equal weighting when interpolating or 'idw' for inverse
        distance squared weighting for interpolating.
        Default is 'equal'.

    Returns
    -------
    vad : HorizontalWindProfile
        Wind profile containing the sampled heights, horizontal wind
        speed and direction, and the u and v wind components.

    Reference
    ----------
    Browning, K. A., and R. Wexler, 1968: The Determination
    of Kinematic Properties of a Wind Field Using Doppler
    Radar. J. Appl. Meteor., 7, 105-113
    """
    velocities = radar.fields[velocity]['data']
    if gatefilter is not None:
        # Exclude filtered gates from the calculation.
        velocities = np.ma.masked_where(
            gatefilter.gate_excluded, velocities)
    azimuths = radar.azimuth['data'][:]
    elevation = radar.fixed_angle['data'][0]

    u_wind, v_wind = _vad_calculation(velocities, azimuths,
                                      elevation, valid_ray_min)

    # Drop heights at which either wind component could not be retrieved.
    bad = np.logical_or(np.isnan(u_wind), np.isnan(v_wind))
    good_u_wind = u_wind[~bad]
    good_v_wind = v_wind[~bad]
    radar_height = radar.gate_z['data'][0]
    good_height = radar_height[~bad]

    # Default sampling heights. (A dead no-op "else: z_want" branch was
    # removed here; it evaluated z_want and discarded the result.)
    if z_want is None:
        z_want = np.linspace(0, 1000, 100)

    try:
        print('max height', np.max(good_height), ' meters')
        print('min height', np.min(good_height), ' meters')
    except ValueError:
        # np.max/np.min raise ValueError on an empty array, i.e. when no
        # level had enough valid rays.
        raise ValueError('Not enough data in this radar sweep ' \
                         'for a vad calculation.')

    # NOTE(review): by operator precedence this evaluates as
    # z_want[1] - (z_want[0] / window), not (z_want[1] - z_want[0]) / window.
    # Preserved as-is to keep behavior identical — confirm the intended
    # window width against the _Average1D contract.
    u_interp = _Average1D(good_height, good_u_wind,
                          z_want[1] - z_want[0] / window, weight)
    v_interp = _Average1D(good_height, good_v_wind,
                          z_want[1] - z_want[0] / window, weight)

    u_wanted = u_interp(z_want)
    v_wanted = v_interp(z_want)
    # _Average1D fills empty bins with 99999.; mask those out.
    u_wanted = np.ma.masked_equal(u_wanted, 99999.)
    v_wanted = np.ma.masked_equal(v_wanted, 99999.)

    vad = HorizontalWindProfile.from_u_and_v(
        z_want, u_wanted, v_wanted)
    return vad
def _vad_calculation(velocities, azimuths,
elevation, valid_ray_min):
""" Calculates VAD for a scan and returns u_mean and
v_mean. velocities is a 2D array, azimuths is a 1D
array, elevation is a number.
Jonathan's Note:
We need to solve: Ax = b
where:
A = [sum_sin_squared_az, sum_sin_cos_az ] = [a, b]
[sum_sin_cos_az, sum_cos_squared_az] [c, d]
b = [sum_sin_vel_dev] = [b_1]
[sum_cos_vel_dev] [b_2]
The solution to this is:
x = A-1 * b
A-1 is:
1 [ d, -b ]
--- * [ -c, a ]
|A|
and the determinate, det is: det = a*d - b*c
Therefore the elements of x are:
x_1 = (d* b_1 + -b * b_2) / det = (d*b_1 - b*b_2) / det
x_2 = (-c * b_1 + a * b_2) / det = (a*b_2 - c*b_1) / det
"""
velocities = velocities.filled(np.nan)
shape = velocities.shape
_, nbins = velocities.shape
invalid = np.isnan(velocities)
valid_rays_per_gate = np.sum(~np.isnan(velocities), axis=0)
too_few_valid_rays = valid_rays_per_gate < valid_ray_min
invalid[:, too_few_valid_rays] = True
sin_az = np.sin(np.deg2rad(azimuths))
cos_az = np.cos(np.deg2rad(azimuths))
sin_az = np.repeat(sin_az, nbins).reshape(shape)
cos_az = np.repeat(cos_az, nbins).reshape(shape)
sin_az[invalid] = np.nan
cos_az[invalid] = np.nan
mean_velocity_per_gate = np.nanmean(velocities, axis=0).reshape(1, -1)
velocity_deviation = velocities - mean_velocity_per_gate
sum_cos_vel_dev = np.nansum(cos_az * velocity_deviation, axis=0)
sum_sin_vel_dev = np.nansum(sin_az * velocity_deviation, axis=0)
sum_sin_cos_az = np.nansum(sin_az * cos_az, axis=0)
sum_sin_squared_az = np.nansum(sin_az**2, axis=0)
sum_cos_squared_az = np.nansum(cos_az**2, axis=0)
# The A matrix
a = sum_sin_squared_az
b = sum_sin_cos_az
c = sum_sin_cos_az
d = sum_cos_squared_az
# The b vector
b_1 = sum_sin_vel_dev
b_2 = sum_cos_vel_dev
# solve for the x vector
determinant = a*d - b*c
x_1 = (d*b_1 - b*b_2) / determinant
x_2 = (a*b_2 - c*b_1) / determinant
# calculate horizontal components of winds
elevation_scale = 1 / np.cos(np.deg2rad(elevation))
u_mean = x_1 * elevation_scale
v_mean = x_2 * elevation_scale
return u_mean, v_mean
def _inverse_dist_squared(dist):
""" Obtaining distance weights by using distance weighting
interpolation, using the inverse distance-squared relationship.
"""
weights = 1 / (dist * dist)
weights[np.isnan(weights)] = 99999.
return weights
class _Average1D(object):
""" Used to find the nearest gate height and horizontal wind
value with respect to the user's desired height. """
def __init__(self, x, y, window, weight,
fill_value=99999.):
sort_idx = np.argsort(x)
self.x_sorted = x[sort_idx]
self.y_sorted = y[sort_idx]
self.window = window
self.fill_value = fill_value
if weight == 'equal':
self.weight_func = lambda x: None
elif weight == 'idw':
self.weight_func = _inverse_dist_squared
elif callable(weight):
self.weight_func = weight
else:
raise ValueError("Invalid weight argument:", weight)
def __call__(self, x_new, window=None):
if window is None:
window = self.window
y_new = np.zeros_like(x_new, dtype=self.y_sorted.dtype)
for i, center in enumerate(x_new):
bottom = center - window
top = center + window
start = np.searchsorted(self.x_sorted, bottom)
stop = np.searchsorted(self.x_sorted, top)
x_in_window = self.x_sorted[start:stop]
y_in_window = self.y_sorted[start:stop]
if len(x_in_window) == 0:
y_new[i] = self.fill_value
else:
distances = x_in_window - center
weights = self.weight_func(distances)
y_new[i] = np.average(y_in_window, weights=weights)
return y_new
# +
# Sanity-check cell: build a synthetic wind profile, simulate a radar
# sweep from it, run the VAD retrieval and compare against the truth.
# NOTE(review): `pyart` and `plt` are not imported in this file's
# visible header — assumed available in the notebook session.
# create a profile of 100 heights up to 1000 m with 5 m/s winds at 25 degrees
height = np.linspace(0, 1000, 100)
speed = np.ones_like(height) * 5
direction = np.ones_like(height) * 25
profile = pyart.core.HorizontalWindProfile(height, speed, direction)
# simulate a single sweep radar with a velocity field from the profile
test_radar = pyart.testing.make_target_radar()
test_radar.elevation['data'][:] = 45.0
test_radar.fixed_angle['data'][:] = 45.0
print("max height:", test_radar.gate_z['data'].max())
sim_vel = pyart.util.simulated_vel_from_profile(test_radar, profile)
test_radar.add_field('velocity', sim_vel, replace_existing=True)
# perform a VAD retrieval
vad_test = velocity_azimuth_display(
    test_radar, 'velocity', valid_ray_min=16,
    z_want=height, window=2,
    gatefilter=None)
# Overlay truth (solid) and retrieval (dashed) for both components.
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(profile.u_wind, profile.height, 'r-')
ax.plot(profile.v_wind, profile.height, 'b-')
ax.plot(vad_test.u_wind, vad_test.height, 'g--')
ax.plot(vad_test.v_wind, vad_test.height, 'k--')
ax.set_xlim(-5, 0)
plt.show()
# print out the results (truth first, retrieval second)
print("Height")
print(profile.height[:5])
print(vad_test.height[:5])
print(profile.height[-5:])
print(vad_test.height[-5:])
print("u_wind:")
print(profile.u_wind[:5])
print(vad_test.u_wind[:5])
print(profile.u_wind[-5:])
print(vad_test.u_wind[-5:])
print("v_wind:")
print(profile.v_wind)
print(vad_test.v_wind)
#print(profile.v_wind[-5:])
#print(vad_test.v_wind[-5:])
print("speed:")
print(profile.speed[:5])
print(vad_test.speed[:5])
print(vad_test.speed[-5:])
print("direction:")
print(profile.direction[:5])
print(vad_test.direction[:5])
print(vad_test.direction[-5:])
# -
# Real-data check: run the VAD retrieval on one sweep of a NEXRAD volume.
# NOTE(review): hard-coded local path — will only run on the author's machine.
filename = '/home/zsherman/dev/training_exercises/data/KLOT20130417_235520_V06.gz'
myradar = pyart.io.read(filename)
# +
test_radar = pyart.io.read(pyart.testing.NEXRAD_ARCHIVE_MSG1_FILE)
new_radar = test_radar.extract_sweeps([4])
radar_sweep = myradar.extract_sweeps([5])
height = np.linspace(50, 10766, 101)
valid_ray_min = 16
window = 2
print("max height:", new_radar.gate_z['data'][0].max())
# perform a VAD retrieval
vad_test = velocity_azimuth_display(
    radar_sweep, 'velocity', z_want=height, gatefilter=None,
    valid_ray_min=valid_ray_min)
# print out the results
print("Height")
print(vad_test.height)
print("u_wind:")
print(vad_test.u_wind)
print("v_wind:")
print(vad_test.v_wind)
print("speed:")
print(vad_test.speed)
print("direction:")
print(vad_test.direction)
# -
# Without in velocity_azimuth_display function:
# mask=velocities.mask
# velocities[np.where(mask)]=np.nan
# Plot the retrieved U/V wind components against height.
fig = plt.figure()
plt.plot(vad_test.u_wind, vad_test.height, 'b-', label='U Wind')
plt.plot(vad_test.v_wind, vad_test.height, 'r-', label='V Wind')
plt.xlim(-25, 35)
plt.ylim(-500, 11000)
plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
plt.show()
# Collect the first 25 X-SAPR files for the animation below.
# NOTE(review): `glob` is not imported in this file's visible header.
files = sorted(glob.glob('/home/zsherman/dev/xsapr_sgp/data/05_19_2017/*'))[0:25]
# +
def animate(nframe):
    """Render animation frame *nframe*: PPI reflectivity map (left)
    and the corresponding VAD wind profile (right).

    Relies on ``files`` and ``fig`` from the enclosing notebook scope.
    """
    plt.clf()
    radar = pyart.io.read(files[nframe])
    radar_sweep = radar.extract_sweeps([11])
    height = np.linspace(101, 11000, 101)
    valid_ray_min = 16
    # perform a VAD retrieval
    vad = velocity_azimuth_display(
        radar_sweep, 'velocity', z_want=height, gatefilter=None,
        valid_ray_min=valid_ray_min)
    ax1 = fig.add_subplot(121)
    display = pyart.graph.RadarMapDisplay(radar_sweep)
    display.plot_ppi_map('reflectivity', resolution='l',
                         vmin=-8, vmax=64, mask_outside=False,
                         cmap=pyart.graph.cm.NWSRef,
                         lat_lines=None, ax=ax1, lon_lines=None)
    display.basemap.drawcounties()
    ax2 = fig.add_subplot(122)
    plt.plot(vad.u_wind, vad.height, 'b-', label='U Wind')
    plt.plot(vad.v_wind, vad.height, 'r-', label='V Wind')
    plt.xlim(-25, 35)
    plt.ylim(101, 11000)
    plt.legend(bbox_to_anchor=(1.05, 1), loc=2, borderaxespad=0.)
    # Release the radar objects between frames to keep memory bounded.
    del radar
    del radar_sweep


# NOTE(review): `animation` (matplotlib.animation) is not imported in
# this file's visible header — assumed available in the session.
fig = plt.figure(figsize=(15, 6))
anim = animation.FuncAnimation(fig, animate,
                               frames=len(files))
anim.save('/home/zsherman/vad_jonathan.gif', writer='imagemagick', fps=1)
plt.close()
# -
| notebooks/vad_function_jonathans_code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:py36]
# language: python
# name: conda-env-py36-py
# ---
# ## Lists
# Lists are written within square brackets [ ]
# | | | | | |
# | --- | --- | --- | --- | --- |
# | z = | [3, | 7, | 4, | 2] |
# | index | 0 | 1 | 2 | 3 |
# Defining a list
z = [3, 7, 4, 2]
# ### Accessing Values in List
# The first element of a list is at index 0
z[0]
z[2]
# Negative indexing counts from the end: -2 is the second-to-last element
z[-2]
# ### Slicing Lists
# first index is inclusive (before the :) and last (after the :) is not.
# not including index 2
z[0:2]
# everything up to index 3
z[:3]
# index 1 to end of list
z[1:]
# ### Minimum, Maximum, Length, and Sum of a list
print(min(z), max(z), len(z), sum(z))
# ### Count Number of Times Value is in List
random_list = [4, 1, 5, 4, 10, 4]
random_list.count(4)
# ### Return First Index of Value
# | | | | | | | |
# | --- | --- | --- | --- | --- | --- | --- |
# | random_list = | [4, | 1, | 5, | 4, | 10, | 4] |
# | index = | 0 | 1 | 2 | 3 | 4 | 5 |
random_list.index(4)
# you can specify where you start your search
random_list.index(4, 3)
# random_list.index(value, [start, stop])
random_list.index(4, 5, 6)
# ### Sorting a list
x = [3, 7, 2, 11, 8, 10, 4]
y = ['Steve', 'Rachel', 'Michael', 'Adam', 'Monica', 'Jessica', 'Lester']
# Sorting and Altering original list
# low to high
x.sort()
print(x)
# Sorting and Altering original list
# high to low
x.sort(reverse = True)
print(x)
# Sorting and Altering original list
# A-Z
y.sort()
print(y)
# Sorting and Altering original list
# Z-A
y.sort(reverse = True)
print(y)
# sorting list WITHOUT altering original list (sorted returns a new list)
new_list = sorted(y)
new_list
# notice y is unchanged
y
# ### Add to the End of List
x
x.append(3)
print(x)
# ### Remove First Occurrence of Value in List
x.remove(10)
print(x)
# ### Remove Item at the Index you Provide
# List before you remove an item
print(x)
# Remove item at the index
# this function will also return the item you removed from the list
# Default is the last index
x.pop(3)
print(x)
# ### Extend (Useful for Concatenating Lists)
# Extend list by appending elements (iterable)
x.extend([4, 5])
x
# You can also use + for concatenating lists
# +
# lists can be diverse, each element in the list can be of a different type.
# lists are really list of pointers, and these pointers can
# point to anything.
# Concatenating Lists
print('x+y=',x+y)
# -
# ### Insert Item before the Index you Provide
x
x.insert(4, [4, 5])
x
# **if this tutorial doesn't cover what you are looking for, please leave a comment on the youtube video and I will try to cover what you are interested in. (Please subscribe if you can!)**
# https://www.youtube.com/watch?v=w9I8R3WSVqc
| Python_Basics/Intro/Python3Basics_Video6_Blog_Companion.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Interpreting nodes and edges with saliency maps in GCN (sparse)
#
# + [markdown] nbsphinx="hidden" tags=["CloudRunner"]
# <table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/interpretability/gcn-sparse-node-link-importance.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/interpretability/gcn-sparse-node-link-importance.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
# -
# This demo shows how to use integrated gradients in graph convolutional networks to obtain accurate importance estimations for both the nodes and edges. The notebook consists of three parts:
# - setting up the node classification problem for Cora citation network
# - training and evaluating a GCN model for node classification
# - calculating node and edge importances for model's predictions of query ("target") nodes
#
# <a name="refs"></a>
# **References**
#
# [1] Axiomatic Attribution for Deep Networks. <NAME>, <NAME>, and <NAME>.
# Proceedings of the 34th International Conference on Machine Learning, Sydney, Australia, PMLR 70, 2017
# ([link](https://arxiv.org/pdf/1703.01365.pdf)).
#
# [2] Adversarial Examples on Graph Data: Deep Insights into Attack and Defense. <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. arXiv: 1903.01610 ([link](https://arxiv.org/abs/1903.01610)).
#
# + nbsphinx="hidden" tags=["CloudRunner"]
# install StellarGraph if running on Google Colab
import sys
if 'google.colab' in sys.modules:
# %pip install -q stellargraph[demos]==1.2.1
# + nbsphinx="hidden" tags=["VersionCheck"]
# verify that we're using the correct version of StellarGraph for this notebook
import stellargraph as sg
try:
sg.utils.validate_notebook_version("1.2.1")
except AttributeError:
raise ValueError(
f"This notebook requires StellarGraph version 1.2.1, but a different version {sg.__version__} is installed. Please see <https://github.com/stellargraph/stellargraph/issues/1172>."
) from None
# -
import networkx as nx
import pandas as pd
import numpy as np
from scipy import stats
import os
import time
import stellargraph as sg
from stellargraph.mapper import FullBatchNodeGenerator
from stellargraph.layer import GCN
from tensorflow import keras
from tensorflow.keras import layers, optimizers, losses, metrics, Model, regularizers
from sklearn import preprocessing, feature_extraction, model_selection
from copy import deepcopy
import matplotlib.pyplot as plt
from stellargraph import datasets
from IPython.display import display, HTML
# %matplotlib inline
# ## Loading the CORA network
# + [markdown] tags=["DataLoadingLinks"]
# (See [the "Loading from Pandas" demo](../basics/loading-pandas.ipynb) for details on how data can be loaded.)
# + tags=["DataLoading"]
dataset = datasets.Cora()
display(HTML(dataset.description))
G, subjects = dataset.load()
# -
# ### Splitting the data
# For machine learning we want to take a subset of the nodes for training, and use the rest for validation and testing. We'll use scikit-learn again to do this.
#
# Here we're taking 140 node labels for training, 500 for validation, and the rest for testing.
# Stratified splits keep the class proportions of `subjects` intact.
train_subjects, test_subjects = model_selection.train_test_split(
    subjects, train_size=140, test_size=None, stratify=subjects
)
val_subjects, test_subjects = model_selection.train_test_split(
    test_subjects, train_size=500, test_size=None, stratify=test_subjects
)
# ### Converting to numeric arrays
# For our categorical target, we will use one-hot vectors that will be fed into a soft-max Keras layer during training. To do this conversion ...
# +
# One-hot encode the class labels; the encoder is fit on training data only.
target_encoding = preprocessing.LabelBinarizer()
train_targets = target_encoding.fit_transform(train_subjects)
val_targets = target_encoding.transform(val_subjects)
test_targets = target_encoding.transform(test_subjects)
all_targets = target_encoding.transform(subjects)
# -
# ### Creating the GCN model in Keras
# To feed data from the graph to the Keras model we need a generator. Since GCN is a full-batch model, we use the `FullBatchNodeGenerator` class.
# GCN is a full-batch model: the generator feeds the whole graph at once.
generator = FullBatchNodeGenerator(G, sparse=True)
# For training we map only the training nodes returned from our splitter and the target values.
train_gen = generator.flow(train_subjects.index, train_targets)
# Now we can specify our machine learning model: in this example we use two GCN layers with 16-dimensional hidden node features at each layer with ELU activation functions.
layer_sizes = [16, 16]
gcn = GCN(
    layer_sizes=layer_sizes,
    activations=["elu", "elu"],
    generator=generator,
    dropout=0.3,
    kernel_regularizer=regularizers.l2(5e-4),
)
# Expose the input and output tensors of the GCN model for node prediction, via GCN.in_out_tensors() method:
x_inp, x_out = gcn.in_out_tensors()
# Snap the final estimator layer to x_out
x_out = layers.Dense(units=train_targets.shape[1], activation="softmax")(x_out)
# ### Training the model
# Now let's create the actual Keras model with the input tensors `x_inp` and output tensors being the predictions `x_out` from the final dense layer
# +
model = keras.Model(inputs=x_inp, outputs=x_out)
# NOTE(review): `lr` is deprecated in newer Keras in favor of
# `learning_rate`; kept as-is for the TF version this demo targets.
model.compile(
    optimizer=optimizers.Adam(lr=0.01),  # decay=0.001),
    loss=losses.categorical_crossentropy,
    metrics=[metrics.categorical_accuracy],
)
# -
# Train the model, keeping track of its loss and accuracy on the training set, and its generalisation performance on the validation set (we need to create another generator over the validation data for this)
val_gen = generator.flow(val_subjects.index, val_targets)
# Train the model
history = model.fit(
    train_gen, shuffle=False, epochs=20, verbose=2, validation_data=val_gen
)
sg.utils.plot_history(history)
# Evaluate the trained model on the test set
test_gen = generator.flow(test_subjects.index, test_targets)
test_metrics = model.evaluate(test_gen)
print("\nTest Set Metrics:")
for name, val in zip(model.metrics_names, test_metrics):
    print("\t{}: {:0.4f}".format(name, val))
# ## Node and link importance via saliency maps
# In order to understand why a selected node is predicted as a certain class we want to find the node feature importance, total node importance, and link importance for nodes and edges in the selected node's neighbourhood (ego-net). These importances give information about the effect of changes in the node's features and its neighbourhood on the prediction of the node, specifically:
#
# - **Node feature importance**: Given the selected node $t$ and the model's prediction $s(c)$ for class $c$. The feature importance can be calculated for each node $v$ in the selected node's ego-net where the importance of feature $f$ for node $v$ is the change predicted score $s(c)$ for the selected node when the feature $f$ of node $v$ is perturbed.
# - **Total node importance**: This is defined as the sum of the feature importances for node $v$ for all features. Nodes with high importance (positive or negative) affect the prediction for the selected node more than links with low importance.
# - **Link importance**: This is defined as the change in the selected node's predicted score $s(c)$ if the link $e=(u, v)$ is removed from the graph. Links with high importance (positive or negative) affect the prediction for the selected node more than links with low importance.
#
# Node and link importances can be used to assess the role of nodes and links in model's predictions for the node(s) of interest (the selected node). For datasets like CORA-ML, the features and edges are binary, vanilla gradients may not perform well so we use integrated gradients [[1]](#refs) to compute them.
#
# Another interesting application of node and link importances is to identify model vulnerabilities to attacks via perturbing node features and graph structure (see [[2]](#refs)).
# To investigate these importances we use the StellarGraph `saliency_maps` routines:
from stellargraph.interpretability.saliency_maps import IntegratedGradients
# Select the target node whose prediction is to be interpreted
graph_nodes = list(G.nodes())
target_nid = 1109199
target_idx = graph_nodes.index(target_nid)
y_true = all_targets[target_idx]  # true class of the target node
# +
# Predict over all nodes and pick out the score vector of the target.
all_gen = generator.flow(graph_nodes)
y_pred = model.predict(all_gen)[0, target_idx]
class_of_interest = np.argmax(y_pred)
print(
    "Selected node id: {}, \nTrue label: {}, \nPredicted scores: {}".format(
        target_nid, y_true, y_pred.round(2)
    )
)
# -
# Get the node feature importance by using integrated gradients
int_grad_saliency = IntegratedGradients(model, train_gen)
# For the parameters of `get_node_importance` method, `X` and `A` are the feature and adjacency matrices, respectively. If `sparse` option is enabled, `A` will be the non-zero values of the adjacency matrix with `A_index` being the indices. `target_idx` is the node of interest, and `class_of_interest` is set as the predicted label of the node. `steps` indicates the number of steps used to approximate the integration in integrated gradients calculation. A larger value of `steps` gives better approximation, at the cost of higher computational overhead.
integrated_node_importance = int_grad_saliency.get_node_importance(
    target_idx, class_of_interest, steps=50
)
integrated_node_importance.shape
print("\nintegrated_node_importance", integrated_node_importance.round(2))
print("integrate_node_importance.shape = {}".format(integrated_node_importance.shape))
print(
    "integrated self-importance of target node {}: {}".format(
        target_nid, integrated_node_importance[target_idx].round(2)
    )
)
# Check that number of non-zero node importance values is less or equal the number of nodes in target node's K-hop ego net (where K is the number of GCN layers in the model)
G_ego = nx.ego_graph(G.to_networkx(), target_nid, radius=len(gcn.activations))
print("Number of nodes in the ego graph: {}".format(len(G_ego.nodes())))
print(
    "Number of non-zero elements in integrated_node_importance: {}".format(
        np.count_nonzero(integrated_node_importance)
    )
)
# We now compute the link importance using integrated gradients [[1]](#refs). Integrated gradients are obtained by accumulating the gradients along the path between the baseline (all-zero graph) and the state of the graph. They provide better sensitivity for the graphs with binary features and edges compared with the vanilla gradients.
integrate_link_importance = int_grad_saliency.get_integrated_link_masks(
    target_idx, class_of_interest, steps=50
)
# Densify the sparse importance matrix for ranking and plotting below.
integrate_link_importance_dense = np.array(integrate_link_importance.todense())
print("integrate_link_importance.shape = {}".format(integrate_link_importance.shape))
print(
    "Number of non-zero elements in integrate_link_importance: {}".format(
        np.count_nonzero(integrate_link_importance.todense())
    )
)
# We can now find the nodes that have the highest importance to the prediction of the selected node:
sorted_indices = np.argsort(integrate_link_importance_dense.flatten())
N = len(graph_nodes)
# Convert flat indices back to (row, col) = (source, destination) pairs.
integrated_link_importance_rank = [(k // N, k % N) for k in sorted_indices[::-1]]
topk = 10
# integrate_link_importance = integrate_link_importance_dense
print(
    "Top {} most important links by integrated gradients are:\n {}".format(
        topk, integrated_link_importance_rank[:topk]
    )
)
# Set the labels as an attribute for the nodes in the graph. The labels are used to color the nodes in different classes.
nx.set_node_attributes(G_ego, values={x[0]: {"subject": x[1]} for x in subjects.items()})
# In the following, we plot the link and node importance (computed by integrated gradients) of the nodes within the ego graph of the target node.
#
# For nodes, the shape of the node indicates the positive/negative importance the node has. 'round' nodes have positive importance while 'diamond' nodes have negative importance. The size of the node indicates the value of the importance, e.g., a large diamond node has higher negative importance.
#
# For links, the color of the link indicates the positive/negative importance the link has. 'red' links have positive importance while 'blue' links have negative importance. The width of the link indicates the value of the importance, e.g., a thicker blue link has higher negative importance.
integrated_node_importance.max()
integrate_link_importance.max()
# +
node_size_factor = 1e2
link_width_factor = 2
nodes = list(G_ego.nodes())
colors = pd.DataFrame(
    [v[1]["subject"] for v in G_ego.nodes(data=True)], index=nodes, columns=["subject"]
)
colors = np.argmax(target_encoding.transform(colors), axis=1) + 1
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
pos = nx.spring_layout(G_ego)
# Node sizes are scaled by integrated-gradients importance; sign picks the marker shape.
node_sizes = [integrated_node_importance[graph_nodes.index(k)] for k in nodes]
node_shapes = ["o" if w > 0 else "d" for w in node_sizes]
positive_colors, negative_colors = [], []
positive_node_sizes, negative_node_sizes = [], []
positive_nodes, negative_nodes = [], []
node_size_scale = node_size_factor / np.max(node_sizes)
for k in range(len(nodes)):
    if nodes[k] == target_idx:
        continue
    if node_shapes[k] == "o":
        positive_colors.append(colors[k])
        positive_nodes.append(nodes[k])
        positive_node_sizes.append(node_size_scale * node_sizes[k])
    else:
        negative_colors.append(colors[k])
        negative_nodes.append(nodes[k])
        negative_node_sizes.append(node_size_scale * abs(node_sizes[k]))
# Plot the ego network with the node importances
cmap = plt.get_cmap("jet", np.max(colors) - np.min(colors) + 1)
nc = nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=positive_nodes,
    node_color=positive_colors,
    cmap=cmap,
    node_size=positive_node_sizes,
    with_labels=False,
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    node_shape="o",
)
nc = nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=negative_nodes,
    node_color=negative_colors,
    cmap=cmap,
    node_size=negative_node_sizes,
    with_labels=False,
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    node_shape="d",
)
# Draw the target node as a large star colored by its true subject
nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=[target_nid],
    node_size=50 * abs(node_sizes[nodes.index(target_nid)]),
    node_shape="*",
    node_color=[colors[nodes.index(target_nid)]],
    cmap=cmap,
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    label="Target",
)
# Draw the edges with the edge importances (red = positive, blue = negative)
edges = G_ego.edges()
weights = [
    integrate_link_importance[graph_nodes.index(u), graph_nodes.index(v)]
    for u, v in edges
]
edge_colors = ["red" if w > 0 else "blue" for w in weights]
weights = link_width_factor * np.abs(weights) / np.max(weights)
ec = nx.draw_networkx_edges(G_ego, pos, edge_color=edge_colors, width=weights)
plt.legend()
plt.colorbar(nc, ticks=np.arange(np.min(colors), np.max(colors) + 1))
plt.axis("off")
plt.show()
# -
# We then remove the node or edge in the ego graph one by one and check how the prediction changes. By doing so, we can obtain the ground truth importance of the nodes and edges. Comparing the following figure and the above one can show the effectiveness of integrated gradients as the importance approximations are relatively consistent with the ground truth.
(X, _, A_index, A), _ = train_gen[0]
# +
# Brute-force ground truth: zero out each node's features (or each edge
# weight) in turn and measure the change in the target's predicted score.
X_bk = deepcopy(X)
A_bk = deepcopy(A)
selected_nodes = np.array([[target_idx]], dtype="int32")
nodes = [graph_nodes.index(v) for v in G_ego.nodes()]
edges = [(graph_nodes.index(u), graph_nodes.index(v)) for u, v in G_ego.edges()]
clean_prediction = model.predict([X, selected_nodes, A_index, A]).squeeze()
predict_label = np.argmax(clean_prediction)
# NOTE(review): "groud" is a typo for "ground" throughout this cell.
groud_truth_node_importance = np.zeros((N,))
for node in nodes:
    # we set all the features of the node to zero to check the ground truth node importance.
    X_perturb = deepcopy(X_bk)
    X_perturb[:, node, :] = 0
    predict_after_perturb = model.predict(
        [X_perturb, selected_nodes, A_index, A]
    ).squeeze()
    groud_truth_node_importance[node] = (
        clean_prediction[predict_label] - predict_after_perturb[predict_label]
    )
# NOTE(review): importances are stored at graph index `nodes[k]`, but this
# comprehension indexes by the list position `k` — looks like it should be
# groud_truth_node_importance[nodes[k]]; confirm against upstream demo.
node_shapes = [
    "o" if groud_truth_node_importance[k] > 0 else "d" for k in range(len(nodes))
]
positive_colors, negative_colors = [], []
positive_node_sizes, negative_node_sizes = [], []
positive_nodes, negative_nodes = [], []
# node_size_scale is used for better visualization of nodes
node_size_scale = node_size_factor / max(groud_truth_node_importance)
for k in range(len(node_shapes)):
    if nodes[k] == target_idx:
        continue
    if node_shapes[k] == "o":
        positive_colors.append(colors[k])
        positive_nodes.append(graph_nodes[nodes[k]])
        positive_node_sizes.append(
            node_size_scale * groud_truth_node_importance[nodes[k]]
        )
    else:
        negative_colors.append(colors[k])
        negative_nodes.append(graph_nodes[nodes[k]])
        negative_node_sizes.append(
            node_size_scale * abs(groud_truth_node_importance[nodes[k]])
        )
X = deepcopy(X_bk)
groud_truth_edge_importance = np.zeros((N, N))
G_edge_indices = [(A_index[0, k, 0], A_index[0, k, 1]) for k in range(A.shape[1])]
for edge in edges:
    edge_index = G_edge_indices.index((edge[0], edge[1]))
    origin_val = A[0, edge_index]
    A[0, edge_index] = 0
    # we set the weight of a given edge to zero to check the ground truth link importance
    predict_after_perturb = model.predict([X, selected_nodes, A_index, A]).squeeze()
    groud_truth_edge_importance[edge[0], edge[1]] = (
        predict_after_perturb[predict_label] - clean_prediction[predict_label]
    ) / (0 - 1)
    # restore the edge weight before perturbing the next edge
    A[0, edge_index] = origin_val
fig, ax = plt.subplots(1, 1, figsize=(15, 10))
cmap = plt.get_cmap("jet", np.max(colors) - np.min(colors) + 1)
# Draw the target node as a large star colored by its true subject
nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=[target_nid],
    node_size=50 * abs(node_sizes[nodes.index(target_idx)]),
    node_color=[colors[nodes.index(target_idx)]],
    cmap=cmap,
    node_shape="*",
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    label="Target",
)
# Draw the ego net
nc = nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=positive_nodes,
    node_color=positive_colors,
    cmap=cmap,
    node_size=positive_node_sizes,
    with_labels=False,
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    node_shape="o",
)
nc = nx.draw_networkx_nodes(
    G_ego,
    pos,
    nodelist=negative_nodes,
    node_color=negative_colors,
    cmap=cmap,
    node_size=negative_node_sizes,
    with_labels=False,
    vmin=np.min(colors) - 0.5,
    vmax=np.max(colors) + 0.5,
    node_shape="d",
)
edges = G_ego.edges()
weights = [
    groud_truth_edge_importance[graph_nodes.index(u), graph_nodes.index(v)]
    for u, v in edges
]
edge_colors = ["red" if w > 0 else "blue" for w in weights]
weights = link_width_factor * np.abs(weights) / np.max(weights)
ec = nx.draw_networkx_edges(G_ego, pos, edge_color=edge_colors, width=weights)
plt.legend()
plt.colorbar(nc, ticks=np.arange(np.min(colors), np.max(colors) + 1))
plt.axis("off")
plt.show()
# -
# By comparing the above two figures, one can see that the integrated gradients are quite consistent with the brute-force approach. The main benefit of using integrated gradients is scalability. The gradient operations are very efficient to compute on deep learning frameworks with the parallelism provided by GPUs. Also, integrated gradients can give the importance of individual node features, for all nodes in the graph. Achieving this by brute-force approach is often non-trivial.
# + [markdown] nbsphinx="hidden" tags=["CloudRunner"]
# <table><tr><td>Run the latest release of this notebook:</td><td><a href="https://mybinder.org/v2/gh/stellargraph/stellargraph/master?urlpath=lab/tree/demos/interpretability/gcn-sparse-node-link-importance.ipynb" alt="Open In Binder" target="_parent"><img src="https://mybinder.org/badge_logo.svg"/></a></td><td><a href="https://colab.research.google.com/github/stellargraph/stellargraph/blob/master/demos/interpretability/gcn-sparse-node-link-importance.ipynb" alt="Open In Colab" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg"/></a></td></tr></table>
| demos/interpretability/gcn-sparse-node-link-importance.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Deep Convolutional Generative Adversarial Network (DCGAN) on Cats
#
# This notebook runs the code in dcgan.py on grayscale 64x64 images of cats as the training set
# + id="YfIk2es3hJEd"
# %load_ext autoreload
# %autoreload 2
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
from dcgan import *
# + [markdown] id="iYn4MdZnKCey"
# ### Load and prepare the dataset and the GAN
# + id="S4PIDhoDLbsZ"
# Square image resolution (pixels) and size of the generator's latent noise vector.
imgres = 64
noise_dim = 100
foldername = 'cats_{}x{}_grayscale/all'.format(imgres, imgres)
# DCGAN is defined in dcgan.py; presumably it loads images from `foldername` — verify there.
gan = DCGAN(foldername, imgres, noise_dim)
# -
# Generate a random image, which should look like noise at this point
gan.generate_random_image()
# + [markdown] id="GyWgG09LCSJl"
# ### Run the training loop
# + id="Ly3UN0SLLY2l"
# Train for 40 epochs; per-epoch generator/discriminator losses are returned.
all_gen_losses, all_disc_losses = gan.train(40)
# -
# Generate a new random image, which should now look something like a cat each time you run this cell
gan.generate_random_image()
| CatsGAN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="GxqD3g0u06PA" outputId="e72424a8-c946-495c-9a91-2a81fd7a8ebf" colab={"base_uri": "https://localhost:8080/"}
# %cd ..
# %ls
# + id="G2KpTfd91Wgr" outputId="31c8a982-cd2b-4378-c016-e77ccc85ca40" colab={"base_uri": "https://localhost:8080/"}
# !pip uninstall tensorflow
# !pip install tensorflow==1.15.0
# + id="BN-UNfPq1ZR4" outputId="fa82de71-9895-4a9f-f08d-ad48e512ae82" colab={"base_uri": "https://localhost:8080/"}
# %cd content/
# + id="dXs7oHMS16Wc" outputId="8fc7ad10-5ead-44b7-c62a-f5e9d3c868c8" colab={"base_uri": "https://localhost:8080/"}
# !git clone https://github.com/tensorflow/models.git
# + id="2bz87BvZ18bJ" outputId="784f0643-c8d8-4e41-b604-a8b2a41c6ba2" colab={"base_uri": "https://localhost:8080/"}
# %cd models/research/deeplab
# + id="DNVwxfV72z6p" outputId="fa05460f-0b96-49a7-a12f-e8861184d2eb" colab={"base_uri": "https://localhost:8080/"}
# %env PYTHONPATH=/env/python/:/content/models/research/:/content/models/research/slim
# + id="CctS_-T-2527" outputId="41405e1c-71ae-44ad-c24b-aef4c75e3c1f" colab={"base_uri": "https://localhost:8080/"}
# !pip install tf_slim
# !python model_test.py
# + id="YJTro9bV3ChD" outputId="95b23674-dc95-4a87-f9ff-3d7bf4b04ad2" colab={"base_uri": "https://localhost:8080/"}
# !python datasets/build_voc2012_data.py \
# --image_folder="/content/models/research/deeplab/datasets/PQR/dataset/JPEGImages" \
# --semantic_segmentation_folder="/content/models/research/deeplab/datasets/PQR/dataset/SegmentationClassRaw" \
# --list_folder="/content/models/research/deeplab/datasets/PQR/dataset/ImageSets" \
# --image_format="jpg" \
# --output_dir="/content/models/research/deeplab/datasets/PQR/tfrecord"
# + id="TDovqyQd1bI_" outputId="744aa4d4-595b-4348-d816-c26e90aba2e2" colab={"base_uri": "https://localhost:8080/"}
# %cd backbone/
# + id="oflmYmjh1dwT"
# !tar -xf deeplabv3_cityscapes_train_2018_02_06.tar.gz
# + id="CmbxHJ4C1hu9" outputId="d0cc062b-5ef1-4a54-e641-1fb98fcc34ba" colab={"base_uri": "https://localhost:8080/"}
# %cd ..
# + id="J48OWkpR1kUS" outputId="2c4d899e-76d8-4aa6-94aa-082dc975a36d" colab={"base_uri": "https://localhost:8080/"}
# !sh ./train-pqr.sh
# + id="4CzdCqmU1riD" outputId="92d38bca-2d9c-4336-c4f1-857cfab4e43c" colab={"base_uri": "https://localhost:8080/"}
# !python eval.py \
# --logtostderr \
# --eval_split="val" \
# --model_variant="xception_65" \
# --dataset="pqr" \
# --atrous_rates=6 \
# --atrous_rates=12 \
# --atrous_rates=18 \
# --output_stride=16 \
# --decoder_output_stride=4 \
# --eval_crop_size=1025,2049 \
# --checkpoint_dir="/content/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/train" \
# --eval_logdir="/content/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/eval" \
# --dataset_dir="/content/models/research/deeplab/datasets/PQR/tfrecord" \
# --max_number_of_evaluations=1
# + id="k67kxABjILUt" outputId="0d6fc6be-db96-4c76-bff3-199edd2075e4" colab={"base_uri": "https://localhost:8080/"}
#issue with tensorflow solved
# !python vis.py \
# --logtostderr \
# --vis_split="val" \
# --model_variant="xception_65" \
# --dataset="pqr" \
# --atrous_rates=6 \
# --atrous_rates=12 \
# --atrous_rates=18 \
# --output_stride=16 \
# --decoder_output_stride=4 \
# --vis_crop_size=1025,2049 \
# --checkpoint_dir="/content/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/train" \
# --vis_logdir="/content/models/research/deeplab/datasets/PQR/exp/train_on_trainval_set/vis" \
# --dataset_dir="/content/models/research/deeplab/datasets/PQR/tfrecord" \
# --max_number_of_iterations=1
# + id="fjpDbZuccb7y"
| Custom_Deeplab (1).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/MedleyHealth/TypeAssist/blob/master/TypeAssist_Training.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="179maOLRNVcK"
# # **Important: Do not save the output from code cells in this notebook to Github (or any other public location). Access to the dataset is restricted and we cannot leak any information about individual samples.**
#
# To suppress the output in Google Colab:
#
# 1. Go to Edit > Notebook Settings
# 2. Make sure the checkbox is ticked for "Omit code cell output when saving this notebook"
#
# # **If you have any doubts about what this means, message me first before committing.**
# + [markdown] colab_type="text" id="d7zOLwPlJq_E"
# ### Modified from [code](https://nbviewer.jupyter.org/github/PrithivirajDamodaran/NLP-Experiments/blob/master/Gmail_style_smart_compose_with_char_ngram_based_language_model.ipynb) created by [<NAME>](https://github.com/PrithivirajDamodaran)
# + [markdown] colab_type="text" id="Xo_mZJwRNXmn"
# # Data Loading
# + [markdown] colab_type="text" id="AENHqAEUOXHN"
# ### Import libraries and set seeds (must use Tensorflow 1.x)
# + colab={} colab_type="code" id="dOejHYgt_0U7"
# %tensorflow_version 1.x
# %matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, CuDNNLSTM, Embedding, Flatten, TimeDistributed, Dropout, LSTMCell, RNN, Bidirectional, Concatenate, Layer
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.utils import tf_utils
from tensorflow.keras import backend as K
from tensorflow.keras.models import model_from_json
from tensorflow.keras.models import load_model
from datetime import datetime
import random
import unicodedata
import re
import os
import time
import shutil
import string
import os
# Fix the RNG seeds so runs are reproducible.
seed = 23
random.seed(seed)
np.random.seed(seed)
# Displayed to confirm the TF 1.x runtime is active (this notebook requires it).
tf.__version__
# + [markdown] colab_type="text" id="nW_oNNe-OYox"
# ### Mount Google Drive where dataset is saved
# + colab={} colab_type="code" id="Q5G_v6QLT8k3"
from google.colab import drive
drive.mount('/content/drive')
# + [markdown] colab_type="text" id="oiomWx6YOd5n"
# ### Load dataset from path in Google Drive (change path to your location)
# + colab={} colab_type="code" id="5cj9Bgc3T-fV"
# MIMIC NOTEEVENTS is access-restricted clinical data — do not commit any cell output.
path = '/content/drive/My Drive/4 Archive/MIMIC/NOTEEVENTS.csv'
df = pd.read_csv(path)
df[:5]
# + [markdown] colab_type="text" id="QGfmFP7dxCxJ"
# # Data Preprocessing
# + [markdown] colab_type="text" id="9y25WeFeBZpO"
# ### Select notes that are less than 100 characters long
# + colab={} colab_type="code" id="iFNVPeh6xJAj"
# Keep only short notes (< 100 characters) as language-model training material.
corpus = [note for note in df['TEXT'] if len(note) < 100]
print('Number of Notes with Length < 100:', len(corpus), '\n')
corpus[:10]
# + [markdown] colab_type="text" id="IqUl-zVQBitc"
# ### Split notes on newline characters
# + colab={} colab_type="code" id="hWByIkj3yDcu"
# Each note becomes a list of its lines.
corpus = [note.split('\n') for note in corpus]
corpus[:10]
# + [markdown] colab_type="text" id="WtEAYjXgBoT-"
# ### Collapse the nested list structure from splitting on newline characters
# + colab={} colab_type="code" id="UJPgUUXKypJR"
# Flatten and drop very short fragments (<= 10 characters).
corpus = [split_note for note in corpus for split_note in note if len(split_note) > 10]
print('Number of notes after merging sublists:', len(corpus), '\n')
corpus[:10]
# + [markdown] colab_type="text" id="O6oJ5NW4Bt4K"
# ### Drop any notes that contain PHI tags
# + colab={} colab_type="code" id="eRClYGBuzG-O"
# Regex matching MIMIC de-identification placeholders such as "[** ... **]".
# FIX: use a raw string — the original plain string contained invalid escape
# sequences (\[, \*) that trigger DeprecationWarning/SyntaxWarning on modern
# Python. The matched pattern is unchanged.
phi_pattern = r'(\[\*\*(.*)\*\*\])'
# Drop any note still carrying a PHI tag.
corpus = [note for note in corpus if re.search(phi_pattern, note) is None]
print('Number of notes after removing any note that contains a PHI tag:', len(corpus), '\n')
corpus[:10]
# + [markdown] colab_type="text" id="fOb39w9uePjn"
# ### Convert all notes to lower case
# + colab={} colab_type="code" id="RYtvhwviLjZ5"
# Normalize case so the vocabulary is case-insensitive.
corpus = [note.lower() for note in corpus]
corpus[:10]
# + [markdown] colab_type="text" id="XCr_7FSPCmgn"
# ### Preprocessing methods
# + colab={} colab_type="code" id="c4RFKRQT_KZZ"
class LanguageIndex():
    """Word<->index lookup tables built from an iterable of phrases.

    Index 0 is reserved for the "<pad>" token; the remaining words are
    assigned indices 1..N in sorted vocabulary order.
    """

    def __init__(self, lang):
        self.lang = lang
        self.word2idx = {}
        self.idx2word = {}
        self.vocab = set()
        self.create_index()

    def create_index(self):
        """Collect the vocabulary and populate both lookup tables."""
        for phrase in self.lang:
            self.vocab.update(phrase.split(' '))
        self.vocab = sorted(self.vocab)
        self.word2idx["<pad>"] = 0
        self.idx2word[0] = "<pad>"
        # Real words start at index 1; 0 stays reserved for padding.
        for idx, token in enumerate(self.vocab, start=1):
            self.word2idx[token] = idx
            self.idx2word[idx] = token
def max_length(t):
    """Return the length of the longest element in *t*."""
    return max(map(len, t))
def clean_special_chars(text):
    """Remove a fixed set of punctuation/whitespace characters from *text*.

    Uses a single translate() pass instead of chained replace() calls; the
    removed character set is identical to the original.
    """
    punct = '#$%&*+-/<=>@[\\]^_`{|}~\t\n'
    return text.translate(str.maketrans('', '', punct))
def generate_ngrams(corpus):
    """Expand each cleaned line into (prefix, suffix) character-split pairs.

    For every split point i in a line, emits
    ['<start> line[:i+1] <end>', '<start> line[i+1:] <end>'] — the teacher-
    forcing input/output pair used downstream.
    """
    cleaned = [clean_special_chars(line) for line in corpus]
    return [
        ['<start> ' + line[:i + 1] + ' <end>', '<start> ' + line[i + 1:] + ' <end>']
        for line in cleaned
        for i in range(1, len(line))
    ]
# + [markdown] colab_type="text" id="fg6v1aczI40o"
# ### Generate n-gram pairs with prefixes and suffixes for teacher forcing technique
# + colab={} colab_type="code" id="NlxanDbbIi_o"
# Build the (prefix, suffix) training pairs from the cleaned corpus.
pairs = generate_ngrams(corpus)
dummy_df = pd.DataFrame(pairs, columns=['input (i)','output (o)'])
print('Shape of n-gram pairs: {}\n'.format(dummy_df.shape))
dummy_df[:5]
# + [markdown] colab_type="text" id="GRihntexL9gs"
# ### Convert words to index integers for input / output
# + colab={} colab_type="code" id="C7wzZ_PkL7mB"
# Separate vocabularies for the encoder (input) and decoder (output) sides.
out_lang = LanguageIndex(o for i, o in pairs)
in_lang = LanguageIndex(i for i, o in pairs)
# + [markdown] colab_type="text" id="OE6GMI-1MLDO"
# ### Generate word embeddings for input / output
# + colab={} colab_type="code" id="qAVp4odXMKZa"
# Map each token to its integer ID (whitespace tokenization).
input_data = [[in_lang.word2idx[word] for word in i.split(' ')] for i, o in pairs]
output_data = [[out_lang.word2idx[word] for word in o.split(' ')] for i, o in pairs]
print('input_data:', input_data[0])
print('output_data:', output_data[0])
# + [markdown] colab_type="text" id="6I7adoyQPaWt"
# ### Calculate the max length of tokens for input and output
# + colab={} colab_type="code" id="Z4A6lxE2OiOE"
maxlen_in = max_length(input_data)
maxlen_out = max_length(output_data)
print('maxlen_in:', maxlen_in)
print('maxlen_out:', maxlen_out)
# + [markdown] colab_type="text" id="Gjigk49sPd4O"
# ### Add padding to the input and output
# + colab={} colab_type="code" id="mBurnHy8M0V4"
# Right-pad every sequence with 0 (<pad>) to a fixed length.
input_data = pad_sequences(input_data, maxlen=maxlen_in, padding="post")
output_data = pad_sequences(output_data, maxlen=maxlen_out, padding="post")
print('input_data (padded):', input_data[0], '\n')
print('output_data (padded):', output_data[0])
# + [markdown] colab_type="text" id="pK_jatyC89xC"
# ### Create target data
# + colab={} colab_type="code" id="p8h4JZyj4-HE"
# Decoder targets: the output sequences shifted left by one token
# (teacher forcing — the model predicts token i+1 from token i).
target_data = [[output_data[n][i+1] for i in range(len(output_data[n])-1)] for n in range(len(output_data))]
# FIX: print the unpadded targets BEFORE padding — the original printed the
# padded array twice (the first, mislabeled print ran after the reassignment)
# and the second label string was missing its closing parenthesis.
print('target_data:', target_data[:3])
target_data = pad_sequences(target_data, maxlen=maxlen_out, padding="post")
print('target_data (padded):', target_data[:3])
# + [markdown] colab_type="text" id="k3ky45tx_TV5"
# ### Reshape target_data
# + colab={} colab_type="code" id="TV4cR-AP_SpP"
# Add a trailing singleton axis: sparse_categorical_crossentropy expects
# targets shaped (batch, timesteps, 1).
target_shape = (target_data.shape[0], target_data.shape[1], 1)
print('Using target shape:', target_shape)
target_data = target_data.reshape(target_shape)
# + [markdown] colab_type="text" id="_quw3j-t_Jjh"
# ### Shuffle the data
# + colab={} colab_type="code" id="0ptEcWP8_Jsm"
# One shared permutation keeps inputs, outputs and targets aligned.
p = np.random.permutation(len(input_data))
input_data = input_data[p]
output_data = output_data[p]
target_data = target_data[p]
print('input_data:', input_data)
print('output_data:', output_data)
print('target_data:', target_data)
# + [markdown] colab_type="text" id="lXEOVEhp0mXG"
# ### Configuration parameters
# + colab={} colab_type="code" id="F1OQjAzK611B"
BUFFER_SIZE = len(input_data)
BATCH_SIZE = 128
embedding_dim = 300
# LSTM width per direction; the bidirectional encoder yields units*2 states.
units = 128
vocab_in_size = len(in_lang.word2idx)
vocab_out_size = len(out_lang.word2idx)
loss = 'sparse_categorical_crossentropy'
metrics = ['sparse_categorical_accuracy']
# + [markdown] colab_type="text" id="z65NY6IU0oYs"
# ### Build model
# + colab={} colab_type="code" id="aO4yCHXN7iPb"
# Create the Encoder layers first.
encoder_inputs = Input(shape=(maxlen_in,))
encoder_emb = Embedding(input_dim=vocab_in_size, output_dim=embedding_dim)
# Use this if you dont need Bidirectional LSTM
# encoder_lstm = CuDNNLSTM(units=units, return_sequences=True, return_state=True)
# encoder_out, state_h, state_c = encoder_lstm(encoder_emb(encoder_inputs))
encoder_lstm = Bidirectional(CuDNNLSTM(units=units, return_sequences=True, return_state=True))
# Bidirectional returns: outputs, forward (h, c), backward (h, c).
encoder_out, fstate_h, fstate_c, bstate_h, bstate_c = encoder_lstm(encoder_emb(encoder_inputs))
# Concatenate forward+backward states so the units*2 decoder can consume them.
state_h = Concatenate()([fstate_h, bstate_h])
# BUG FIX: the initial cell state must combine the forward and backward
# *cell* states. The original concatenated [bstate_h, bstate_c], feeding a
# hidden state into the decoder's cell state.
state_c = Concatenate()([fstate_c, bstate_c])
encoder_states = [state_h, state_c]
# Now create the Decoder layers.
decoder_inputs = Input(shape=(None,))
decoder_emb = Embedding(input_dim=vocab_out_size, output_dim=embedding_dim)
decoder_lstm = CuDNNLSTM(units=units*2, return_sequences=True, return_state=True)
decoder_lstm_out, _, _ = decoder_lstm(decoder_emb(decoder_inputs), initial_state=encoder_states)
# Two dense layers added to this model to improve inference capabilities.
decoder_d1 = Dense(units, activation="relu")
decoder_d2 = Dense(vocab_out_size, activation="softmax")
decoder_out = decoder_d2(Dropout(rate=.2)(decoder_d1(Dropout(rate=.2)(decoder_lstm_out))))
# Finally, create a training model which combines the encoder and the decoder.
# Note that this model has three inputs:
model = Model(inputs=[encoder_inputs, decoder_inputs], outputs=decoder_out)
opt = tf.train.AdamOptimizer()
# We'll use sparse_categorical_crossentropy so we don't have to expand decoder_out into a massive one-hot array.
model.compile(optimizer=opt, loss=loss, metrics=metrics)
model.summary()
# + [markdown] colab_type="text" id="PtnVKzzv0thn"
# ### Train model
# + colab={} colab_type="code" id="b25pCbWI7nG5"
epochs = 10
# Cap training to the first 100k pairs to keep runtime manageable.
N = 100000
history = model.fit([input_data[:N], output_data[:N]], target_data[:N],
                    batch_size=BATCH_SIZE,
                    epochs=epochs,
                    validation_split=0.2)
# + [markdown] colab_type="text" id="mQwZq-Ed00H9"
# ### Plot training vs. validation loss for signs of overfitting
# + colab={} colab_type="code" id="ey-ray5o8pZU"
plt.plot(history.history['loss'], label="Training loss")
plt.plot(history.history['val_loss'], label="Validation loss")
plt.legend()
plt.show()
# + [markdown] colab_type="text" id="4gb1niLf053-"
# ### Create encoder model
# + colab={} colab_type="code" id="5xbhHIls8reI"
# Create the encoder model from the tensors we previously declared.
encoder_model = Model(encoder_inputs, [encoder_out, state_h, state_c])
# Generate a new set of tensors for our new inference decoder. Note that we are using new tensors,
# this does not preclude using the same underlying layers that we trained on. (e.g. weights/biases).
inf_decoder_inputs = Input(shape=(None,), name="inf_decoder_inputs")
# We'll need to force feed the two state variables into the decoder each step.
# units*2 because the encoder states are forward+backward concatenations.
state_input_h = Input(shape=(units*2,), name="state_input_h")
state_input_c = Input(shape=(units*2,), name="state_input_c")
decoder_res, decoder_h, decoder_c = decoder_lstm(
    decoder_emb(inf_decoder_inputs),
    initial_state=[state_input_h, state_input_c])
# NOTE: the Dropout layers from training are deliberately omitted at inference.
inf_decoder_out = decoder_d2(decoder_d1(decoder_res))
inf_model = Model(inputs=[inf_decoder_inputs, state_input_h, state_input_c],
                  outputs=[inf_decoder_out, decoder_h, decoder_c])
# + [markdown] colab_type="text" id="sHFBCwfF5J2F"
# ### Methods for inference
# + colab={} colab_type="code" id="ZyBX54qc8tX1"
def sentence_to_vector(sentence, lang):
    """
    Convert *sentence* (a plain string) into a zero-padded vector of word
    IDs of length maxlen_in (module-level constant).
    Output is 1-D: [timesteps/words]
    """
    token_ids = [lang.word2idx[token] for token in sentence.split(' ')]
    vec = np.zeros(maxlen_in)
    for slot, token_id in enumerate(token_ids):
        vec[slot] = token_id
    return vec
def translate(input_sentence, infenc_model, infmodel):
    """
    Given an input string, an encoder model (infenc_model)
    and a decoder model (infmodel), greedily decode a completion string.
    """
    # Encode the input prefix; sh/sc become the decoder's initial states.
    sv = sentence_to_vector(input_sentence, in_lang)
    # Reshape as a batch of one sequence.
    sv = sv.reshape(1,len(sv))
    [emb_out, sh, sc] = infenc_model.predict(x=sv)
    i = 0
    start_vec = out_lang.word2idx["<start>"]
    # NOTE(review): stop_vec is computed but never used; the loop compares
    # against the "<end>" *word* instead.
    stop_vec = out_lang.word2idx["<end>"]
    # cur_vec is the single token fed to the decoder at each step.
    cur_vec = np.zeros((1,1))
    cur_vec[0,0] = start_vec
    cur_word = "<start>"
    output_sentence = ""
    # Greedy decoding: feed the argmax token back in until <end> or the
    # maximum output length is reached.
    while cur_word != "<end>" and i < (maxlen_out-1):
        i += 1
        if cur_word != "<start>":
            output_sentence = output_sentence + " " + cur_word
        x_in = [cur_vec, sh, sc]
        # The inference decoder also returns its updated LSTM states.
        [nvec, sh, sc] = infmodel.predict(x=x_in)
        cur_vec[0,0] = np.argmax(nvec[0,0])
        cur_word = out_lang.idx2word[np.argmax(nvec[0,0])]
    return output_sentence
# + [markdown] colab_type="text" id="8f3YKGwQ5FOu"
# ### Run tests to see how the model performs (we want inference < 100ms)
# + colab={} colab_type="code" id="15CHnwXF8vxd"
# Note that only words that we've trained the model on will be available, otherwise you'll get an error.
test = [
'discha', #arge summary
'left v', #entricular hypertrophy
'no ch', #ange from previous
'ventr', #ricular paced
'no sig', #nificant change
'previ', #ious tracing
'no ma', #ajor change
'sinu', #s rhythm
'R wav', #e progression,
'hydroc', #hlorothiazide
]
output = []
for t in test:
input_seq = t.lower()
t0 = datetime.now()
pred_seq = translate(t.lower(), encoder_model, inf_model)
t1 = datetime.now()
print('Inference time:', (t1-t0).total_seconds())
output.append({"Input Sequence": input_seq, "Predicted Sequence": pred_seq})
results_df = pd.DataFrame.from_dict(output)
results_df.head(len(test))
# + [markdown] colab_type="text" id="7e3YP0_X4SaW"
# ### Save the model to JSON and weights to H5 (change save_path to your location)
# + colab={} colab_type="code" id="mArZClfFeAOE"
save_path = '/content/drive/My Drive/3 Reference/TypeAssist/model_1'
# Refuse to overwrite an existing saved model.
# FIX: raise FileExistsError instead of BaseException — BaseException is the
# parent of KeyboardInterrupt/SystemExit, escapes ordinary `except Exception`
# handlers, and should never be raised directly.
if os.path.exists('{}.json'.format(save_path)):
    raise FileExistsError('WARNING. Save path exists. Please increment the model number.')
# serialize model to JSON
# the keras model which is trained is defined as 'model' in this example
model_json = inf_model.to_json()
with open('{}.json'.format(save_path), 'w') as f:
    f.write(model_json)
# serialize weights to HDF5
inf_model.save_weights('{}.h5'.format(save_path))
# + colab={} colab_type="code" id="u5mh_DiDHVwQ"
| examples/TypeAssist_Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Dependencies
import requests
import json
import gmaps
import numpy as np
import pandas as pd
import requests
import time
from matplotlib import pyplot as plt
# Google developer API key
from config import gkey
# -
#read the weather file
weather_data_df = pd.read_csv('weather_data.csv')
# BUG FIX: DataFrame.dropna() returns a new frame; the original discarded the
# result, so rows containing NaNs were silently kept. Assign it back.
weather_data_df = weather_data_df.dropna()
weather_data_df.head()
# +
# Store latitude and longitude in locations
locations = weather_data_df[["Lat", "Lng"]]
# Fill NaN values and convert to float
humidity = weather_data_df["Humidity"].astype(float)
# +
# Plot Heatmap — world view centered on (0, 0).
fig = gmaps.figure(center = [0,0] ,zoom_level = 2)
# Create heat layer weighted by humidity per city.
heat_layer = gmaps.heatmap_layer(locations, weights=humidity,
                                 dissipating=False, max_intensity=200,
                                 point_radius=3)
# Add layer
fig.add_layer(heat_layer)
# Display figure
fig
# -
# Configurate the perfect weather condition
# Successive filters: 24-35 degrees, wind speed <= 10, zero cloudiness
# (assumes Temperature is in Celsius and Wind Speed in mph — TODO confirm
# against the units used when weather_data.csv was generated).
temp=weather_data_df.loc[(weather_data_df['Temperature'] >= 24)&(weather_data_df['Temperature'] <= 35)]
wind=temp.loc[(temp['Wind Speed']<=10)]
cloudiness=wind.loc[(wind['Cloudiness']==0)]
cloudiness.count()
# +
# create hotel file
hotel_df = cloudiness.reset_index(drop=True)
hotel_df["Hotel Name"] = ""
# parameters for the Google Places Nearby Search (radius is in meters).
params = {
    "radius": 50000,
    "types": "hotel",
    "keyword": "hotels",
    "key": gkey
}
# Use the lat/lng we recovered to identify hotels
for index, row in hotel_df.iterrows():
    # latitudes and longitudes from df
    lat = row["Lat"]
    lng = row["Lng"]
    # change location each iteration while leaving original params in place
    params["location"] = f"{lat},{lng}"
    # Use the search term: "Hotels" and our lat/lng
    base_url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json"
    # make request and print url
    hotel_name = requests.get(base_url, params=params)
    # convert to json
    hotel_name = hotel_name.json()
    # Since some data may be missing we incorporate a try-except to skip any that are missing a data point.
    # Rows with no result keep the empty-string placeholder for "Hotel Name".
    try:
        hotel_df.loc[index, "Hotel Name"] = hotel_name["results"][0]["name"]
    except (KeyError, IndexError):
        print("Missing field/result... skipping.")
hotel_df
# +
# Rebind locations to the filtered hotel cities for the marker layer.
locations = hotel_df[["Lat", "Lng"]]
figure_layout = {
    'width': '400px',
    'height': '300px',
    'border': '1px solid black',
    'padding': '1px',
    'margin': '0 auto 0 auto'
}
fig = gmaps.figure(layout=figure_layout)
# Assign the marker layer to a variable
markers = gmaps.marker_layer(locations)
# Add the layer to the map
# NOTE(review): heat_layer is reused from the earlier heatmap cell, so the
# humidity heatmap is drawn under the markers — confirm this is intended.
fig.add_layer(heat_layer)
fig.add_layer(markers)
fig
# -
| VacationPY/VacationPY.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 5 - Train and Classify on original data
# ## 0 - Imports
# +
import json
from pandas import DataFrame
from ydata.connectors import LocalConnector
from ydata.connectors.filetype import FileType
from ydata.utils.formats import read_json
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score
# -
# ## 1 - Read Data
# +
# Initialize the YData's connector
connector = LocalConnector()
# Read the data
orig_df = connector.read_file('data_processed.csv', file_type = FileType.CSV).to_pandas()
# -
# ## 2 - Classification
# ### 2.1 - Split Data
# Split into train and test (default 75/25 split, fixed seed for reproducibility).
x_train, x_test = train_test_split(orig_df, random_state=6)
# ### 2.2 - Train and Predict
# Initialize Classifier and predict results.
# 'Unusual' is the target column; presumably binary 0/1 — verify upstream.
orig_tree_clf = DecisionTreeClassifier(random_state=4)
orig_tree_clf.fit(x_train.drop('Unusual', axis=1), x_train['Unusual'])
preds = orig_tree_clf.predict(x_test.drop('Unusual', axis=1))
# ### 2.3 - Calculate Metrics
# Calculate the scores on the held-out test set.
acc = accuracy_score(x_test['Unusual'].values, preds)
f1 = f1_score(x_test['Unusual'].values, preds)
recall = recall_score(x_test['Unusual'].values, preds)
precision = precision_score(x_test['Unusual'].values, preds)
# ## 3 - Create Artifact
# +
# Create Artifact. The table with the metrics will be shown on the "Run Output" section of the "Runs".
# The filename "mlpipeline-metrics.json" is the fixed name Kubeflow Pipelines
# scans for to surface run metrics.
metrics = {
    'metrics': [
        {
            'name': 'Accuracy-score',
            'numberValue': acc,
            'format': 'PERCENTAGE'
        },
        {
            'name': 'F1-score',
            'numberValue': f1,
            'format': 'PERCENTAGE'
        },
        {
            'name': 'Recall',
            'numberValue': recall,
            'format': 'PERCENTAGE'
        },
        {
            'name': 'Precision',
            'numberValue': precision,
            'format': 'PERCENTAGE'
        }
    ]
}
with open("mlpipeline-metrics.json", 'w') as f:
    json.dump(metrics, f)
# +
from sklearn.metrics import confusion_matrix
# ravel() yields (tn, fp, fn, tp) — this mapping assumes the label order is
# [0 (normal), 1 (unusual)]; confirm 'Unusual' is encoded that way.
pos_neg = confusion_matrix(x_test['Unusual'].values, preds).ravel()
matrix = [
    ['normal', 'normal', pos_neg[0]],
    ['normal', 'unusual', pos_neg[1]],
    ['unusual', 'normal', pos_neg[2]],
    ['unusual', 'unusual', pos_neg[3]]
]
df = DataFrame(matrix,columns=['target','predicted','count'])
# Kubeflow UI metadata: renders the confusion matrix on the run page.
metadata = {
    "outputs": [
        {
            "type": "confusion_matrix",
            "format": "csv",
            "schema": [
                {
                    "name": "target",
                    "type": "CATEGORY"
                },
                {
                    "name": "predicted",
                    "type": "CATEGORY"
                },
                {
                    "name": "count",
                    "type": "NUMBER"
                }
            ],
            "source": df.to_csv(header=False, index=False),
            "storage": "inline",
            "labels": [
                "normal",
                "unusual"
            ]
        }
    ]
}
with open('mlpipeline-ui-metadata.json', 'w') as f:
    json.dump(metadata, f)
# -
# ## 4 - Store Data
# Pass onto the next pipeline the test set
# Preserve the original row index as a 'test_ind' column before writing.
x_test.index.name = 'test_ind'
x_test.reset_index(inplace=True)
connector.write_file(data=x_test, path='test_set.csv', index=True)
| 5 - synthetic-data-applications/regular-tabular/mobile_network-anomaly_detection/5_train_and_detect.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
train_datagen = ImageDataGenerator(
rescale=1./255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
train_generator = train_datagen.flow_from_directory(
'../data/training_set/',
target_size=(64, 64),
batch_size=64,
class_mode='binary'
)
test_datagen = ImageDataGenerator(rescale=1./255)
test_generator = test_datagen.flow_from_directory(
'../data/test_set/',
target_size=(64, 64),
batch_size=64,
class_mode='binary'
)
from keras.models import Sequential
from keras.layers import Dense, Conv2D, MaxPool2D, Flatten
cnn = Sequential()
cnn.add(Conv2D(filters=32, input_shape=[64, 64, 3], kernel_size=3, activation='relu'))
cnn.add(MaxPool2D(pool_size=2, strides=2))
cnn.add(Conv2D(filters=32, kernel_size=3, activation='relu'))
cnn.add(MaxPool2D(pool_size=2, strides=2))
cnn.add(Flatten())
cnn.add(Dense(units=128, activation='relu'))
cnn.add(Dense(units=1, activation='sigmoid'))
cnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
history = cnn.fit(x=train_generator, validation_data=test_generator, epochs=20)
cnn.summary()
import matplotlib.pyplot as plt
# %matplotlib inline
# Plot training loss and accuracy per epoch on a dark background.
plt.figure(figsize=(12,8))
plt.gca().set_facecolor('k')
plt.plot(history.history['loss'], 'o-', color='red', label='loss')
plt.plot(history.history['accuracy'], 'o-', color='green', label='accuracy')
plt.xlabel('epoch', fontsize=20)
plt.ylabel('loss', fontsize=20)
plt.yticks(fontsize=16)
plt.xticks(fontsize=16)
plt.legend();
import numpy as np
from keras.preprocessing import image
# Shows which integer label maps to which class directory.
print(train_generator.class_indices)
# Load one image at the model's input resolution and shape it as a batch of 1.
test_image = image.load_img('../data/single_prediction/cat_or_dog_1.jpg', target_size=(64, 64))
test_image = image.img_to_array(test_image)
test_image = np.expand_dims(test_image, axis=0)
# BUG FIX: both generators rescale pixels by 1/255, but img_to_array yields
# values in [0, 255]; normalize so inference input matches training input.
test_image = test_image / 255.0
result = cnn.predict(test_image)
print(result)
| notebooks/convolutional_neural_network.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: VPython
# language: python
# name: vpython
# ---
# +
import ipywidgets as wd
from vpython import *
# This version uses a Jupyter notebook menu
# See Textures2 for a version that uses a VPython menu
scene.width = 600
scene.height = 600
# Currently selected object type (set by the dropdown) and the last type drawn.
show = 'box'
last_show = show
R = 0.4 # radius of sphere
D = 0.7 # size of box
# Texture catalog; texs and texnames line up index-for-index.
texs = [textures.flower, textures.granite, textures.gravel, textures.metal, textures.rock, textures.rough,
        textures.rug, textures.stones, textures.stucco, textures.wood, textures.wood_old, textures.earth]
# NOTE(review): bumps/bumpnames have 11 entries vs 12 textures (earth has no
# bump map) — confirm intended alignment if bump maps are ever enabled.
bumps = [None, None, bumpmaps.gravel, None, bumpmaps.rock, None, None,
         bumpmaps.stones, bumpmaps.stucco, None, bumpmaps.wood_old]
texnames = ['flower', 'granite', 'gravel', 'metal', 'rock', 'rough', 'rug', 'stones', 'stucco', 'wood', 'wood_old', 'earth']
bumpnames = [ None, None, 'gravel', None, 'rock', None, None, 'stones', 'stucco', None, 'wood_old']
# Text labels created under each object; cleared/rebuilt on every redraw.
labels = []
def erase():
    """Hide every object currently present in the scene."""
    for scene_obj in scene.objects:
        scene_obj.visible = False
def show_object(index, x, y):
    """Create one textured demo object of the current `show` type at (x, y)."""
    tex = texs[index]
    bump = None
    # Bump maps aren't very salient unless one moves the light or rotates the object,
    # so don't bother with bump maps unless there's an option to move the light or object.
    #if (bumps[index] !== null) B = bumpmaps[bumps[index]]
    size = D * vec(1, 1, 1)
    centered = vec(x, y, 0)
    shifted = vec(x - D / 2, y, 0)  # these shapes anchor at one end, not the center
    if show == 'box':
        c = box(pos=centered, size=size)
    elif show == 'sphere':
        c = sphere(pos=centered, size=size)
    elif show == 'cylinder':
        c = cylinder(pos=shifted, size=size)
    elif show == 'cone':
        c = cone(pos=shifted, size=size)
    elif show == 'pyramid':
        c = pyramid(pos=shifted, size=size)
    c.index = index
    c.shininess = 0
    c.texture = dict(file=tex, bumpmap=bump)
    labels.append(label(pos=vec(x, y - .5, 0), box=0, text='textures.' + texnames[index]))
def start_setup():
    """Reset the camera and lay the textured objects out in a 4-wide grid."""
    scene.range = 2.2
    scene.fov = 0.2
    scene.center = vec(1.5, 2, 0)
    scene.forward = vec(0, 0, -1)
    erase()
    #scene.visible = False
    index = 0
    y = 3.3
    # Fill rows top-down, four objects per row, until the catalog is exhausted.
    while y > 0:
        for column in range(4):
            if index >= len(texs):
                break
            show_object(index, column, y)
            index += 1
        y -= 1.3
def end_setup():
    # Finish a redraw: restore visibility note and the grid-view title.
    #scene.visible = True
    scene.title = 'Click an object to enlarge it.'

def setup():
    # Full redraw of the grid view.
    start_setup()
    end_setup()
# Initial draw, then build the UI: a dropdown switching the displayed shape.
start_setup()
#scene.caption = "Loading textures..."
#scene.waitfor("textures") # not yet implemented in Jupyter VPython
m = wd.Dropdown(options=['box', 'sphere', 'cylinder', 'cone', 'pyramid'], value='box',
                description="Change the type of object: ")
container = wd.HBox(children=[m])
display(container)

def m_handler(s):
    # Dropdown callback: record the newly selected shape name.
    global show
    show = s['new']

m.observe(m_handler, names='value')
end_setup()

# Mouse-pick state shared between the click callback and the main loop.
hit = None
clicked = False

def click(ev):
    # Click callback: remember which object (if any) was picked.
    global hit, clicked
    hit = scene.mouse.pick
    clicked = True

scene.bind("click", click)

def single_object(index):
    # Show one enlarged textured object by its index into texs.
    erase()
    scene.center = vec(0,-.1*R,0)
    scene.range = 1.5*R
    show_object(index, 0, 0)
    scene.title = 'Click anywhere to see all textures.'

picked = None
# Main event loop: toggle between the grid view and a single enlarged object.
while True:
    rate(30)
    if show != last_show:
        # Shape type changed via the dropdown; redraw whichever view is active.
        last_show = show
        if picked != None:
            single_object(picked.index)
        else:
            setup()
    if clicked:
        clicked = False
        if picked != None:
            # Already zoomed in: any click returns to the grid.
            picked = None
            setup()
        elif picked == None and hit != None:
            # Clicked an object in the grid: zoom in on it.
            picked = hit
            hit = None
            single_object(picked.index)
# -
| Demos/Textures1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Set project root
import os
os.chdir('./..')
# Setting locale for pretty printing
import locale
locale.setlocale(locale.LC_ALL, '')
# -
# Project data-access helpers plus the usual analysis stack.
from Utils.Data.Data import get_feature, get_dataset, get_dictionary
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 110  # slightly higher-resolution figures
import pandas as pd
import numpy as np
# # Train dataset analysis
# Media-related columns of the training set: per-tweet counts of each
# media type plus the raw mapped media array.
columns = [
    "mapped_feature_tweet_id",
    "tweet_feature_number_of_video",
    "tweet_feature_number_of_gif",
    "tweet_feature_number_of_photo",
    "tweet_feature_number_of_media",
    "mapped_feature_tweet_media"
]
dataset_id = "train"
# Load the data
dataframe = get_dataset(columns, dataset_id)
dataframe
dataframe.info()
# Raw cells below preserve the output of a previous memory_usage run.
# + active=""
# dataframe.memory_usage(deep=True)
# + active=""
# Index 128
# mapped_feature_tweet_id 1184601904
# tweet_feature_number_of_video 1184601904
# tweet_feature_number_of_gif 1184601904
# tweet_feature_number_of_photo 1184601904
# tweet_feature_number_of_media 1184601904
# mapped_feature_tweet_media 8348286792
# dtype: int64
# -
# ## The mapped media array column alone takes ~8 GB of memory
dataframe[:50]
# Deduplicate by tweet id so each tweet's media counts are tallied once,
# then report the share of each media type among all media.
unique_tweet_dataframe = dataframe.drop_duplicates("mapped_feature_tweet_id")
total_amount_of_photo = unique_tweet_dataframe['tweet_feature_number_of_photo'].sum()
total_amount_of_video = unique_tweet_dataframe['tweet_feature_number_of_video'].sum()
total_amount_of_gif = unique_tweet_dataframe['tweet_feature_number_of_gif'].sum()
total_amount_of_media = unique_tweet_dataframe['tweet_feature_number_of_media'].sum()
# BUG FIX: all three messages previously said "photos" (copy-paste error);
# the video and GIF lines now name the medium they actually report.
print(f"The amount of photos are {total_amount_of_photo}, that are {total_amount_of_photo/total_amount_of_media:.4f} of all media")
print(f"The amount of videos are {total_amount_of_video}, that are {total_amount_of_video/total_amount_of_media:.4f} of all media")
print(f"The amount of GIFs are {total_amount_of_gif}, that are {total_amount_of_gif/total_amount_of_media:.4f} of all media")
plt.pie([total_amount_of_photo, total_amount_of_video, total_amount_of_gif], labels=["Photo", "Video", "GIF"])
plt.show()
# Per-count tweet tallies for each media type (unique tweets only).
unique_tweet_dataframe[['tweet_feature_number_of_video']].groupby('tweet_feature_number_of_video').size()
unique_tweet_dataframe[['tweet_feature_number_of_gif']].groupby('tweet_feature_number_of_gif').size()
unique_tweet_dataframe[['tweet_feature_number_of_media']].groupby('tweet_feature_number_of_media').size()
# One bar chart per media type: how many tweets contain exactly X of it.
for count_column, pretty_name in [
    ("tweet_feature_number_of_photo", "Photo"),
    ("tweet_feature_number_of_video", "Video"),
    ("tweet_feature_number_of_gif", "GIF"),
    ("tweet_feature_number_of_media", "Media"),
]:
    aggregate_result = pd.DataFrame(unique_tweet_dataframe[[count_column]].groupby(count_column).size())
    print(aggregate_result)
    plot = aggregate_result.plot(kind="bar")
    plot.set_xlabel(f"Number of {pretty_name}")
    plot.set_ylabel(f"Number of tweet with X {pretty_name.lower()}")
# ## Correlation between the number of media in a tweet and the type of engagement
del unique_tweet_dataframe  # no longer needed; free memory before the next load
# +
# Load the engagement type
cols = [
    "tweet_feature_engagement_is_like",
    "tweet_feature_engagement_is_retweet",
    "tweet_feature_engagement_is_reply",
    "tweet_feature_engagement_is_comment",
    "tweet_feature_engagement_is_negative"
]
# Load the data and join it column-wise (concat aligns on the shared index).
dataframe = pd.concat([
    dataframe[['tweet_feature_number_of_media']],
    get_dataset(cols, dataset_id)
], axis=1)
# A positive engagement is any of like/retweet/reply/comment.
dataframe['tweet_feature_engagement_is_positive'] = dataframe[[
    "tweet_feature_engagement_is_like",
    "tweet_feature_engagement_is_retweet",
    "tweet_feature_engagement_is_reply",
    "tweet_feature_engagement_is_comment"
]].sum(axis=1)
dataframe
# -
dataframe.info()
# Aggregate engagement counts per number-of-media bucket, then derive the
# per-type engagement rate (share of engagements of each type in the bucket).
result = pd.DataFrame(dataframe.groupby("tweet_feature_number_of_media").sum())
result['n_engagements'] = pd.DataFrame(dataframe.groupby("tweet_feature_number_of_media").size())
for engagement_type in ('like', 'retweet', 'reply', 'comment', 'negative'):
    result[f'{engagement_type}_ratio'] = (
        result[f'tweet_feature_engagement_is_{engagement_type}'] / result['n_engagements']
    )
# #### Plot for all type of engagement
# Wider figure so the five grouped bars per bucket stay readable.
plt.rcParams['figure.figsize'] = [15, 5]
plot = result[[
    'like_ratio',
    'reply_ratio',
    'retweet_ratio',
    'comment_ratio',
    'negative_ratio'
]].plot(kind="bar")
plot.set_xlabel("Number of media in a tweet")
plot.set_ylabel("Probability it is involved in that type of engagement")
# One bar chart per engagement type, with the overall (media-independent)
# engagement rate drawn as a red baseline for comparison.
# Baselines were computed before; see the notebook that analyzes the engagements.
for ratio_column, axis_label, overall_rate in [
    ('like_ratio', 'Like ratio', 0.4392),
    ('reply_ratio', 'Reply ratio', 0.0274),
    ('retweet_ratio', 'Retweet ratio', 0.1131),
    ('comment_ratio', 'Retweet with comment ratio', 0.0078),
]:
    plt.rcParams['figure.figsize'] = [10, 5]
    plot = result[[ratio_column]].plot(kind="bar")
    plot.set_xlabel("Number of media in a tweet")
    plot.set_ylabel(axis_label)
    plot.axhline(overall_rate, color="red")
| Notebooks/Analysis_Media.ipynb |