code
stringlengths 38
801k
| repo_path
stringlengths 6
263
|
|---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Homework 2 - Berkeley STAT 157
#
# Handout 1/29/2019, due 2/5/2019 by 4pm in Git by committing to your repository.
from mxnet import nd, autograd, gluon
# # 1. Multinomial Sampling
#
# Implement a sampler from a discrete distribution from scratch, mimicking the function `mxnet.ndarray.random.multinomial`. Its arguments should be a vector of probabilities $p$. You can assume that the probabilities are normalized, i.e. that they sum up to $1$. Make the call signature as follows:
#
# ```
# samples = sampler(probs, shape)
#
# probs : An ndarray vector of size n of nonnegative numbers summing up to 1
# shape : A list of dimensions for the output
# samples : Samples from probs with shape matching shape
# ```
#
# Hints:
#
# 1. Use `mxnet.ndarray.random.uniform` to get a sample from $U[0,1]$.
# 1. You can simplify things for `probs` by computing the cumulative sum over `probs`.
# +
def sampler(probs, shape):
    """Draw samples from the discrete distribution given by `probs`.

    probs : nd.array vector of n nonnegative numbers summing up to 1
    shape : list/tuple of dimensions for the output
    returns: nd.array of `shape` holding integer indices in [0, n)
    """
    # Inverse-CDF sampling: draw u ~ U[0,1] and count how many cumulative
    # probabilities lie strictly below u; that count is the sampled index.
    cumulative, running = [], 0.0
    for p in probs.asnumpy().tolist():
        running += p
        cumulative.append(running)
    cdf = nd.array(cumulative).reshape(1, -1)          # (1, n) row of CDF values
    u = nd.random.uniform(shape=shape).reshape(-1, 1)  # (k, 1) uniform draws
    # broadcasted comparison: index = #{j : cdf_j < u}
    return (u > cdf).sum(axis=1).reshape(shape)
# a simple test
sampler(nd.array([0.2, 0.3, 0.5]), (2,3))
# -
# # 2. Central Limit Theorem
#
# Let's explore the Central Limit Theorem when applied to text processing.
#
# * Download [https://www.gutenberg.org/ebooks/84](https://www.gutenberg.org/files/84/84-0.txt) from Project Gutenberg
# * Remove punctuation, uppercase / lowercase, and split the text up into individual tokens (words).
# * For the words `a`, `and`, `the`, `i`, `is` compute their respective counts as the book progresses, i.e.
# $$n_\mathrm{the}[i] = \sum_{j = 1}^i \{w_j = \mathrm{the}\}$$
# * Plot the proportions $n_\mathrm{word}[i] / i$ over the document in one plot.
# * Find an envelope of the shape $O(1/\sqrt{i})$ for each of these five words. (Hint, check the last page of the [sampling notebook](http://courses.d2l.ai/berkeley-stat-157/slides/1_24/sampling.pdf))
# * Why can we **not** apply the Central Limit Theorem directly?
# * How would we have to change the text for it to apply?
# * Why does it still work quite well?
# +
# Download Frankenstein (Project Gutenberg #84) and read it into memory.
filename = gluon.utils.download('https://www.gutenberg.org/files/84/84-0.txt')
# NOTE(review): no encoding given — relies on the platform default decoding
# the UTF-8 file correctly; consider open(filename, encoding='utf-8').
with open(filename) as f:
    book = f.read()
print(book[0:100])  # sanity-check the first 100 characters
## Add your codes here
# -
# ## 3. Denominator-layout notation
#
# We used the numerator-layout notation for matrix calculus in class, now let's examine the denominator-layout notation.
#
# Given $x, y\in\mathbb R$, $\mathbf x\in\mathbb R^n$ and $\mathbf y \in \mathbb R^m$, we have
#
# $$
# \frac{\partial y}{\partial \mathbf{x}}=\begin{bmatrix}
# \frac{\partial y}{\partial x_1}\\
# \frac{\partial y}{\partial x_2}\\
# \vdots\\
# \frac{\partial y}{\partial x_n}
# \end{bmatrix},\quad
# \frac{\partial \mathbf y}{\partial {x}}=\begin{bmatrix}
# \frac{\partial y_1}{\partial x},
# \frac{\partial y_2}{\partial x},
# \ldots,
# \frac{\partial y_m}{\partial x}
# \end{bmatrix}
# $$
#
# and
#
# $$
# \frac{\partial \mathbf y}{\partial \mathbf{x}}
# =\begin{bmatrix}
# \frac{\partial \mathbf y}{\partial {x_1}}\\
# \frac{\partial \mathbf y}{\partial {x_2}}\\
# \vdots\\
# \frac{\partial \mathbf y}{\partial {x_n}}\\
# \end{bmatrix}
# =\begin{bmatrix}
# \frac{\partial y_1}{\partial x_1},
# \frac{\partial y_2}{\partial x_1},
# \ldots,
# \frac{\partial y_m}{\partial x_1}
# \\
# \frac{\partial y_1}{\partial x_2},
# \frac{\partial y_2}{\partial x_2},
# \ldots,
# \frac{\partial y_m}{\partial x_2}\\
# \vdots\\
# \frac{\partial y_1}{\partial x_n},
# \frac{\partial y_2}{\partial x_n},
# \ldots,
# \frac{\partial y_m}{\partial x_n}
# \end{bmatrix}
# $$
#
# Questions:
#
# 1. Assume $\mathbf y = f(\mathbf u)$ and $\mathbf u = g(\mathbf x)$, write down the chain rule for $\frac {\partial\mathbf y}{\partial\mathbf x}$
# 2. Given $\mathbf X \in \mathbb R^{m\times n},\ \mathbf w \in \mathbb R^n, \ \mathbf y \in \mathbb R^m$, assume $z = \| \mathbf X \mathbf w - \mathbf y\|^2$, compute $\frac{\partial z}{\partial\mathbf w}$.
# ## 4. Numerical Precision
#
# Given scalars `x` and `y`, implement the following `log_exp` function such that it returns
# $$-\log\left(\frac{e^x}{e^x+e^y}\right)$$.
def log_exp(x, y):
    """Return -log(e^x / (e^x + e^y)) for nd.array scalars x, y.

    Naive implementation: overflows when x or y is large (e.g. exp(100)),
    which is explored below with `stable_log_exp`.
    """
    return -nd.log(nd.exp(x) / (nd.exp(x) + nd.exp(y)))
# Test your codes with normal inputs:
x, y = nd.array([2]), nd.array([3])
z = log_exp(x, y)
z
# Now implement a function to compute $\partial z/\partial x$ and $\partial z/\partial y$ with `autograd`
def grad(forward_func, x, y):
    """Compute and print dz/dx and dz/dy for z = forward_func(x, y).

    Uses MXNet autograd: attaches gradient buffers to x and y, records the
    forward pass, then backpropagates from z.
    """
    x.attach_grad()
    y.attach_grad()
    with autograd.record():
        z = forward_func(x, y)
    z.backward()
    print('x.grad =', x.grad)
    print('y.grad =', y.grad)
# Test your codes, it should print the results nicely.
grad(log_exp, x, y)
# But now let's try some "hard" inputs
x, y = nd.array([50]), nd.array([100])
grad(log_exp, x, y)
# Does your code return correct results? If not, try to understand the reason. (Hint, evaluate `exp(100)`). Now develop a new function `stable_log_exp` that is identical to `log_exp` in math, but returns a more numerical stable result.
# +
def stable_log_exp(x, y):
    """Numerically stable version of log_exp.

    Uses the log-sum-exp trick: with m = max(x, y),
        -log(e^x/(e^x+e^y)) = m + log(e^(x-m) + e^(y-m)) - x,
    so no exponential ever sees an argument larger than 0.
    """
    m = nd.maximum(x, y)
    return m + nd.log(nd.exp(x - m) + nd.exp(y - m)) - x
grad(stable_log_exp, x, y)
# -
|
homeworks/homework2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
from fractions import Fraction
def cantor(n):
    """Return the 2**n closed intervals of the n-th Cantor-set construction
    step, as [left, right] pairs of exact Fractions sorted by left endpoint."""
    intervals = [[Fraction(0), Fraction(1)]]
    for _ in range(n):
        refined = []
        for lo, hi in intervals:
            third = (hi - lo) / 3
            # keep the left and right thirds, discard the open middle third
            refined.append([lo, lo + third])
            refined.append([hi - third, hi])
        intervals = refined
    return sorted(intervals, key=lambda pair: pair[0])
# + tags=[]
[pair for pair in cantor(100) if pair[0] <= 1/4 and pair[1] >= 1/4]
# -
math.floor(math.log(0.1, 3))
divmod(1, 3)
# +
import math
def intBaseConversion(x, base=3):
    """Convert the integer part of x to a digit string in the given base.

    Digits are produced by repeated divmod; note that for base > 10 a digit
    may render as more than one character (e.g. 15 -> "15").
    """
    remaining = int(x)
    if remaining == 0:
        return "0"
    digits = []
    while remaining != 0:
        remaining, digit = divmod(remaining, base)
        digits.append(str(digit))
    return "".join(reversed(digits))
def baseConversion( x=1, base=3, decimals=2 ):
    """Render x in `base` as "<int part>.<fractional digits>".

    Produces decimals+1 fractional digits by repeatedly multiplying the
    fractional remainder by the base and taking the integer part.
    """
    frac = x - int(x)
    frac_digits = []
    for _ in range(decimals + 1):
        frac *= base
        frac_digits.append(intBaseConversion(int(frac), base=base))
        frac -= int(frac)
    return f"{intBaseConversion(x, base)}.{''.join(frac_digits)}"
baseConversion(1/3, base=8, decimals=10)
|
pyprojects/notebooks/cantor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ###Set up working directory
# cd ~/Desktop/SSUsearch/
# mkdir -p ./workdir
#check seqfile files to process in data directory (make sure you still remember the data directory)
# !ls ./data/test/data
# #README
#
# ## This part of pipeline search for the SSU rRNA gene fragments, classify them, and extract reads aligned specific region. It is also heavy lifting part of the whole pipeline (more cpu will help).
#
# ## This part works with one seqfile at a time. You just need to change the "Seqfile" and maybe other parameters in the two cells below.
#
# ## To run commands, click "Cell" then "Run All". After it finishes, you will see "\*** pipeline runs successsfully :)" at bottom of this page.
#
# ##If your computer has many processors, there are two ways to make use of the resource:
#
# 1. Set "Cpu" higher number.
#
# 2. make more copies of this notebook (click "File" then "Make a copy" in menu bar), so you can run the step on multiple files at the same time.
#
# (Again we assume the "Seqfile" is quality trimmed.)
#
# ###Here we will process one file at a time; set the "Seqfile" variable to the seqfile name to be processed
# ###The first part of the seqfile basename (separated by ".") will be the label of this sample, so name it properly.
# e.g. for "/usr/local/notebooks/data/test/data/1c.fa", "1c" will be the label of this sample.
Seqfile='./data/test/data/1c.fa'
# ###Other parameters to set
# +
# Pipeline parameters (all strings: they are exported to the shell below).
Cpu='1' # maximum number of threads for search and alignment
Hmm='./data/SSUsearch_db/Hmm.ssu.hmm' # HMM model for SSU rRNA search
Gene='ssu'
Script_dir='./scripts'
Gene_model_org='./data/SSUsearch_db/Gene_model_org.16s_ecoli_J01695.fasta'
Ali_template='./data/SSUsearch_db/Ali_template.silva_ssu.fasta'
Start='577' # region picked for de novo clustering (E. coli SSU positions)
End='727'
Len_cutoff='100' # min length for reads picked for the region
Gene_tax='./data/SSUsearch_db/Gene_tax.silva_taxa_family.tax' # silva 108 ref
Gene_db='./data/SSUsearch_db/Gene_db.silva_108_rep_set.fasta'
Gene_tax_cc='./data/SSUsearch_db/Gene_tax_cc.greengene_97_otus.tax' # greengene 2012.10 ref for copy correction
Gene_db_cc='./data/SSUsearch_db/Gene_db_cc.greengene_97_otus.fasta'
# -
# first part of the file basename will be the label of this sample
import os
Filename=os.path.basename(Seqfile)
Tag=Filename.split('.')[0]
# +
import os
# Prepend the pipeline's bundled external tools to PATH.
# Fix: expand '~' here — a literal '~' inside a PATH entry is NOT expanded
# when subprocesses look executables up, so the tools would never be found.
New_path = '{}:{}'.format(os.path.expanduser('~/Desktop/SSUsearch/external_tools/bin/'), os.environ['PATH'])
# Resolve every input path to absolute so the later `cd workdir` (and the
# shell cells that follow) can still find them.
Hmm = os.path.abspath(Hmm)
Seqfile = os.path.abspath(Seqfile)
Script_dir = os.path.abspath(Script_dir)
Gene_model_org = os.path.abspath(Gene_model_org)
Ali_template = os.path.abspath(Ali_template)
Gene_tax = os.path.abspath(Gene_tax)
Gene_db = os.path.abspath(Gene_db)
Gene_tax_cc = os.path.abspath(Gene_tax_cc)
Gene_db_cc = os.path.abspath(Gene_db_cc)
# Export everything for the `%%bash` / `!` cells below. The variables above
# are already absolute, so the redundant second abspath calls were dropped.
os.environ.update(
    {'PATH': New_path,
     'Cpu': Cpu,
     'Hmm': Hmm,
     'Gene': Gene,
     'Seqfile': Seqfile,
     'Filename': Filename,
     'Tag': Tag,
     'Script_dir': Script_dir,
     'Gene_model_org': Gene_model_org,
     'Ali_template': Ali_template,
     'Start': Start,
     'End': End,
     'Len_cutoff': Len_cutoff,
     'Gene_tax': Gene_tax,
     'Gene_db': Gene_db,
     'Gene_tax_cc': Gene_tax_cc,
     'Gene_db_cc': Gene_db_cc})
# -
# !echo "*** make sure: parameters are right"
# !echo "Seqfile: $Seqfile\nCpu: $Cpu\nFilename: $Filename\nTag: $Tag"
# cd workdir
# mkdir -p $Tag.ssu.out
# +
### start hmmsearch
# + language="bash"
# echo "*** hmmsearch starting"
# time hmmsearch --incE 10 --incdomE 10 --cpu $Cpu \
# --domtblout $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
# -o /dev/null -A $Tag.ssu.out/$Tag.qc.$Gene.sto \
# $Hmm $Seqfile
# echo "*** hmmsearch finished"
# -
# !python $Script_dir/get-seq-from-hmmout.py \
# $Tag.ssu.out/$Tag.qc.$Gene.hmmdomtblout \
# $Tag.ssu.out/$Tag.qc.$Gene.sto \
# $Tag.ssu.out/$Tag.qc.$Gene
# ### Pass hits to mothur aligner
# + language="bash"
# echo "*** Starting mothur align"
# cat $Gene_model_org $Tag.ssu.out/$Tag.qc.$Gene > $Tag.ssu.out/$Tag.qc.$Gene.RFadded
#
# # mothur does not allow tab between its flags, thus no indents here
# time mothur "#align.seqs(candidate=$Tag.ssu.out/$Tag.qc.$Gene.RFadded, template=$Ali_template, threshold=0.5, flip=t, processors=$Cpu)"
#
# rm -f mothur.*.logfile
# -
# ### Get aligned seqs that have > 50% matched to references
# !python $Script_dir/mothur-align-report-parser-cutoff.py \
# $Tag.ssu.out/$Tag.qc.$Gene.align.report \
# $Tag.ssu.out/$Tag.qc.$Gene.align \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter \
# 0.5
# !python $Script_dir/remove-gap.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa
# ### Search is done here (the computational intensive part). Hooray!
#
# - \$Tag.ssu.out/\$Tag.qc.\$Gene.align.filter:
# aligned SSU rRNA gene fragments
#
#
#
# - \$Tag.ssu.out/\$Tag.qc.\$Gene.align.filter.fa:
# unaligned SSU rRNA gene fragments
#
# ### Extract the reads mapped 150bp region in V4 (577-727 in *E.coli* SSU rRNA gene position) for unsupervised clustering
# +
# !python $Script_dir/region-cut.py $Tag.ssu.out/$Tag.qc.$Gene.align.filter $Start $End $Len_cutoff
# !mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter."$Start"to"$End".cut.lenscreen $Tag.ssu.out/$Tag.forclust
# -
# ### Classify SSU rRNA gene seqs using SILVA
# + language="bash"
# rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy
# mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db, taxonomy=$Gene_tax, cutoff=50, processors=$Cpu)"
# mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.silva_taxa_family*.taxonomy \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy
# -
# !python $Script_dir/count-taxon.py \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.silva.taxonomy.count
# !rm -f mothur.*.logfile
# ### Classify SSU rRNA gene seqs with Greengene for copy correction later
# + language="bash"
# rm -f $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy
# mothur "#classify.seqs(fasta=$Tag.ssu.out/$Tag.qc.$Gene.align.filter.fa, template=$Gene_db_cc, taxonomy=$Gene_tax_cc, cutoff=50, processors=$Cpu)"
# mv $Tag.ssu.out/$Tag.qc.$Gene.align.filter.greengene_97_otus*.taxonomy \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy
# -
# !python $Script_dir/count-taxon.py \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy \
# $Tag.ssu.out/$Tag.qc.$Gene.align.filter.wang.gg.taxonomy.count
# !rm -f mothur.*.logfile
# check the output directory
# !ls $Tag.ssu.out
# ### This part of pipeline (working with one sequence file) finishes here. Next we will combine samples for community analysis (see unsupervised analysis).
#
# Following are files useful for community analysis:
#
# * 1c.577to727: aligned fasta file of seqs mapped to target region for de novo clustering
# * 1c.qc.ssu.align.filter: aligned fasta file of all SSU rRNA gene fragments
# * 1c.qc.ssu.align.filter.wang.gg.taxonomy: Greengene taxonomy (for copy correction)
# * 1c.qc.ssu.align.filter.wang.silva.taxonomy: SILVA taxonomy
# !echo "*** pipeline runs successsfully :)"
|
notebooks-pc-linux/ssu-search-Copy1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from rdkit import Chem, DataStructs
from rdkit.Chem import PandasTools, AllChem
from molvecgen.vectorizers import SmilesVectorizer
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset
from torch.optim.lr_scheduler import ReduceLROnPlateau
import pandas, numpy, random
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
# To simplify the model, we set up a baseline: when a molecule's mu > baseline, we label it 1, otherwise 0
baseline = 2.3
# dataset
class SMILESMolDataset(Dataset):
    """PyTorch dataset of molecules vectorized to one-hot SMILES encodings.

    Each item is (sample, label, target): the vectorized molecule, a float
    0.0/1.0 label, and the same label wrapped in a 1-element FloatTensor.
    The label is 1.0 iff y[idx] exceeds the module-level `baseline`.
    """
    def __init__(self, molecules, y, vectorizer):
        # molecules: indexable collection of RDKit Mol objects
        # y: target values aligned with `molecules` (here: dipole moment mu)
        # vectorizer: SmilesVectorizer-like object with .transform(batch)
        self.molecules = molecules
        self.y = y
        self.vectorizer = vectorizer
    def __len__(self):
        return len(self.molecules)
    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.tolist()
        mols = self.molecules[idx]
        #The vectorizer was written to work with batches,
        #but PyTorch datasets unfortunately works with single samples
        sample = self.vectorizer.transform([mols])[0]
        # binarize against the module-level threshold `baseline`
        label = float(self.y[idx] > baseline)
        #print("self.y[idx]", self.y[idx], 'label', label)
        target = torch.FloatTensor([label])
        # label is returned twice: once as a float, once as a tensor
        return sample, label, target
# +
# read qm9.csv to get data
dataset = pandas.read_csv('dataset/qm9.csv',skiprows=lambda i: i % 2 != 0 and i == 0,nrows=6000)
PandasTools.AddMoleculeColumnToFrame(dataset,'smiles','Molecule')
smivec = SmilesVectorizer(pad=1, leftpad=True, canonical=False, augment=True)
smivec.fit(dataset.Molecule.values, )
dataset[200:203]
# +
# x_data(SMILES/Molecule) is the data for Generator,but we don't use y_data, we will use random y;
# x_train, y_train are real examples, it will pass to Discriminator;
x_train, x_data, y_train, y_data = train_test_split(dataset[['smiles','Molecule']].values, dataset['mu'].values, test_size=0.5)
x_train = pd.DataFrame(x_train, columns=['smiles','Molecule'])
x_data = pd.DataFrame(x_data, columns=['smiles','Molecule'])
print('length of x_train:',len(x_train),'x_data',len(x_data),'y_train', len(y_train), 'y_data', len(y_data))
# +
# data statistics
max_mu = max(y_data)
min_mu = min(y_data)
number_of_data = len(y_data)
number1 = len(y_data[y_data > baseline])
number2 = len(y_data[y_data <= baseline])
print(' > baseline', number1, ' <= baseline', number2)
print("number_of_data", number_of_data, "max_mu", max_mu, "min_mu", min_mu)
#real_data.head(5)
#print(real_data['mu'])
# +
# functions to generate random data
def generate_random_seed_G(size):
    """Return `size` samples from a standard normal as a 1-D tensor
    (the Generator's noise seed)."""
    return torch.randn(size)
def generate_random_value(size):
    """Return `size` integer-valued random floats drawn uniformly from
    [0, baseline * 2), flattened into a 1-D FloatTensor."""
    values = np.random.randint(0, baseline * 2, (1, size))
    return torch.FloatTensor(values).view(-1)
def generate_random_label(size):
    """Return `size` random binary (0.0/1.0) labels as a flat FloatTensor."""
    labels = np.random.randint(0, 2, (1, size))
    return torch.FloatTensor(labels).view(-1)
class View(nn.Module):
    """Reshape incoming tensors to a fixed shape, usable inside nn.Sequential.

    The original stored `shape,` (a 1-tuple) and unpacked it again in
    forward, which obscured intent; storing the shape directly and passing
    it to Tensor.view is equivalent and clearer.
    """
    def __init__(self, shape):
        super().__init__()
        self.shape = shape  # int or tuple accepted, as by Tensor.view
    def forward(self, x):
        return x.view(self.shape)
print(generate_random_value(5))
print(generate_random_label(5))
# -
# Dataset for the Generator: only the 'Molecule' data is used; y_data is
# effectively ignored in the Generator (random values are used instead), but for simplicity we reuse the same SMILESMolDataset class.
data_dataset = SMILESMolDataset(x_data['Molecule'], y_data, smivec)
print('data_dataset[10]', data_dataset[10], 'y_data', y_data[10] )
# Real examples
train_dataset = SMILESMolDataset(x_train['Molecule'], y_train, smivec)
train_dataset[10]
# +
# return a random SMILES
def generate_random_seed(size):
    """Return `size` items drawn at random from the module-level data_dataset.

    Each item is a (sample, label, target) triple as produced by
    SMILESMolDataset.__getitem__.
    """
    # NOTE(review): randint(1, number_of_data) never picks index 0, and
    # assumes number_of_data == len(data_dataset) — confirm both.
    index_list = np.random.randint(1, number_of_data, (1, size))[0]
    random_data = []
    for i in index_list:
        random_data.append( data_dataset[i] )
    #random_data = data_dataset[10]
    return random_data
generate_random_seed(2)[0][1]
# +
# Model setting
epochs = 16
dims = smivec.dims
batch_size = 1 # The mini_batch size during training
G_input_size = 100 # The Generator input data size
learning_rate_D = 0.00070 # The Discriminator initial learning rate for the optimizer
learning_rate_G = 0.00070 # The Generator initial learning rate for the optimizer
# -
# discriminator class
class Discriminator(nn.Module):
    """MLP discriminator over flattened one-hot SMILES plus a label channel.

    Input is a (length, n_tokens + 1) tensor — the vectorized SMILES with the
    conditioning label appended as one extra channel per position — and the
    output is a single sigmoid probability (real vs. generated).
    Relies on module-level globals: `dims`, `learning_rate_D`, `View`.
    """
    def __init__(self):
        # initialise parent pytorch class
        super().__init__()
        # dims comes from the module-level SmilesVectorizer
        length = dims[0]
        number_tokens = dims[1] + 1 # add the label layer
        self.model = nn.Sequential(
            View(length * number_tokens),  # flatten (length, tokens+1)
            nn.Linear(length * number_tokens, 200),
            nn.LeakyReLU(0.02),
            nn.LayerNorm(200),
            nn.Linear(200, 1),
            nn.Sigmoid()
        )
        # MSE on the sigmoid output (LSGAN-style loss rather than BCE)
        self.loss_function = nn.MSELoss()
        self.optimiser = torch.optim.Adam(self.parameters(), lr=learning_rate_D)
        # monitor: step counter and sampled loss history for plotting
        self.counter = 0;
        self.progress = []
        pass
    def forward(self, input_tensor, label_tensor):
        # concatenate the label as an extra channel on the last axis
        x = torch.cat((input_tensor, label_tensor), -1)
        return self.model(x)
    # NOTE(review): this shadows nn.Module.train(mode) — calling D.train()
    # with no arguments no longer toggles training mode; confirm intentional.
    def train(self, inputs, label_tensor, targets):
        """One optimization step against `targets` (1 = real, 0 = fake)."""
        outputs = self.forward(inputs, label_tensor)
        loss = self.loss_function(outputs, targets)
        # monitor: record loss every 10 steps, heartbeat every 1000
        self.counter += 1;
        if (self.counter % 10 == 0):
            self.progress.append(loss.item())
            pass
        if (self.counter % 1000 == 0):
            print("counter = ", self.counter)
            pass
        # zero gradients, backpropagate, update weights
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()
        pass
    # the plot to check convergent
    def plot_progress(self):
        df = pandas.DataFrame(self.progress, columns=['loss'])
        df.plot(ylim=(0), figsize=(16,8), alpha=0.1, marker='.', grid=True, yticks=(0, 0.25, 0.5, 1.0, 5.0))
        pass
    pass
# +
# load real examples
from torch.utils.data import DataLoader
data_loader = torch.utils.data.DataLoader(train_dataset,batch_size=batch_size,shuffle=True, num_workers=4,drop_last=True )
# -
# set device to gpu
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device
# +
# %%time
# test discriminator can separate real data from random noise
D = Discriminator()
D.to(device)
for smiles,label, target in data_loader:
# real examples
target = target.view(1).to(device)
label = label.float().to(device)
label = label.view(batch_size, 1, 1).repeat(1, smiles.shape[1], 1)
D.train(smiles.to(device).float(), label, target)
# noise data
fake_input = []
fake_label = []
for item in generate_random_seed(batch_size):
fake_input.append(item[0])
fake_label.append(item[1])
target = torch.FloatTensor(fake_label).to(device)
fake_input = torch.FloatTensor(fake_input).to(device)
fake_label = torch.FloatTensor(fake_label).to(device)
fake_label = fake_label.view(batch_size, 1, 1).repeat(1, fake_input.shape[1], 1)
D.train(fake_input, fake_label, target)
pass
# -
# check Discriminator progress and converge
D.plot_progress()
# +
# unit test
i = 0
for smiles,label,target in data_loader:
# real
label = label.float().to(device)
label = label.view(batch_size, 1, 1).repeat(1, smiles.shape[1], 1)
result = D.forward(smiles.to(device).float(), label)
print('label', label)
print('result',result)
i += 1
if (i >= 1):
break
pass
# +
# unit test
fake_input = []
fake_label = []
for item in generate_random_seed(batch_size):
fake_input.append(item[0])
fake_label.append(item[1])
target = torch.FloatTensor(fake_label).view(1,len(fake_label),1).to(device)
fake_input = torch.FloatTensor(fake_input).to(device)
fake_label = torch.FloatTensor(fake_label).to(device)
fake_label = fake_label.view(batch_size, 1, 1).repeat(1, fake_input.shape[1], 1)
result = D.forward(fake_input, fake_label)
print('fake_label', fake_label)
print('result', result)
# +
# generator class
class Generator(nn.Module):
    """Generator mapping (noise, label) to an index into data_dataset.

    Rather than emitting a SMILES tensor directly, the network scores every
    dataset entry and forward() returns the argmax index; the corresponding
    molecule is then looked up. Relies on module-level globals:
    `G_input_size`, `number_of_data`, `learning_rate_G`, `data_dataset`,
    `x_data`, `device`, `generate_random_seed_G`.
    """
    def __init__(self, batch_size):
        super().__init__()
        self.batch_size = batch_size
        # input: concatenated noise + label vectors (each G_input_size long)
        self.model = nn.Sequential(
            nn.Linear(G_input_size * 2, 200),
            nn.LeakyReLU(0.02),
            #nn.LayerNorm(200),
            nn.Linear(200, number_of_data),
            nn.Sigmoid()
        )
        self.optimiser = torch.optim.Adam(self.parameters(), lr=learning_rate_G)
        # monitor: step counter and sampled loss history for plotting
        self.counter = 0
        self.progress = []
        self.stop = False
    def forward(self, seed_tensor, label_tensor):
        # combine seed and label into one input vector
        inputs = torch.cat((seed_tensor, label_tensor))
        outputs = self.model(inputs)
        # NOTE(review): argmax is not differentiable, so gradients cannot
        # flow back into the generator through this path — confirm intended.
        return torch.argmax(F.softmax(outputs, dim=0))
    # NOTE(review): shadows nn.Module.train(mode); kept for compatibility.
    def train(self, D, inputs, label_tensor, targets):
        """One generator step: pick a molecule, score it with D, update."""
        g_output = self.forward(inputs, label_tensor.repeat(G_input_size))
        # get the SMILES tensor the chosen index refers to
        g_smiles, _, _ = data_dataset[g_output]
        # convert SMILES to a (1, length, tokens) tensor
        g_input = torch.FloatTensor(g_smiles).to(device)
        g_input = g_input.reshape(1, g_input.shape[0], g_input.shape[1])
        # broadcast the label across sequence positions for the discriminator
        g_label = label_tensor.repeat(1, g_input.shape[1], 1)
        # pass to discriminator and score against the desired target
        d_output = D.forward(g_input, g_label)
        loss = D.loss_function(d_output, targets)
        # monitor: record loss every 10 steps
        self.counter += 1
        if (self.counter % 10 == 0):
            self.progress.append(loss.item())
        # zero gradients, perform a backward pass, update weights
        self.optimiser.zero_grad()
        loss.backward()
        self.optimiser.step()
    def get_smiles(self, label):
        """Return one SMILES string predicted for the given label value."""
        label_tensor = torch.zeros((G_input_size))
        for i in range(G_input_size):
            label_tensor[i] = label
        fake_input = torch.FloatTensor(generate_random_seed_G(G_input_size)).to(device)
        fake_label = torch.FloatTensor(label_tensor).to(device)
        # bug fix: use self rather than the module-level global G, so this
        # works on whichever Generator instance it is called on
        idx = self.forward(fake_input, fake_label).detach().cpu().numpy()
        return x_data['smiles'].iloc[idx]
    def plot_progress(self):
        """Plot the recorded loss history."""
        df = pandas.DataFrame(self.progress, columns=['loss'])
        df.plot(ylim=(0), figsize=(16,8), alpha=0.1, marker='.', grid=True, yticks=(0, 0.25, 0.5, 1.0, 5.0))
# +
# check the generator output is of the right type and shape
G = Generator(1)
G.to(device)
fake_input = torch.FloatTensor(generate_random_seed_G(G_input_size)).to(device)
fake_label = torch.FloatTensor(generate_random_value(G_input_size)).to(device)
idx = G.forward(fake_input, fake_label)
print('idx', idx)
data_dataset[idx]
# -
G.get_smiles(1.0)
# +
# %%time
# train Discriminator and Generator
D = Discriminator()
D.to(device)
G = Generator(batch_size)
G.to(device)
for epoch in range(epochs):
print ("epoch = ", epoch + 1)
# train Discriminator and Generator
for smiles,label,target in data_loader:
# train discriminator on true
target = torch.FloatTensor(torch.ones(smiles.shape[0])).to(device)
label = label.float().to(device)
label = label.view(batch_size, 1, 1).repeat(1, smiles.shape[1], 1)
D.train(smiles.to(device).float(), label, target)
# train discriminator on false
fake_input = torch.FloatTensor(generate_random_seed_G(G_input_size)).to(device)
fake_label = torch.FloatTensor(generate_random_label(1)).to(device)
idx = G.forward(fake_input, fake_label.repeat(G_input_size)).detach().cpu().numpy()
g_smiles, _, _ = data_dataset[idx]
g_input = torch.FloatTensor(g_smiles).to(device)
g_input = g_input.reshape(1, g_input.shape[0], g_input.shape[1])
g_label = fake_label.repeat(1, g_input.shape[1], 1)
target = torch.FloatTensor(torch.zeros(len(fake_label))).to(device)
D.train(g_input, g_label, target)
# train generator
fake_input = torch.FloatTensor(generate_random_seed_G(G_input_size)).to(device)
fake_label = torch.FloatTensor(generate_random_label(1)).to(device)
target = torch.FloatTensor([0.0]).to(device)
G.train(D, fake_input, fake_label, target)
pass
pass
# -
# check Discriminator progress and converge
D.plot_progress()
# test train result by getting a SMILES
G.plot_progress()
# a quick test the accuracy
qm9_dataset = pandas.read_csv('dataset/qm9.csv')
accuracy = []
number_of_verify = 100
for i in range(number_of_verify):
test_smiles = G.get_smiles(1.0)
real = qm9_dataset[qm9_dataset['smiles'] == test_smiles ]
#print('Test Result:', real.iat[0,0],real.iat[0,1], real.iat[0,1] > baseline)
accuracy.append(real.iat[0,1] > baseline)
np.mean(accuracy)
# +
# more test: Discriminator test by real examples
i = 0
for smiles,label,target in data_loader:
# real
target = torch.FloatTensor([1.0]).view(1,1,1).repeat(1, smiles.shape[0] ,1).to(device)
label = label.float().to(device)
label = label.view(batch_size, 1, 1).repeat(1, smiles.shape[1], 1)
print(D.forward(smiles.to(device).float(), label))
i += 1
if (i >= 10):
break
pass
# -
# more test: Discriminator test by random data
for i in range(10):
fake_input = []
fake_label = []
for item in generate_random_seed(batch_size):
fake_input.append(item[0])
fake_label.append(item[1])
fake_input = torch.FloatTensor(fake_input).to(device)
fake_label = torch.FloatTensor(fake_label).to(device)
fake_label = fake_label.view(batch_size, 1, 1).repeat(1, fake_input.shape[1], 1)
target = torch.FloatTensor([0.0]).view(1,1,1).repeat(1, fake_input.shape[0] ,1).to(device)
print(D.forward(fake_input, fake_label))
pass
# more test: generate SMILES
fake_input = torch.FloatTensor(generate_random_seed_G(G_input_size)).to(device)
fake_label = torch.FloatTensor(generate_random_value(G_input_size)).to(device)
result = G.forward(fake_input, fake_label).detach()
idx = result.cpu().numpy()
print('result', result, 'idx', idx, 'fake_input', fake_input.shape, 'fake_label.shape', fake_label.shape, 'result.shape', result.shape)
print(x_data['smiles'].iloc[idx])
# +
# save models
import pickle
from datetime import datetime
now = datetime.now()
date_time = now.strftime("%Y%m%d%H%M")
G_filename = 'pre_train_model/GAN19G' + date_time + '.sav'
print('save model to file:', G_filename)
pickle.dump(G, open(G_filename, 'wb'))
D_filename = 'pre_train_model/GAN19D' + date_time + '.sav'
print('save model to file:', D_filename)
pickle.dump(D, open(D_filename, 'wb'))
# -
#load models
'''
import pickle
Pkl_Filename = 'GAN19D202104150951.sav'
with open(Pkl_Filename, 'rb') as file:
D = pickle.load(file)
Pkl_Filename = 'GAN19G202104150951.sav'
with open(Pkl_Filename, 'rb') as file:
G = pickle.load(file)
'''
# +
# metrics function
from sklearn.metrics import *
def classification_metrics(Y_pred, Y_true, Y_auc_true, Y_auc_pred):
    """Compute (accuracy, ROC-AUC, precision, recall, F1).

    Precision/recall/F1 use weighted averaging; AUC is computed from the
    separate Y_auc_* arguments.
    """
    return (
        accuracy_score(Y_true, Y_pred),
        roc_auc_score(Y_auc_true, Y_auc_pred),
        precision_score(Y_true, Y_pred, average='weighted'),
        recall_score(Y_true, Y_pred, average='weighted'),
        f1_score(Y_true, Y_pred, average='weighted'),
    )
# -
qm9_dataset = pandas.read_csv('dataset/qm9.csv')
# +
# calculate metrics
y_true = []
prodict = []
y_auc_true = []
y_auc_prodict = []
number_of_verify = 100
for i in range(number_of_verify):
test_smiles = G.get_smiles(1.0)
real = qm9_dataset[qm9_dataset['smiles'] == test_smiles ]
#print('Test Result:', real.iat[0,0],real.iat[0,1], real.iat[0,1] > baseline)
y_true.append(float(real.iat[0,1] > baseline))
prodict.append(1.0)
y_auc_true.append(float(real.iat[0,1] > baseline))
y_auc_prodict.append(1.0)
# Fix auc issue
y_auc_true.append(0.0)
y_auc_prodict.append(0.0)
acc, auc_, precision, recall, f1score = classification_metrics(y_true, prodict, y_auc_true, y_auc_prodict)
print('acc', acc)
print('auc_', auc_)
print('precision', precision)
print('recall', recall)
print('f1score', f1score)
# +
# get model parameters
def count_parameters(model):
    """Return the number of trainable (requires_grad) parameters in model."""
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
print(D)
print("Total parameters of D", count_parameters(D))
print(G)
print("Total parameters of G", count_parameters(G))
# -
|
GAN19_Model3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/NeuromatchAcademy/course-content/blob/master/tutorials/W1D3_ModelFitting/W1D3_Tutorial5.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text"
# # Tutorial 5: Model Selection: Bias-variance trade-off
# **Week 1, Day 3: Model Fitting**
#
# **By Neuromatch Academy**
#
# **Content creators**: <NAME>, <NAME>, <NAME> with help from <NAME>
#
# **Content reviewers**: <NAME>, <NAME>, <NAME>
#
#
#
# + [markdown] colab_type="text"
# ---
# #Tutorial Objectives
#
# This is Tutorial 5 of a series on fitting models to data. We start with simple linear regression, using least squares optimization (Tutorial 1) and Maximum Likelihood Estimation (Tutorial 2). We will use bootstrapping to build confidence intervals around the inferred linear model parameters (Tutorial 3). We'll finish our exploration of regression models by generalizing to multiple linear regression and polynomial regression (Tutorial 4). We end by learning how to choose between these various models. We discuss the bias-variance trade-off (Tutorial 5) and Cross Validation for model selection (Tutorial 6).
#
# In this tutorial, we will learn about the bias-variance tradeoff and see it in action using polynomial regression models.
#
# Tutorial objectives:
#
# * Understand difference between test and train data
# * Compare train and test error for models of varying complexity
# * Understand how bias-variance tradeoff relates to what model we choose
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 517} colab_type="code" outputId="8d20c293-126c-47d3-bdce-3c108f1da640"
#@title Video 1: Bias Variance Tradeoff
from IPython.display import YouTubeVideo
video = YouTubeVideo(id="NcUH_seBcVw", width=854, height=480, fs=1)
print("Video available at https://youtube.com/watch?v=" + video.id)
video
# + [markdown] colab_type="text"
# ---
# # Setup
# + cellView="both" colab={} colab_type="code"
import numpy as np
import matplotlib.pyplot as plt
# + cellView="form" colab={} colab_type="code"
#@title Figure Settings
# %config InlineBackend.figure_format = 'retina'
plt.style.use("https://raw.githubusercontent.com/NeuromatchAcademy/course-content/master/nma.mplstyle")
# + cellView="form" colab={} colab_type="code"
#@title Helper functions
def ordinary_least_squares(x, y):
    """Ordinary least squares estimator for linear regression.

    Uses ``np.linalg.lstsq`` instead of explicitly inverting ``x.T @ x``:
    it is numerically more stable, and it also handles rank-deficient
    design matrices (returning the minimum-norm solution) where the
    normal-equation form would raise ``LinAlgError``.

    Args:
        x (ndarray): design matrix of shape (n_samples, n_regressors)
        y (ndarray): vector of measurements of shape (n_samples)

    Returns:
        ndarray: estimated parameter values of shape (n_regressors)
    """
    # rcond=None selects the modern (machine-precision) cutoff behavior.
    theta_hat, _residuals, _rank, _sv = np.linalg.lstsq(x, y, rcond=None)
    return theta_hat
def make_design_matrix(x, order):
    """Create the design matrix of inputs for use in polynomial regression.

    Args:
        x (ndarray): input vector of shape (n_samples) or matrix of
            shape (n_samples, n_features)
        order (scalar): polynomial regression order

    Returns:
        ndarray: design matrix for polynomial regression of shape
            (n_samples, order + 1) for a 1-D input
    """
    # Promote a 1-D input to a column vector so broadcasting works.
    if x.ndim == 1:
        x = x[:, None]
    # A single bias column stands in for x**0: even with several input
    # features we only ever want one column of ones.
    columns = [np.ones((x.shape[0], 1))]
    for degree in range(1, order + 1):
        columns.append(x ** degree)
    return np.hstack(columns)
def solve_poly_reg(x, y, max_order):
    """Fit a polynomial regression model for each order 0 through max_order.

    Args:
        x (ndarray): input vector of shape (n_samples)
        y (ndarray): vector of measurements of shape (n_samples)
        max_order (scalar): max order for polynomial fits

    Returns:
        dict: fitted weights for each polynomial model (dict key is order)
    """
    # One OLS fit per polynomial order, keyed by that order.
    return {
        order: ordinary_least_squares(make_design_matrix(x, order), y)
        for order in range(max_order + 1)
    }
# + [markdown] colab_type="text"
# ---
# # Section 1: Train vs test data
#
# The data used for the fitting procedure for a given model is the **training data**. In tutorial 4, we computed MSE on the training data of our polynomial regression models and compared training MSE across models. An additional important type of data is **test data**. This is held-out data that is not used (in any way) during the fitting procedure. When fitting models, we often want to consider both the train error (the quality of prediction on the training data) and the test error (the quality of prediction on the test data) as we will see in the next section.
#
# + [markdown] colab_type="text"
# We will generate some noisy data for use in this tutorial using a similar process as in Tutorial 4. However, now we will also generate test data. We want to see how our model generalizes beyond the range of values seen in the training phase. To accomplish this, we will generate x from a wider range of values ([-3, 3]). We then plot the train and test data together.
# + cellView="form" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="2d24db63-27eb-46a1-9182-b21b64d622bd"
#@title
#@markdown Execute this cell to simulate both training and test data
### Generate training data
np.random.seed(0)
n_train_samples = 50
x_train = np.random.uniform(-2, 2.5, n_train_samples) # sample from a uniform distribution over [-2, 2.5)
noise = np.random.randn(n_train_samples) # sample from a standard normal distribution
# True generative model is quadratic: y = x^2 - x - 2 plus unit Gaussian noise
y_train = x_train**2 - x_train - 2 + noise
### Generate testing data
n_test_samples = 20
# Test inputs span a WIDER range than training ([-3, 3) vs [-2, 2.5)) so we
# can probe generalization beyond the training domain.
x_test = np.random.uniform(-3, 3, n_test_samples) # sample from a uniform distribution over [-3, 3)
noise = np.random.randn(n_test_samples) # sample from a standard normal distribution
y_test = x_test**2 - x_test - 2 + noise
## Plot both train and test data
fig, ax = plt.subplots()
plt.title('Training & Test Data')
plt.plot(x_train, y_train, '.', markersize=15, label='Training')
plt.plot(x_test, y_test, 'g+', markersize=15, label='Test')
plt.legend()
plt.xlabel('x')
plt.ylabel('y');
# + [markdown] colab_type="text"
# ---
# # Section 2: Bias-variance tradeoff
#
# Finding a good model can be difficult. One of the most important concepts to keep in mind when modeling is the **bias-variance tradeoff**.
#
# **Bias** is the difference between the prediction of the model and the corresponding true output variables you are trying to predict. Models with high bias will not fit the training data well since the predictions are quite different from the true data. These high bias models are overly simplified - they do not have enough parameters and complexity to accurately capture the patterns in the data and are thus **underfitting**.
#
#
# **Variance** refers to the variability of model predictions for a given input. Essentially, do the model predictions change a lot with changes in the exact training data used? Models with high variance are highly dependent on the exact training data used - they will not generalize well to test data. These high variance models are **overfitting** to the data.
#
# In essence:
#
# * High bias, low variance models have high train and test error.
# * Low bias, high variance models have low train error, high test error
# * Low bias, low variance models have low train and test error
#
#
# As we can see from this list, we ideally want low bias and low variance models! These goals can be in conflict though - models with enough complexity to have low bias also tend to overfit and depend on the training data more. We need to decide on the correct tradeoff.
#
# In this section, we will see the bias-variance tradeoff in action with polynomial regression models of different orders.
#
# Graphical illustration of bias and variance.
# (Source: http://scott.fortmann-roe.com/docs/BiasVariance.html)
#
# 
#
# + [markdown] colab_type="text"
# We will first fit polynomial regression models of orders 0-5 on our simulated training data just as we did in Tutorial 4.
# + cellView="form" colab={} colab_type="code"
#@title
#@markdown Execute this cell to estimate theta_hats
max_order = 5
theta_hats = solve_poly_reg(x_train, y_train, max_order)
# + [markdown] colab_type="text"
# ### Exercise 1: Compute and compare train vs test error
#
# We will use MSE as our error metric again. Compute MSE on training data ($x_{train},y_{train}$) and test data ($x_{test}, y_{test}$) for each polynomial regression model (orders 0-5). Since you already developed code in T4 Exercise 4 for evaluating fit polynomials, we have ported that here into the function ``evaluate_poly_reg`` for your use.
#
# *Please think about after completing exercise before reading the following text! Do you think the order 0 model has high or low bias? High or low variance? How about the order 5 model?*
# + colab={} colab_type="code"
def evaluate_poly_reg(x, y, theta_hats, max_order):
    """Evaluate MSE of polynomial regression models on data.

    Args:
        x (ndarray): input vector of shape (n_samples)
        y (ndarray): vector of measurements of shape (n_samples)
        theta_hats (dict): fitted weights for each polynomial model
            (dict key is order)
        max_order (scalar): max order of polynomial fit

    Returns:
        ndarray: mean squared error for each order, shape (max_order + 1)
    """
    errors = np.zeros(max_order + 1)
    for order in range(max_order + 1):
        design = make_design_matrix(x, order)
        predictions = design @ theta_hats[order]
        errors[order] = np.mean((y - predictions) ** 2)
    return errors
# + colab={"base_uri": "https://localhost:8080/", "height": 447} colab_type="code" outputId="c242cb62-5e84-42a8-d01d-14ad4ce6475c"
def compute_mse(x_train,x_test,y_train,y_test,theta_hats,max_order):
    """Compute MSE on training data and test data.

    Student exercise: fill in the two `...` lines below, then remove the
    NotImplementedError.

    Args:
        x_train(ndarray): training data input vector of shape (n_samples)
        x_test(ndarray): test data input vector of shape (n_samples)
        y_train(ndarray): training vector of measurements of shape (n_samples)
        y_test(ndarray): test vector of measurements of shape (n_samples)
        theta_hats(dict): fitted weights for each polynomial model (dict key is order)
        max_order (scalar): max order of polynomial fit

    Returns:
        ndarray, ndarray: MSE error on training data and test data for each order
    """
    #######################################################
    ## TODO for students: calculate mse error for both sets
    ## Hint: use the evaluate_poly_reg helper defined above
    # Fill out function and remove
    raise NotImplementedError("Student excercise: calculate mse for train and test set")
    #######################################################
    mse_train = ...
    mse_test = ...
    return mse_train, mse_test
#Uncomment below to test your function
# mse_train, mse_test = compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order)
fig, ax = plt.subplots()
width = .35
# ax.bar(np.arange(max_order + 1) - width / 2, mse_train, width, label="train MSE")
# ax.bar(np.arange(max_order + 1) + width / 2, mse_test , width, label="test MSE")
ax.legend()
ax.set(xlabel='Polynomial order', ylabel='MSE', title ='Comparing polynomial fits');
# + cellView="both" colab={"base_uri": "https://localhost:8080/", "height": 430} colab_type="code" outputId="959f79ef-68d2-474d-9e3e-0666a9f8e83e"
# to_remove solution
def compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order):
    """Compute MSE on training data and test data.

    Args:
        x_train(ndarray): training data input vector of shape (n_samples)
        x_test(ndarray): test data input vector of shape (n_samples)
        y_train(ndarray): training vector of measurements of shape (n_samples)
        y_test(ndarray): test vector of measurements of shape (n_samples)
        theta_hats(dict): fitted weights for each polynomial model (dict key is order)
        max_order (scalar): max order of polynomial fit

    Returns:
        ndarray, ndarray: MSE error on training data and test data for each order
    """
    # Same evaluation routine on both splits; only the data differs.
    train_errors = evaluate_poly_reg(x_train, y_train, theta_hats, max_order)
    test_errors = evaluate_poly_reg(x_test, y_test, theta_hats, max_order)
    return train_errors, test_errors
mse_train, mse_test = compute_mse(x_train, x_test, y_train, y_test, theta_hats, max_order)
with plt.xkcd():
fig, ax = plt.subplots()
width = .35
ax.bar(np.arange(max_order + 1) - width / 2, mse_train, width, label="train MSE")
ax.bar(np.arange(max_order + 1) + width / 2, mse_test , width, label="test MSE")
ax.legend()
ax.set(xlabel='Polynomial order', ylabel='MSE', title ='Comparing polynomial fits');
# + [markdown] colab_type="text"
# As we can see from the plot above, more complex models (higher order polynomials) have lower MSE for training data. The overly simplified models (orders 0 and 1) have high MSE on the training data. As we add complexity to the model, we go from high bias to low bias.
#
# The MSE on test data follows a different pattern. The best test MSE is for an order 2 model - this makes sense as the data was generated with an order 2 model. Both simpler models and more complex models have higher test MSE.
#
# So to recap:
#
# Order 0 model: High bias, low variance
#
# Order 5 model: Low bias, high variance
#
# Order 2 model: Just right, low bias, low variance
#
# + [markdown] colab_type="text"
# ---
# # Summary
#
# - Training data is the data used for fitting, test data is held-out data.
# - We need to strike the right balance between bias and variance. Ideally we want to find a model with optimal model complexity that has both low bias and low variance
# - Too complex models have low bias and high variance.
# - Too simple models have high bias and low variance.
# + [markdown] colab_type="text"
# **Note**
# - Bias and variance are very important concepts in modern machine learning, but it has recently been observed that they do not necessarily trade off (see for example the phenomenon and theory of "double descent")
#
# **Further readings:**
# - [The elements of statistical learning](https://web.stanford.edu/~hastie/ElemStatLearn/) by <NAME> and Friedman
# + [markdown] colab_type="text"
# ---
# # Appendix
#
#
# + [markdown] colab_type="text"
# ## Bonus Exercise
#
# Prove the bias-variance decomposition for MSE
#
# $$
# \mathrm{E}_{x}\left[(y-\hat{y}(x ; \theta))^{2}\right]=\left(\operatorname{Bias}_{x}[\hat{y}(x ; \theta)]\right)^{2}+\operatorname{Var}_{x}[\hat{y}(x ; \theta)]+\sigma^{2}
# $$where
# $$\operatorname{Bias}_{x}[\hat{y}(x ; \theta)]=\mathrm{E}_{x}[\hat{y}(x ; \theta)]-y
# $$and
# $$\operatorname{Var}_{x}[\hat{y}(x ; \theta)]=\mathrm{E}_{x}\left[\hat{y}(x ; \theta)^{2}\right]-\mathrm{E}_{x}[\hat{y}(x ; \theta)]^{2}
# $$
#
# Hint: use $$\operatorname{Var}[X]=\mathrm{E}\left[X^{2}\right]-(\mathrm{E}[X])^{2}$$
|
tutorials/W1D3_ModelFitting/W1D3_Tutorial5.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # SLU01 Command Line & Text Editor - Learning Notebook 2
# ***
# In this SLU, we are introducing a powerful yet easy to use text editor. **Microsoft Visual Studio Code (VS Code)**.
# ### 1 | Download and Install
#
# Install on Windows:
# 1. download and install VS Code
# * https://code.visualstudio.com/
# 1. install the Windows Subsystem for Linux (WSL) extension
# * https://marketplace.visualstudio.com/items?itemName=ms-vscode-remote.vscode-remote-extensionpack
# 1. setup VS Code and WSL
# * https://code.visualstudio.com/docs/remote/wsl#_open-a-remote-folder-or-workspace
# 1. install the Python extension
# * https://marketplace.visualstudio.com/items?itemName=ms-python.python
#
# Install on Mac OS:
# 1. download and install VS Code
# * https://code.visualstudio.com/docs/setup/mac#_installation
# 1. setup VS Code so that you can start it from the command line
# * https://code.visualstudio.com/docs/setup/mac#_launching-from-the-command-line
# 1. install the Python extension
# * https://marketplace.visualstudio.com/items?itemName=ms-python.python
#
# Install on Ubuntu **(do not use this method if you are on Windows 10 and WSL):**
# 1. download and install the `.deb` package containing VS Code
# * https://code.visualstudio.com/download
# 1. install the Python extension
# * https://marketplace.visualstudio.com/items?itemName=ms-python.python
#
# ### 2 | Hello World
#
# After installing VS Code, lets write our first program and run it!
#
# 1. activate your virtual environment
#
# 2. go to the folder of this SLU
#
# 3. run the command `code .` which will open VS Code
#
# ```bash
# (mig-venv) mig@laptop:~/ws/ldsa/ds-prep-course-instructors-2021/Week 01/SLU02 - Command Line & Text Editor$ code .
# ```
#
# <img src="./assets/vscode.png" width="800"/>
#
# 4. create a new file called `hello.py` with the following code (and save the file)
#
# ```python
# print('Hello world!!')
# print('Yey I can write text here!')
# ```
# 5. run your Python program!
#
# ```bash
# (mig-venv) mig@laptop:~/ws/ldsa/ds-prep-course-instructors-2021/Week 01/SLU02 - Command Line & Text Editor/SLU11 - Command Lines & Text Editor$ ls
# 'Exercise notebook.ipynb' 'Learning notebook 2.ipynb' assets requirements.txt
# 'Learning notebook 1.ipynb' README.md hello.py
#
# (mig-venv) mig@laptop:~/ws/ldsa/ds-prep-course-instructors-2021/Week 01/SLU02 - Command Line & Text Editor/SLU11 - Command Lines & Text Editor$ python hello.py
# Hello world!!
# ```
|
Week 01/SLU01 - Command Line & Text Editor/Learning notebook 2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Treating a Function Like an Object
def factorial(n):
    """returns n!"""
    # Guard-clause base case, then recurse.
    if n < 2:
        return 1
    return n * factorial(n - 1)
factorial(42)
factorial.__doc__
type(factorial)
help(factorial)
fact = factorial
fact
fact(5)
map(factorial, range(11))
list(map(factorial, range(11)))
# # Higher-Order Functions
fruits = [' strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana']
sorted(fruits, key=len)
def reverse(word):
    """Return the given sequence in reverse order."""
    flipped = word[::-1]
    return flipped
reverse('testing')
sorted(fruits, key=reverse)
# ## Modern Replacement for map, filter, and reduce
list(map(fact, range(6)))
[fact(n) for n in range(6)]
list(map(factorial, filter(lambda n: n % 2, range(6))))
[factorial(n) for n in range(6) if n % 2]
from functools import reduce
from operator import add
reduce(add, range(100))
sum(range(100))
# # Anonymous Functions
fruits = [' strawberry', 'fig', 'apple', 'cherry', 'raspberry', 'banana']
sorted(fruits, key=lambda word: word[::-1])
# # User-Defined Callable Types
import random
class BingoCage:
    """Callable container that hands out its items in random order."""

    def __init__(self, items):
        # Keep a private copy so the caller's iterable is never mutated.
        self._items = list(items)
        random.shuffle(self._items)

    def pick(self):
        """Remove and return one item; raise LookupError when empty."""
        try:
            return self._items.pop()
        except IndexError:
            raise LookupError('pick from empty BingoCage')

    def __call__(self):
        # Calling the instance is a shortcut for pick().
        return self.pick()
bingo = BingoCage(range(3))
bingo.pick()
bingo()
callable(bingo)
# # Function Introspection
dir(factorial)
def upper_case_name(obj):
    """Return obj's first and last name, joined and upper-cased."""
    full_name = "{} {}".format(obj.first_name, obj.last_name)
    return full_name.upper()
# Function attribute used by e.g. Django admin as a column label.
upper_case_name.short_description = 'Customer name'
upper_case_name.__dict__
class C: pass
obj = C()
def func(): pass
sorted(set(dir(func)) - set(dir(obj)))
# # From Positional to Keyword-Only Parameters
def tag(name, *content, cls=None, **attrs):
    """Generate one or more HTML tags.

    Positional strings in *content become element bodies; keyword
    arguments become attributes ('cls' maps to the reserved word
    'class').  With no content, a self-closing tag is produced.
    """
    if cls is not None:
        attrs['class'] = cls
    # Attributes render in sorted order for deterministic output;
    # joining an empty sequence naturally yields ''.
    attr_str = ''.join(' %s="%s"' % (attr, value)
                       for attr, value in sorted(attrs.items()))
    if not content:
        return '<%s%s />' % (name, attr_str)
    return '\n'.join('<%s%s>%s</%s>' % (name, attr_str, body, name)
                     for body in content)
tag('br')
tag('p', 'hello')
print(tag('p', 'hello', 'world'))
tag('p', 'hello', id=33)
print(tag('p', 'hello', 'world', cls='sidebar'))
tag(content='testing', name="img")
my_tag = {'name': 'img', 'title': 'Sunset Boulevard',
'src': 'sunset.jpg', 'cls': 'framed'}
tag(**my_tag)
tag(name="img", 'testing')
def f(a, *, b):
    """Return positional a and keyword-only b as a tuple."""
    pair = (a, b)
    return pair
f(1, b=3)
# # Retrieving Information About Parameters
import bobo
@bobo.query('/')
def hello(person):
return 'Hello %s!' % person
tag.__code__
def clip(text, max_len=80):
    """Return text clipped at the last space before or after max_len.

    If the text contains no space at all, the full (rstripped) text is
    returned even when it is longer than max_len.
    """
    end = None  # cut index; None means "keep everything"
    if len(text) > max_len:
        # Last space strictly before the max_len boundary.
        space_before = text.rfind(' ', 0, max_len)
        if space_before >= 0:
            end = space_before
        else:
            # No space before the boundary: look after it instead.
            # NOTE(review): rfind picks the LAST space after max_len, so
            # nearly the whole text is kept; text.find (first space after
            # max_len) may have been intended — confirm before changing.
            space_after = text.rfind(' ', max_len)
            if space_after >= 0:
                end = space_after
    if end is None:  # no space was found at all
        end = len(text)
    return text[:end].rstrip()
clip.__defaults__
clip.__code__
clip.__code__.co_varnames
clip.__code__.co_argcount
from inspect import signature
sig = signature(clip)
sig
str(sig)
for name, param in sig.parameters.items():
print(param.kind, ':', name, '=', param.default)
import inspect
sig = inspect.signature(tag)
my_tag = {'name': 'img', 'title': 'Sunset Boulevard',
'src': 'sunset.jpg', 'cls': 'framed'}
bound_args = sig.bind(**my_tag)
bound_args
for name, value in bound_args.arguments.items():
print(name, '=', value)
del my_tag['name']
bound_args = sig.bind(**my_tag)
# # Function Annotations
def clip(text:str, max_len:'int > 0'=80) -> str:
    """Return text clipped at the last space before or after max_len.

    Annotated variant of `clip` — the annotations are introspected via
    __annotations__ and inspect.signature in the cells below, so they
    must stay exactly as written.
    """
    end = None  # cut index; None means "keep everything"
    if len(text) > max_len:
        # Last space strictly before the max_len boundary.
        space_before = text.rfind(' ', 0, max_len)
        if space_before >= 0:
            end = space_before
        else:
            # No space before the boundary: look after it instead.
            # NOTE(review): rfind keeps the LAST space after max_len;
            # text.find may have been intended — confirm before changing.
            space_after = text.rfind(' ', max_len)
            if space_after >= 0:
                end = space_after
    if end is None:  # no space was found at all
        end = len(text)
    return text[:end].rstrip()
clip.__annotations__
from inspect import signature
sig = signature(clip)
sig.return_annotation
for param in sig.parameters.values():
note = repr(param.annotation).ljust(13)
print(note, ':', param.name, '=', param.default)
# # Packages for Functional Programming
# ## The Operator Module
from functools import reduce
def fact(n):
    """Return n! by folding multiplication over 1..n (n >= 1)."""
    return reduce(lambda acc, value: acc * value, range(1, n + 1))
from operator import mul
def fact(n):
    """Return n! using operator.mul with reduce (n >= 1)."""
    numbers = range(1, n + 1)
    return reduce(mul, numbers)
metro_data = [
('Tokyo', 'JP', 36.933, (35.689722, 139.691667)),
('Delhi NCR', 'IN', 21.935, (28.613889, 77.208889)),
('Mexico City', 'MX', 20.142, (19.433333, -99.133333)),
('New York-Newark', 'US', 20.104, (40.808611, -74.020386)),
('Sao Paulo', 'BR', 19.649, (-23.547778, -46.635833)),
]
from operator import itemgetter
for city in sorted(metro_data, key=itemgetter(1)):
print(city)
cc_name = itemgetter(1, 0)
for city in metro_data:
print(cc_name(city))
from collections import namedtuple
LatLong = namedtuple('LatLong', 'lat long')
Metropolis = namedtuple('Metropolis', 'name cc pop coord')
metro_areas = [Metropolis(name, cc, pop, LatLong(lat, long))
for name, cc, pop, (lat, long) in metro_data]
metro_areas[0]
metro_areas[0].coord.lat
from operator import attrgetter
name_lat = attrgetter('name', 'coord.lat')
for city in sorted(metro_areas, key = attrgetter('coord.lat')):
print(name_lat(city))
import operator
print([name for name in dir(operator) if not name.startswith('_')])
from operator import methodcaller
s = 'The time has come'
upcase = methodcaller('upper')
upcase(s)
hiphenate = methodcaller('replace', ' ', '-')
hiphenate(s)
# ## Freezing Arguments with functools, partial
from operator import mul
from functools import partial
triple = partial(mul, 3)
triple(7)
list(map(triple, range(1, 10)))
import unicodedata, functools
nfc = functools.partial(unicodedata.normalize, 'NFC')
s1 = 'café'
s2 = 'cafe\u0301'
s1, s2
s1 == s2
nfc(s1) == nfc(s2)
def tag(name, *content, cls=None, **attrs):
    """Generate one or more HTML tags.

    Positional strings in *content become element bodies; keyword
    arguments become attributes ('cls' maps to the reserved word
    'class').  With no content, a self-closing tag is produced.
    """
    if cls is not None:
        attrs['class'] = cls
    # Sorted attributes give deterministic output; an empty attrs dict
    # joins to '' with no special case needed.
    attr_str = ''.join(' %s="%s"' % (attr, value)
                       for attr, value in sorted(attrs.items()))
    if not content:
        return '<%s%s />' % (name, attr_str)
    return '\n'.join('<%s%s>%s</%s>' % (name, attr_str, body, name)
                     for body in content)
tag
from functools import partial
picture = partial(tag, 'img', cls='pic-frame')
picture(src='wumpus.jpeg')
picture
picture.func
picture.args
picture.keywords
|
.ipynb_checkpoints/5. First-Class Functions-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from keras.models import Model
from keras.layers import Input
from keras.layers.pooling import AveragePooling2D
from keras import backend as K
import json
from collections import OrderedDict
def format_decimal(arr, places=6):
    """Round every value in arr to `places` decimal places."""
    scale = 10 ** places  # hoisted: same power of ten for every element
    return [round(value * scale) / scale for value in arr]
DATA = OrderedDict()
# ### AveragePooling2D
# **[pooling.AveragePooling2D.0] input 6x6x3, pool_size=(2, 2), strides=None, padding='valid', data_format='channels_last'**
# +
# Build a one-layer model containing only the pooling layer under test.
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(270)
# Random input in [-1, 1); run it through the layer to get the reference output.
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
# Flatten and round both tensors so the JSON fixture is compact and stable.
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
# Record the input/expected pair under a key matching the test-case name.
DATA['pooling.AveragePooling2D.0'] = {
    'input': {'data': data_in_formatted, 'shape': data_in_shape},
    'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.1] input 6x6x3, pool_size=(2, 2), strides=(1, 1), padding='valid', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(271)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.1'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.2] input 6x7x3, pool_size=(2, 2), strides=(2, 1), padding='valid', data_format='channels_last'**
# +
data_in_shape = (6, 7, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(2, 1), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(272)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.2'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.3] input 6x6x3, pool_size=(3, 3), strides=None, padding='valid', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=None, padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(273)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.3'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.4] input 6x6x3, pool_size=(3, 3), strides=(3, 3), padding='valid', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), padding='valid', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(274)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.4'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.5] input 6x6x3, pool_size=(2, 2), strides=None, padding='same', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(275)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.5'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.6] input 6x6x3, pool_size=(2, 2), strides=(1, 1), padding='same', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(1, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(276)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.6'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.7] input 6x7x3, pool_size=(2, 2), strides=(2, 1), padding='same', data_format='channels_last'**
# +
data_in_shape = (6, 7, 3)
L = AveragePooling2D(pool_size=(2, 2), strides=(2, 1), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(277)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.7'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.8] input 6x6x3, pool_size=(3, 3), strides=None, padding='same', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=None, padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(278)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.8'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.9] input 6x6x3, pool_size=(3, 3), strides=(3, 3), padding='same', data_format='channels_last'**
# +
data_in_shape = (6, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(3, 3), padding='same', data_format='channels_last')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(279)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.9'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.10] input 5x6x3, pool_size=(3, 3), strides=(2, 2), padding='valid', data_format='channels_first'**
# +
data_in_shape = (5, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(2, 2), padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(280)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.10'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.11] input 5x6x3, pool_size=(3, 3), strides=(1, 1), padding='same', data_format='channels_first'**
# +
data_in_shape = (5, 6, 3)
L = AveragePooling2D(pool_size=(3, 3), strides=(1, 1), padding='same', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(281)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.11'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# **[pooling.AveragePooling2D.12] input 4x6x4, pool_size=(2, 2), strides=None, padding='valid', data_format='channels_first'**
# +
data_in_shape = (4, 6, 4)
L = AveragePooling2D(pool_size=(2, 2), strides=None, padding='valid', data_format='channels_first')
layer_0 = Input(shape=data_in_shape)
layer_1 = L(layer_0)
model = Model(inputs=layer_0, outputs=layer_1)
# set weights to random (use seed for reproducibility)
np.random.seed(282)
data_in = 2 * np.random.random(data_in_shape) - 1
result = model.predict(np.array([data_in]))
data_out_shape = result[0].shape
data_in_formatted = format_decimal(data_in.ravel().tolist())
data_out_formatted = format_decimal(result[0].ravel().tolist())
print('')
print('in shape:', data_in_shape)
print('in:', data_in_formatted)
print('out shape:', data_out_shape)
print('out:', data_out_formatted)
DATA['pooling.AveragePooling2D.12'] = {
'input': {'data': data_in_formatted, 'shape': data_in_shape},
'expected': {'data': data_out_formatted, 'shape': data_out_shape}
}
# -
# ### export for Keras.js tests
# +
import os

filename = '../../../test/data/layers/pooling/AveragePooling2D.json'
# exist_ok avoids the check-then-create race of the previous
# `if not os.path.exists(...): os.makedirs(...)` guard.
os.makedirs(os.path.dirname(filename), exist_ok=True)
with open(filename, 'w') as f:
    json.dump(DATA, f)
# -
print(json.dumps(DATA))
|
notebooks/layers/pooling/AveragePooling2D.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + papermill={"duration": 9.863348, "end_time": "2021-04-30T06:27:57.590178", "exception": false, "start_time": "2021-04-30T06:27:47.726830", "status": "completed"} tags=[]
import os
import gc
import re
import cv2
import math
import numpy as np
import scipy as sp
import pandas as pd
import tensorflow as tf
from tensorflow import keras
import tensorflow_addons as tfa
from IPython.display import SVG
# import efficientnet.tfkeras as efn
from keras.utils import plot_model
import tensorflow.keras.layers as L
from keras.utils import model_to_dot
import tensorflow.keras.backend as K
from tensorflow.keras.models import Model
from kaggle_datasets import KaggleDatasets
from tensorflow.keras.applications import ResNet50
import seaborn as sns
from tqdm import tqdm
import matplotlib.cm as cm
from sklearn import metrics
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import MultiLabelBinarizer
tqdm.pandas()
import plotly.express as px
import plotly.graph_objects as go
import plotly.figure_factory as ff
from plotly.subplots import make_subplots
np.random.seed(0)
tf.random.set_seed(0)
import warnings
warnings.filterwarnings("ignore")
# + papermill={"duration": 0.332425, "end_time": "2021-04-30T06:27:57.930908", "exception": false, "start_time": "2021-04-30T06:27:57.598483", "status": "completed"} tags=[]
# Pipeline configuration and training-label preparation.
AUTO = tf.data.experimental.AUTOTUNE  # let tf.data pick parallelism
BATCH_SIZE = 16
IMAGE_PATH = "../input/plant-pathology-2021-fgvc8/train_images/"
# TEST_PATH = "../input/plant-pathology-2020-fgvc7/test.csv"
TRAIN_PATH = "../input/plant-pathology-2021-fgvc8/train.csv"
SUB_PATH = "../input/plant-pathology-2021-fgvc8/sample_submission.csv"
# Candidate input resolutions; index 7 (600px) matches the EfficientNetB7 model built below.
IMSIZES = (224, 240, 260, 300, 380, 456, 528, 600)
im_size = IMSIZES[7]
sub = pd.read_csv(SUB_PATH)
test_data = sub.copy()
train_data = pd.read_csv(TRAIN_PATH)
# 'labels' is a space-separated multi-label string; split into a list per row.
train_data['labels'] = train_data['labels'].apply(lambda string: string.split(' '))
s = list(train_data['labels'])
# One binary indicator column per class.
mlb = MultiLabelBinarizer()
trainx = pd.DataFrame(mlb.fit_transform(s), columns=mlb.classes_, index=train_data.index)
trainx
# + papermill={"duration": 0.007977, "end_time": "2021-04-30T06:27:57.947957", "exception": false, "start_time": "2021-04-30T06:27:57.939980", "status": "completed"} tags=[]
# + papermill={"duration": 2.360632, "end_time": "2021-04-30T06:28:00.316713", "exception": false, "start_time": "2021-04-30T06:27:57.956081", "status": "completed"} tags=[]
def format_path(st):
    """Prepend the test-image directory to a bare image filename."""
    return '../input/plant-pathology-2021-fgvc8/test_images/' + str(st)
def decode_image(filename, label=None, image_size=(im_size, im_size)):
    """Read a JPEG, scale pixels to [0, 1] and resize to `image_size`.

    When `label` is given it is passed through unchanged, so the function
    can be mapped over both path-only and (path, label) datasets.
    """
    raw = tf.io.read_file(filename)
    img = tf.image.decode_jpeg(raw, channels=3)
    img = tf.cast(img, tf.float32) / 255.0
    img = tf.image.resize(img, image_size)
    return img if label is None else (img, label)
def data_augment(image, label=None):
    """Random horizontal then vertical flip; label (if any) passes through."""
    image = tf.image.random_flip_up_down(tf.image.random_flip_left_right(image))
    return image if label is None else (image, label)
test_paths = test_data.image.apply(format_path).values
# Inference pipeline: decode/resize each image, then batch (no labels).
test_dataset = (
    tf.data.Dataset
    .from_tensor_slices(test_paths)
    .map(decode_image, num_parallel_calls=AUTO)
    .batch(BATCH_SIZE)
)
# valid_dataset = (
#     tf.data.Dataset
#     .from_tensor_slices((valid_paths, valid_labels))
#     .map(decode_image, num_parallel_calls=AUTO)
#     .batch(BATCH_SIZE)
#     .cache()
#     .prefetch(AUTO)
# )
# + papermill={"duration": 0.018881, "end_time": "2021-04-30T06:28:00.344373", "exception": false, "start_time": "2021-04-30T06:28:00.325492", "status": "completed"} tags=[]
# labels = (train_data.class_indices)
# labels = dict((v,k) for k,v in labels.items())
# Index -> class name mapping; order matches the 6-unit sigmoid output below.
labels = {0: 'complex', 1: 'frog_eye_leaf_spot', 2: 'healthy', 3: 'powdery_mildew', 4: 'rust', 5: 'scab'}
labels
# + papermill={"duration": 11.77731, "end_time": "2021-04-30T06:28:12.130467", "exception": false, "start_time": "2021-04-30T06:28:00.353157", "status": "completed"} tags=[]
# EfficientNetB7 backbone (weights loaded from file below, hence weights=None)
# with a 6-way sigmoid head for multi-label prediction.
model = tf.keras.applications.EfficientNetB7(weights=None, include_top=False, input_shape=(im_size, im_size, 3))
final_model = tf.keras.Sequential([
    model,
    tf.keras.layers.GlobalAveragePooling2D(),
    keras.layers.Dense(6,
                       kernel_initializer=keras.initializers.RandomUniform(seed=42),
                       bias_initializer=keras.initializers.Zeros(), name='dense_top', activation='sigmoid')
])
final_model.load_weights("../input/plantpathology2021trainedmodels/EffNetB7_EXP2_42.h5")
final_model.summary()
# + papermill={"duration": 8.552964, "end_time": "2021-04-30T06:28:20.700317", "exception": false, "start_time": "2021-04-30T06:28:12.147353", "status": "completed"} tags=[]
# Test-time augmentation was tried (loop below) but is disabled; a single
# forward pass over the test set is used instead.
TTA = 3
preds = []
# for i in range(TTA):
# # test_set.reset()
# preds.append(final_model.predict(test_dataset))
# preds = np.mean(np.array(preds), axis=0)
preds =final_model.predict(test_dataset, verbose=1)
# + papermill={"duration": 0.023267, "end_time": "2021-04-30T06:28:20.734033", "exception": false, "start_time": "2021-04-30T06:28:20.710766", "status": "completed"} tags=[]
preds
# + papermill={"duration": 0.026449, "end_time": "2021-04-30T06:28:20.772858", "exception": false, "start_time": "2021-04-30T06:28:20.746409", "status": "completed"} tags=[]
preds = preds.tolist()
# Per-class decision thresholds, index-aligned with `labels`:
# complex, frog_eye_leaf_spot, healthy, powdery_mildew, rust, scab.
thres = [0.33, 0.45, 0.3, 0.18, 0.5, 0.35]
indices = []
for pred in preds:
    # Every class whose sigmoid score clears its threshold.
    temp = [i for i, score in enumerate(pred) if score >= thres[i]]
    if temp:
        # 'healthy' (index 2) is exclusive: it overrides co-predicted diseases.
        indices.append([2] if 2 in temp else temp)
    else:
        # Nothing cleared its threshold: fall back to the single best class.
        indices.append([np.argmax(pred)])
print(indices)
testlabels = []
for image in indices:
    # Space-join the class names, matching the submission format.
    testlabels.append(' '.join(str(labels[i]) for i in image))
print(testlabels)
# + papermill={"duration": 0.029722, "end_time": "2021-04-30T06:28:20.814376", "exception": false, "start_time": "2021-04-30T06:28:20.784654", "status": "completed"} tags=[]
# Write the predicted label strings into the sample-submission frame and save.
sub['labels'] = testlabels
sub.to_csv('submission.csv', index=False)
sub
# + papermill={"duration": 0.017309, "end_time": "2021-04-30T06:28:20.842691", "exception": false, "start_time": "2021-04-30T06:28:20.825382", "status": "completed"} tags=[]
# pred_string = []
# for line in preds:
# s = ''
# for i in threshold.keys():
# if line[i] > threshold[i]:
# s = s + labels[i] + ' '
# if s == '':
# s = labels[2]
# pred_string.append(s)
# sub['labels'] = pred_string
# sub.to_csv('submission.csv', index=False)
# sub
# + papermill={"duration": 0.010823, "end_time": "2021-04-30T06:28:20.864635", "exception": false, "start_time": "2021-04-30T06:28:20.853812", "status": "completed"} tags=[]
|
18BCE080_088_092_101_247_Submission.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### Schelling Segregation model
# This example of implementation of Schelling Segregation model is adapted from https://colab.research.google.com/github/QuantEcon/lecture-python.notebooks/blob/master/schelling.ipynb
#
# In 1969, Thomas Schelling developed a simple but striking model of racial segregation [Sch69].
#
# His model studies the dynamics of racially mixed neighborhoods.
#
# Like much of Schelling’s work, the model shows how local interactions can lead to surprising aggregate structure.
#
# In particular, it shows that relatively mild preference for neighbors of similar race can lead in aggregate to the collapse of mixed neighborhoods, and high levels of segregation.
#
# In recognition of this and other research, Schelling was awarded the 2005 Nobel Prize in Economic Sciences (joint with Robert Aumann).
# Import necessary libraries
from random import uniform, seed
from math import sqrt
import matplotlib.pyplot as plt
# %matplotlib inline
# ## The Model
#
#
#
# ### Set-Up
#
# Suppose we have two types of people: orange people and green people.
#
# For the purpose of this lecture, we will assume there are 250 of each type.
#
# These agents all live on a single unit square.
#
# The location of an agent is just a point $ (x, y) $, where $ 0 < x, y < 1 $.
#
# ### Preferences
#
# We will say that an agent is *happy* if half or more of her 10 nearest neighbors are of the same type.
#
# Here ‘nearest’ is in terms of [Euclidean distance](https://en.wikipedia.org/wiki/Euclidean_distance).
#
# An agent who is not happy is called *unhappy*.
#
# An important point here is that agents are not averse to living in mixed areas.
#
# They are perfectly happy if half their neighbors are of the other color.
#
# ### Behavior
#
# Initially, agents are mixed together (integrated).
#
# In particular, the initial location of each agent is an independent draw from a bivariate uniform distribution on $ S = (0, 1)^2 $.
#
# Now, cycling through the set of all agents, each agent is now given the chance to stay or move.
#
# We assume that each agent will stay put if they are happy and move if unhappy.
#
# The algorithm for moving is as follows
#
# 1. Draw a random location in $ S $
# 1. If happy at new location, move there
# 1. Else, go to step 1
#
#
# In this way, we cycle continuously through the agents, moving as required.
#
# We continue to cycle until no one wishes to move.
# +
seed(10)  # For reproducible random numbers

class Agent:
    """A point agent of a given type living on the unit square.

    Relies on module-level `num_neighbors` and `require_same_type`
    (defined in the simulation cell below) when evaluating happiness.
    """

    def __init__(self, type):
        self.type = type
        self.draw_location()

    def draw_location(self):
        "Place the agent uniformly at random on the unit square."
        self.location = uniform(0, 1), uniform(0, 1)

    def get_distance(self, other):
        "Computes the euclidean distance between self and other agent."
        a = (self.location[0] - other.location[0])**2
        b = (self.location[1] - other.location[1])**2
        return sqrt(a + b)

    def happy(self, agents):
        "True if sufficient number of nearest neighbors are of the same type."
        distances = []
        # distances is a list of pairs (d, agent), where d is distance from
        # agent to self
        for agent in agents:
            if self != agent:
                distance = self.get_distance(agent)
                distances.append((distance, agent))
        # Sort on distance only. A plain tuple sort would fall back to
        # comparing Agent objects whenever two distances tie, and Agent is
        # not orderable, so that would raise TypeError.
        distances.sort(key=lambda pair: pair[0])
        # == Extract the neighboring agents == #
        neighbors = [agent for d, agent in distances[:num_neighbors]]
        # == Count how many neighbors have the same type as self == #
        num_same_type = sum(self.type == agent.type for agent in neighbors)
        return num_same_type >= require_same_type

    def update(self, agents):
        "If not happy, then randomly choose new locations until happy."
        while not self.happy(agents):
            self.draw_location()
# -
# -
def plot_distribution(agents, cycle_num):
    "Plot the distribution of agents after cycle_num rounds of the loop."
    # Split coordinates by agent type so each type gets its own colour.
    xs = {0: [], 1: []}
    ys = {0: [], 1: []}
    for agent in agents:
        x, y = agent.location
        group = 0 if agent.type == 0 else 1
        xs[group].append(x)
        ys[group].append(y)
    fig, ax = plt.subplots(figsize=(8, 8))
    plot_args = {'markersize': 8, 'alpha': 0.6}
    ax.set_facecolor('azure')
    ax.plot(xs[0], ys[0], 'o', markerfacecolor='orange', **plot_args)
    ax.plot(xs[1], ys[1], 'o', markerfacecolor='green', **plot_args)
    ax.set_title(f'Cycle {cycle_num-1}')
    plt.show()
# +
num_of_type_0 = 250
num_of_type_1 = 250
num_neighbors = 10 # Number of agents regarded as neighbors
require_same_type = 8 # Want at least this many neighbors to be same type
# == Create a list of agents == #
agents = []
for i in range(num_of_type_0):
    agents.append(Agent(0)) # Add agents of type 0 to the list
for i in range(num_of_type_1):
    agents.append(Agent(1)) # Add agents of type 1 to the list
# Note: you can also do this with Python comprehensions
#agents = [Agent(0) for i in range(num_of_type_0)]
#agents.extend(Agent(1) for i in range(num_of_type_1))
count = 1
# == Loop until none wishes to move == #
# Each cycle: plot the current state, then give every agent a chance to
# relocate (Agent.update moves it until it is happy). Converges when a
# full pass leaves every agent in place.
while True:
    print('Entering loop ', count)
    plot_distribution(agents, count)
    count += 1
    no_one_moved = True
    for agent in agents:
        old_location = agent.location
        agent.update(agents)
        if agent.location != old_location:
            no_one_moved = False
    if no_one_moved:
        break
print('Converged, terminating.')
# -
|
Procedurally-generated narrative/SchellingModel.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp metric
#export
import os
from time import time
import gc
import pandas as pd
import numpy as np
from scipy.sparse import csr_matrix
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
plt.rcParams['figure.figsize'] = (14,6)
plt.rcParams['font.size'] = 16
# -
#hide
from nbdev.showdoc import *
# +
# For fast testing for Continuous Integration
# PATH_DATA = 'small_data'
# PATH_DATA_RAW = 'small_data/raw'
# os.listdir(PATH_DATA_RAW)
# -
#no_test
PATH_DATA = 'data'
PATH_DATA_RAW = 'data/raw'
os.listdir(PATH_DATA_RAW)
# Read the big sales file in chunks and reassemble it.
chunks = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sales_train_evaluation.csv'), chunksize=1000)
df_stv = pd.concat(list(chunks)) # Safe for low RAM situation
df_cal = pd.read_csv(os.path.join(PATH_DATA_RAW, 'calendar.csv'))
df_prices = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sell_prices.csv'))
df_ss = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sample_submission.csv'))
# # WRMSSE metric implementation
# > The weighted root mean scaled squared error is the competition metric for this competition
# We need to implement the competition metric so that we can validate our prediction methods over different time periods. This notebook documents the intuition and code needed to build the `WRMSSE` object. You may want to jump to it and see how to use it, but here are the main purposes of the object:
#
# 1. Score predictions with the competition metric
# 2. Save and analyse scores for different models and validation sets
# 3. Save the weights and scales used by the competition metric to create custom objective functions
# ## Building from the math up
# ### RMSSE of each series
# $$
# \mathrm{RMSSE} = \sqrt{
# \frac{1}{h} \frac{\sum_{t = n + 1}^{n + h}(Y_t - \hat{Y}_t)^2}{\frac{1}{n - 1}\sum_{t = 2}^{n}(Y_t - Y_{t - 1})^2}
# }.
# $$
#
# $Y_t$ is the actual value at $t$, $\hat{Y}_t$ the forecasted value, $n$ the number of time series values, and, $h$ the forecasting horizon.
#
# **Things to notice**
# * The bottom of the numerator sums over all training days through $n$, the last day before the forecast horizon. Its purpose is to normalize the errors of the series by scaling them by the average day-to-day difference in sales. This means that the RMSSE scores of any two series can be compared fairly, since both are scaled by their own volatility.
# * The top of the numerator sums over the 28 days of the forecast horizon, starting on day $n$ + 1
# * A prediction model that predicted the previous days sales should get a score of around 1.
#
# The metric in this competition sort of compares the models performance to a naive model that always predicts that the next day will be the same as the current day:
# ### WRMSSE
#
# $$
# \mathrm{WRMSSE} = \sum_{i=1}^{42,840} W_i \times \mathrm{RMSSE_i}
# $$
#
# $$
# W_i = \frac{\sum_{j = n - 28}^{n - 1} volume\_series_i} {\sum_{j = n - 28}^{n - 1} volume\_all\_series\_in\_level}
# $$
#
# The weight of each series will be computed based on the last 28 observations of the training
# sample of the dataset, i.e., the cumulative actual dollar sales that each series displayed in that particular
# period (sum of units sold multiplied by their respective price).
# Each of the 12 levels of aggregation is comprised of series whose weights add up to 1, and every product appears once in each level.
#
# To simplify notation, I like to write the WRMSSE like this:
#
# $$
# WRMSSE = \sum_{i=1}^{42,840} \left(W_i \times \sqrt{\frac{\frac{1}{28}\sum_{j=1}^{28}{(D_j)^2}}{S_i}}\right)
# $$
# * $W_i$: the weight of the ith series
# * $S_i$: the scaling factor of the ith series
# * $D_j$: The difference between sales and predicted sales for the ith series on day j
#
# which further simplifies to this:
# $$
# WRMSSE = \sum_{i=1}^{42,840} \frac{W_i}{\sqrt{S_i}} \times \sqrt{\frac{1}{28}\sum_{j=1}^{28}{(D_j)^2}}
# $$
#
# ### Generating all series, weights, and scaling factors
# To build a WRMSSE scoring object, we will need to create tools that can apply this calculation as efficiently as possible. We will develop a sparse aggregation matrix, created with a one-hot-encoding style, that serves to compute the aggregations for all 42840 series from the bottom level 30490 series. After the aggregation matrix, we will develop methods to compute the weights W and the scaling factor S for all series. We will then combine our tools to create a WRMSSE object, capable of scoring predictions for any 28-day validation period of known data.
#hide
# Visual separator in the rendered notebook output.
print('#' * 100)
################## Variables ####################
# We will work through an example of calculating
# the WRMSSE by level, and overall, before wrapping
# the pieces into the WRMSSE class below.
START_TEST = 1914  # first day of the validation horizon used in the walkthrough
# END_TRAIN = START_TEST - 1 # last training day
# #### Aggregation matrix
# We know we can compute all the aggregated series by
# using matrix multiplication with the correctly
# designed aggregation matrix. Our daily sales have the
# shape (number_items, prediction_horizon). Our agg
# matrix will need to have the shape
# (number_series, number_items) so that we can execute
# the matrix multiplication agg x sales.
#
# We need a list of the aggregating features that
# will align with our weights and scales so that
# our matrices will match up. Level 1 does not need
# a column to group by.
#
# For each series of each level of the WRMSSE, we will
# use pandas get_dummies function on the corresponding
# column or columns.
#export
def get_agg(df_stv):
    """Build the sparse aggregation matrix plus a (level, id) MultiIndex
    that aligns weights and scales with its rows.

    Level 1 is the grand total; levels 2-12 one-hot encode the grouping
    columns (state, store, category, department, their combinations, and
    item/state, item/store). Each dummy frame is transposed so rows are
    aggregated series and columns are the bottom-level items.
    """
    level_keys = [
        df_stv.state_id,
        df_stv.store_id,
        df_stv.cat_id,
        df_stv.dept_id,
        df_stv.state_id + '_' + df_stv.cat_id,
        df_stv.state_id + '_' + df_stv.dept_id,
        df_stv.store_id + '_' + df_stv.cat_id,
        df_stv.store_id + '_' + df_stv.dept_id,
        df_stv.item_id,
        df_stv.item_id + '_' + df_stv.state_id,
        df_stv.item_id + '_' + df_stv.store_id,
    ]
    # Level 1: a single all-ones row aggregating everything.
    total = pd.DataFrame(
        {'Total': np.ones((df_stv.shape[0],)).astype('int8')},
        index=df_stv.index).T
    dummy_frames = [total] + [pd.get_dummies(key, dtype=np.int8).T for key in level_keys]
    agg_matrix = pd.concat(dummy_frames, keys=range(1, 13), names=['level', 'id'])
    # Save the index so callers can align weight/scale frames with the rows.
    agg_index = agg_matrix.index
    # Sparse format saves space and speeds up the matrix multiplications.
    return csr_matrix(agg_matrix), agg_index
# Build the aggregation matrix once for the full training frame.
agg_matrix_csr, agg_index = get_agg(df_stv)
display(agg_index[:5])
print('Number of series per each level')
agg_index.get_level_values(0).value_counts(sort=False)
# #### Weights and scales
#export
def get_df_weights(df_stv, df_cal, df_prices, agg_index, agg_matrix_csr, start_test=1914):
    """Return a dataframe aligned with ``agg_index`` holding, per series:

    - ``weight``: the series' dollar-sales share (the W_i of the WRMSSE),
      computed over the 28 days just before ``start_test``;
    - ``scale``: 1/sqrt(S_i), where S_i is the mean squared day-to-day
      sales difference over training days, leading zero-sale days excluded;
    - ``scaled_weight``: weight * scale, i.e. W_i / sqrt(S_i), so the
      WRMSSE reduces to scaled_weight * per-series RMSE.

    Parameters
    ----------
    df_stv : sales frame, one row per item/store with d_* day columns
    df_cal : calendar frame (maps day label ``d`` to week ``wm_yr_wk``)
    df_prices : sell-price frame keyed by store, item and week
    agg_index, agg_matrix_csr : output of ``get_agg``
    start_test : first day of the forecast horizon
    """
    ##### Weights: dollar sales over the 28 days before the horizon.
    d_cols = [f'd_{i}' for i in range(start_test - 28, start_test)]
    df = df_stv[['store_id', 'item_id'] + d_cols]
    # Long format so calendar (week) and price info can be merged on.
    df = df.melt(id_vars=['store_id', 'item_id'],
                 var_name='d',
                 value_name = 'sales')
    df = df.merge(df_cal[['d', 'wm_yr_wk']], on='d', how='left')
    df = df.merge(df_prices, on=['store_id', 'item_id', 'wm_yr_wk'], how='left')
    df['dollar_sales'] = df.sales * df.sell_price
    # Total dollar sales per item/store; sort=False keeps agg_index order.
    dollar_sales = df.groupby(['store_id', 'item_id'], sort=False)['dollar_sales'].sum()
    del df
    # Aggregate dollar sales to all series, aligned with agg_index.
    df_weights = pd.DataFrame(index = agg_index)
    df_weights['dollar_sales'] = agg_matrix_csr * dollar_sales
    # Row 0 is the grand total, so each level's weights sum to 1.
    df_weights['weight'] = df_weights.dollar_sales / df_weights.dollar_sales.values[0]
    del df_weights['dollar_sales']
    ##################### Scaling factor #######################
    # All training days before start_test; iloc[:, 6:] drops the id columns.
    df = df_stv.loc[:, :f'd_{start_test - 1}'].iloc[:, 6:]
    agg_series = agg_matrix_csr * df.values
    # Mask days before each series' first sale so nan-aware ops skip them.
    no_sale = np.cumsum(agg_series, axis=1) == 0
    agg_series = np.where(no_sale, np.nan, agg_series)
    # S_i: mean squared day-to-day difference, ignoring the NaN lead-in.
    scale = np.nanmean(np.diff(agg_series, axis=1) ** 2, axis=1)
    df_weights['scale'] = 1 / np.sqrt(scale)
    df_weights['scaled_weight'] = df_weights.weight * df_weights.scale
    return df_weights
# +
# Weights/scales for the horizon starting at day 1914 (validation split).
df_weights = get_df_weights(df_stv, df_cal, df_prices, agg_index, agg_matrix_csr, start_test=1914)
display(df_weights)
print("All weights add to 1 for each level")
df_weights.groupby(level=0)['weight'].sum().to_frame()
# -
# ### Sample scoring
# Let's code a simple example using the last month as predicted sales
# +
actuals = df_stv.iloc[:, -28:].values  # final 28 days are the "truth"
preds = df_stv.iloc[:, -28 * 2: -28].values  # previous 28 days as a naive forecast
base_errors = actuals - preds
# Roll bottom-level errors up to all series with one sparse multiply.
errors = agg_matrix_csr * base_errors
rmse = np.sqrt(np.mean((errors)**2, axis=1))
wrmsse_by_series = rmse * df_weights.scaled_weight
df_scores = pd.DataFrame(wrmsse_by_series).rename(
    mapper={'scaled_weight': 'WRMSSE'}, axis=1)
# Each level's weights sum to 1, so dividing by 12 averages over levels.
wrmsse = np.sum(wrmsse_by_series) / 12
print(wrmsse)
print('Scores for all series')
display(df_scores)
# -
# ### Saving scores
# Instead of saving the scores for each series, I will only save scores for each level, and the total score.
model_name = 'last_month_sales'
start_test = 1914
# Sum per-series scores within each level; row 13 is the overall mean.
level_scores = df_scores.groupby(level=0).sum()
level_scores.loc[13] = level_scores.mean()
level_scores['model_name'] = model_name
level_scores['start_test'] = start_test
level_scores.reset_index(inplace=True)
level_scores
# #### Keeping track of model / validation set scores.
# I need to be able to keep track of the scores for each model / validation set combination.
# To make organizing scores easier, I want to combine the columns 'model_name',
# 'level', 'start_test' into a single column'id' so I can store the scores with
# a single column as a unique identifier. I will also want to reverse this process later.
#export
def combine_cols(df, cols: list, sep='__', name='id', reverse=False):
    """Return a copy of `df` with `cols` joined into one string column
    `name` (separated by `sep`), or — when `reverse` is True — with
    `name` split back out into `cols`. Reversed values come back as
    strings, since the round trip goes through str.split."""
    out = df.copy()
    if reverse:
        ordered = cols + out.drop(name, axis=1).columns.tolist()
        out[cols] = out[name].str.split(sep).tolist()
    else:
        ordered = [name] + out.drop(cols, axis=1).columns.tolist()
        out[name] = out[cols].astype(str).apply(sep.join, axis=1)
    return out[ordered]
# +
# Round-trip demo: combine the id columns, then split them back out.
cols, sep, name = ['model_name', 'level', 'start_test'], '__', 'id'
print('level_scores with columns combined')
level_scores = combine_cols(level_scores, cols, sep, name)
display(level_scores.head(3))
print('reversed')
df_r = combine_cols(level_scores, cols, sep, name, reverse=True)
display(df_r.head(3))
# -
# #### What if the model / validation combo already exists
# I want to be able to append my scores to a dataframe so that I will not override previously logged scores, nor will I have copies. I will need a function that ensures I don't have any problems.
#export
def append_df_unique_id(df, df_new, id_col='id') -> pd.DataFrame:
    """Return a copy of `df` with `df_new` appended, prefixing '(n)_' to
    `df_new`'s `id_col` when that id already exists in `df`, so repeated
    logging of the same model/validation combination neither overwrites
    nor duplicates a unique identifier.

    `id_col` should be of string type.
    """
    if not (id_col in df.columns and id_col in df_new.columns):
        # Nothing to deduplicate on: plain concatenation.
        return pd.concat([df, df_new])
    df = df.copy()
    df_new = df_new.copy()
    ids = df[id_col].tolist()
    new_id = df_new[id_col][0]
    if new_id in ids:
        # Find the first free '(n)_' prefix for the base id. (The old code
        # compounded prefixes onto new_id inside the loop, so it could pick
        # an '(n)_' prefix that already existed, e.g. a second '(2)_a'.)
        n = 1
        while f'({n})_' + new_id in ids:
            n += 1
        df_new[id_col] = f'({n})_' + df_new[id_col]
    # pd.concat replaces DataFrame.append, which was removed in pandas 2.0.
    return pd.concat([df, df_new])
# Appending the same scores repeatedly must create '(1)_', '(2)_', ... ids
# rather than duplicates or overwrites.
tmp = level_scores.head(3).copy()
df = tmp.copy()
for _ in range(3):
    df = append_df_unique_id(df, tmp, id_col='id')
print('No copies, no overrides')
display(df)
# ### Visualizing results
model_name = 'all_ones'
fig, ax = plt.subplots()
# One bar per aggregation level; the dotted line marks the overall WRMSSE.
level_scores = df_scores.groupby(level=0).sum()
sns.barplot(x=level_scores.index, y=level_scores['WRMSSE'])
plt.axhline(level_scores.mean()[0], color='blue', alpha=.5, ls=':')
name_and_days = f'{model_name} day {START_TEST} to {START_TEST + 27}'
title = f'{name_and_days} WRMSSE total: {round(level_scores.mean()[0], 4)}'
plt.title(title, fontsize=20, fontweight='bold')
# Annotate each bar with its level's score (levels are 1-based).
for i in range(12):
    ax.text(i, level_scores['WRMSSE'][i+1],
            str(round(level_scores['WRMSSE'][i+1], 4)),
            color='black', ha='center', fontsize=15)
plt.show()
# ### Saving predictions for competition scoring
# The host wants predictions submitted in a format like the sample submission file.
print('The id column needs a _validation or _evaluation tag')
display(df_ss.head())
display(df_ss.tail())
# **Example**
# +
# Attach the level-12 (item/store) series ids to the raw prediction matrix.
df_preds = pd.DataFrame(preds, index=df_scores.loc[12].index).reset_index()
test=False
# Tag ids for the phase being submitted.
if not test: df_preds['id'] = df_preds['id'] + '_validation'
else: df_preds['id'] = df_preds['id'] + '_evaluation'
# Align to the sample submission's row order; unmatched ids get zeros.
df_sub = df_ss[['id']].merge(df_preds, on='id', how='left').fillna(0)
file_name = 'sub_' + model_name + '.csv'
df_sub.to_csv(file_name, index=False)
# -
pd.read_csv(file_name)
# !rm {file_name}
# ## Main object
#export
class WRMSSE():
    def __init__(self, PATH_DATA_RAW: str='data/raw', start_test: int=1914, horizon: int=28, df_stv_trunc: pd.DataFrame=None):
        """Load data and precompute the weights/scales for the forecast
        horizon starting on day `start_test` and spanning `horizon` days.

        Parameters
        ----------
        PATH_DATA_RAW : directory holding the raw competition csv files
        start_test : first day (d_ number) of the evaluation horizon
        horizon : number of days scored (28 for the competition)
        df_stv_trunc : optional pre-filtered sales frame, used instead of
            reading sales_train_evaluation.csv when provided
        """
        if type(df_stv_trunc) == pd.DataFrame: # Provided after filtering out certain items
            self.df_stv = df_stv_trunc
        else:
            self.df_stv = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sales_train_evaluation.csv'))
        self.df_cal = pd.read_csv(os.path.join(PATH_DATA_RAW, 'calendar.csv'))
        self.df_prices = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sell_prices.csv'))
        self.df_ss = pd.read_csv(os.path.join(PATH_DATA_RAW, 'sample_submission.csv'))
        self.start_test = start_test
        self.end_test = start_test + horizon - 1
        self.preds, self.actuals = None, None
        self.df_series_scores, self.model_name = None, None
        # Reload previously logged scores if a scores.csv already exists.
        path = os.path.join(PATH_DATA_RAW, '..', 'scores.csv')
        if os.path.exists(path):
            self.scores = pd.read_csv(path)
        else:
            self.scores = pd.DataFrame()
        # Actuals only exist when the horizon lies inside the known data.
        if f'd_{self.end_test}' in self.df_stv.columns:
            self.actuals = self.df_stv.loc[:, f'd_{start_test}': f'd_{self.end_test}'].values
        self.agg_matrix_csr, self.agg_index = get_agg(self.df_stv)
        self.df_weights = get_df_weights(self.df_stv, self.df_cal, self.df_prices,
                                         self.agg_index, self.agg_matrix_csr, start_test)
        # Level-12 (item/store) weights keyed like submission ids, for use
        # as per-sample weights in custom lightgbm objectives/metrics.
        self.w_12 = self.df_weights.loc[12]
        self.w_12.index += '_evaluation'
def score(self, preds: np.array, fast: bool=True, model_name: str=None) -> float:
"""Scores preds against `self.actuals`. If `fast` is set to True, nothing
will be saved. If `fast` is set to False, `self.df_series_scores` will be
set to a dataframe with the scores for each of the 40280 series, and if
`model_name` name is also passed, `self.model_name` is set, `self.scores`
will be updated with the 12 levels scores along with total score and then
saved to csv.
"""
if type(preds) == pd.DataFrame:
preds = preds.values
base_errors = self.actuals - preds
errors = self.agg_matrix_csr * base_errors
mse = np.sqrt(np.mean((errors)**2, axis=1))
wrmsse_by_series = mse * self.df_weights.scaled_weight
wrmsse = np.sum(wrmsse_by_series) / 12
if not fast:
self.preds = preds
self.df_series_scores = pd.DataFrame(wrmsse_by_series).rename(
mapper={'scaled_weight': 'WRMSSE'}, axis=1)
if model_name:
self.model_name = model_name
print(f'Saving level scores with model name: {model_name}')
self._append_level_scores(self.df_series_scores, model_name)
return wrmsse
    def feval(self, preds, train_data) -> tuple:
        """Custom metric hook for lightgbm.

        lightgbm hands predictions over as a flat 1-D array; reshape to
        (horizon, n_items) and transpose so the matrix matches
        `self.actuals` (n_items, horizon) before scoring.
        Returns the lightgbm feval triple (name, value, is_higher_better).
        """
        preds = preds.reshape(self.actuals.shape[1], -1).T
        score = self.score(preds)
        return 'WRMSSE', score, False
    @staticmethod
    def get_weighted_mse_feval(w_12_eval, weight_col) -> callable:
        """Returns a weighted mean squared error metric function for
        lightgbm (no square root is taken, despite the class docstring's
        "root" phrasing elsewhere).

        w_12_eval must be aligned with grid_df like
        w_12_eval = w_12.reindex(grid_df[eval_mask].id)
        """
        # Normalise weights to mean 1 so the metric's scale stays
        # comparable to an unweighted MSE.
        weight = w_12_eval[weight_col] / w_12_eval[weight_col].mean()
        def feval(preds, eval_data) -> tuple:
            actuals = eval_data.get_label()
            diff = preds - actuals
            res = np.mean(diff ** 2 * weight)
            # lightgbm feval contract: (name, value, is_higher_better)
            return f'mse_feval_{weight_col}', res, False
        return feval
@staticmethod
def get_weighted_mae_feval(w_12_eval, weight_col) -> callable:
"""Returns a weighted mean absolute error metric function for lightgbm.
w_12_eval must be aligned with grid_df like
w_12_eval = w_12.reindex(grid_df[eval_mask].id)
"""
weight = w_12_eval[weight_col] / w_12_eval[weight_col].mean()
def feval(preds, eval_data) -> tuple:
actuals = eval_data.get_label()
diff = preds - actuals
res = np.mean(np.abs(diff ** 2 * weight))
return f'mae_feval_{weight_col}', res, False
return feval
    @staticmethod
    def get_weighted_mse_fobj(w_12_train, weight_col, weight_hess=True) -> callable:
        """Returns a weighted mean squared error objective function for lightgbm.
        w_12_train must be aligned with grid_df like
        w_12_train = w_12.reindex(grid_df[train_mask].id)
        """
        # Normalise weights to mean 1 so the effective learning rate is
        # unchanged relative to the unweighted objective.
        weight = w_12_train[weight_col] / w_12_train[weight_col].mean()
        def fobj(preds, train_data) -> tuple:
            actuals = train_data.get_label()
            diff = preds - actuals
            # Gradient of (1/2) * w * diff^2; the constant 2 is folded away.
            grad = diff * weight
            # Hessian of that objective is the weight; optionally use ones.
            hess = weight if weight_hess else np.ones_like(diff)
            return grad, hess
        return fobj
    @staticmethod
    def get_weighted_mae_fobj(w_12_train, weight_col, weight_hess=True) -> callable:
        """Returns a weighted mean absolute error objective function for lightgbm.
        w_12_train must be aligned with grid_df like
        w_12_train = w_12.reindex(grid_df[train_mask].id)
        """
        weight = w_12_train[weight_col] / w_12_train[weight_col].mean()
        def fobj(preds, train_data) -> tuple:
            actuals = train_data.get_label()
            diff = preds - actuals
            # Gradient of w * |diff| is w * sign(diff).
            grad = np.sign(diff) * weight
            # NOTE(review): the true MAE hessian is 0; supplying the weight
            # (or ones) here looks like a deliberate stabilising
            # approximation for lightgbm — confirm against training runs.
            hess = weight if weight_hess else np.ones_like(diff)
            return grad, hess
        return fobj
def _append_level_scores(self, df_series_scores, model_name) -> None:
    """Aggregate per-series scores into per-level scores and append them,
    tagged with a unique id, to self.scores.
    """
    # Sum the per-series scores within each aggregation level.
    level_scores = df_series_scores.groupby(level=0).sum()
    # Level 13 is the overall score: the mean across the 12 levels.
    level_scores.loc[13] = level_scores.mean()
    level_scores['model_name'] = model_name
    # BUG FIX: previously read the module-level global `start_test`; use the
    # instance attribute (set in the constructor, as plot_scores does).
    level_scores['start_test'] = self.start_test
    level_scores.reset_index(inplace=True)
    # Build a unique row id from model name, level, and test-start day.
    cols, sep, name = ['model_name', 'level', 'start_test'], '__', 'id'
    level_scores = combine_cols(level_scores, cols, sep, name)
    self.scores = append_df_unique_id(self.scores, level_scores)
def dump_scores(self, path_dir: str='.') -> None:
    """Write self.scores (per-level scores for each model_name/start_test
    combination) to <path_dir>/scores.csv.
    """
    out_path = os.path.join(path_dir, 'scores.csv')
    self.scores.to_csv(out_path, index=False)
def plot_scores(self, df_series_scores=None, model_name: str=None) -> tuple:
    """Returns a tuple: fig, ax with a seaborn plot of the 12 levels of the wrmsse."""
    # BUG FIX: `if not df_series_scores` raises ValueError for a passed-in
    # DataFrame (ambiguous truth value); compare against None explicitly.
    if df_series_scores is None: df_series_scores = self.df_series_scores
    if model_name is None: model_name = self.model_name
    fig, ax = plt.subplots()
    level_scores = df_series_scores.groupby(level=0).sum()
    sns.barplot(x=level_scores.index, y=level_scores['WRMSSE'])
    # Overall score = mean of the 12 level scores; positional access via
    # .iloc (plain `[0]` on a labelled Series is deprecated/ambiguous).
    overall = level_scores.mean().iloc[0]
    plt.axhline(overall, color='blue', alpha=.5, ls=':')
    name_and_days = f'{model_name} test {self.start_test} to {self.end_test}'
    title = f'{name_and_days} WRMSSE: {round(overall, 4)}'
    plt.title(title, fontsize=20, fontweight='bold')
    # Annotate each of the 12 level bars with its score (levels are 1-based).
    for i in range(12):
        ax.text(i, level_scores['WRMSSE'][i+1],
                str(round(level_scores['WRMSSE'][i+1], 4)),
                color='black', ha='center', fontsize=15)
    plt.show()
    return fig, ax
def make_sub(self, preds: np.array=None, test=False, model_name='no_name', path_dir='.') -> None:
    """Creates and writes a csv file that is ready for submission. If `test` is
    set to True, it will be for the final test set, otherwise, the predictions
    are for the validation set.
    The files name will be at `path_dir`/sub_`model_name`.csv"""
    # BUG FIX: `if not preds` raises ValueError for a non-empty ndarray
    # (ambiguous truth value); compare against None explicitly.
    if preds is None: preds = self.preds
    # BUG FIX: the model_name argument was previously ignored (always
    # overwritten). Honor an explicitly passed name, falling back to
    # self.model_name when the caller left the default.
    if model_name == 'no_name' and getattr(self, 'model_name', None):
        model_name = self.model_name
    # NOTE(review): df_scores and df_ss are module-level globals here —
    # presumably set up elsewhere in the notebook; confirm.
    df_preds = pd.DataFrame(preds, index=df_scores.loc[12].index).reset_index()
    # Kaggle id suffix depends on which 28-day window is being predicted.
    if not test: df_preds['id'] = df_preds['id'] + '_validation'
    else: df_preds['id'] = df_preds['id'] + '_evaluation'
    # Align to the sample-submission row order; missing series get 0.
    df_sub = df_ss[['id']].merge(df_preds, on='id', how='left').fillna(0)
    file_name = 'sub_' + model_name + '.csv'
    df_sub.to_csv(os.path.join(path_dir, file_name), index=False)
def get_oos_scale(self, oos_train_df) -> None:
    """Compute the series scaling factor for the level 12 series from
    'out of stock' fixed data and store it on self.w_12.

    Adds an 'oos_scale' column (inverse root-mean-square of day-to-day
    sales changes, excluding the final 28 days) and an 'oos_scaled_weight'
    column, possibly a better optimizing metric or objective weight.
    """
    # Skip the 6 leading id columns and the trailing 28 (held-out) days.
    sales_history = oos_train_df.iloc[:, 6:-28]
    day_to_day = np.diff(sales_history, axis=1)
    scale = 1 / np.sqrt(np.nanmean(day_to_day ** 2, axis=1))
    self.w_12['oos_scale'] = scale
    self.w_12['oos_scaled_weight'] = self.w_12['weight'] * self.w_12['oos_scale']
def add_total_scaled_weight(self) -> None:
    """Creates a column in self.w_12 that is meant to appropriately weight
    each level 12 series to optimize for the wrmsse metric, while still using
    mean squared error on level 12 series as an objective function.

    Explanation of the problem:
    ---------------------------
    Each series in the 12th level has a scaled weight associated with it.
    If we were only being scored with the 12th level wrmsse, this would
    seem to be an ideal weight. But we are being scored on all 12 levels
    of aggregation equally. Even if our objective function is not optimizing
    for the wrmsse on all 12 levels directly, we may be able to properly
    weight the level 12 series so that they reflect their overall impact
    to all of the 12 levels of aggregation.

    To illustrate the point, there could be item x, which costs $1000, and
    item y, which costs $1. Assuming they have the same sales fluctuations,
    and thus the same scaling factor, item x will have a weight 1000 times
    that of y. But both x and y appear in 11 other series, where they are
    equally as important as every other item in that series. For instance,
    x and y have equal importance on level 1, 'Total sales of all items'.

    Solution:
    ---------
    For each level 12 series x we find the weights (scaled weight since this
    is the 'true weight' of the series) of the series to which x belongs on
    all other 11 levels of aggregation and add them up. Our hope is this
    will allow us to optimize for the wrmsse metric, while still using mean
    squared error on level 12 series as an objective function.
    """
    # NOTE(review): the lambdas below reassemble each aggregation level's
    # key from '_'-separated pieces of the w_12 index ids; the exact id
    # format (and the character slice on level 3) should be confirmed
    # against how w_12 / df_weights are constructed.
    w_df = self.df_weights
    tmp = self.w_12.copy()
    # Level 1 (grand total) has a single weight shared by every series.
    tmp['level_1_sw'] = w_df.loc[1].scaled_weight[0]
    # Level 2: keyed by the 4th '_'-token of the id.
    tmp['level_2_sw'] = tmp.index.map(lambda x: w_df.loc[(2,x.split('_')[3])].scaled_weight)
    # Level 3: keyed by a fixed character slice of the id.
    tmp['level_3_sw'] = tmp.index.map(lambda x: w_df.loc[(3,x[-15: -11])].scaled_weight)
    # Level 4: keyed by the 1st token.
    tmp['level_4_sw'] = tmp.index.map(lambda x: w_df.loc[(4,x.split('_')[0])].scaled_weight)
    # Level 5: tokens 0 and 1 joined.
    tmp['level_5_sw'] = tmp.index.map(lambda x: w_df.loc[(5, x.split('_')[0] + '_' + x.split('_')[1])].scaled_weight)
    # Level 6: tokens 3 and 0 joined.
    tmp['level_6_sw'] = tmp.index.map(lambda x: w_df.loc[(6, x.split('_')[3] + '_' + x.split('_')[0])].scaled_weight)
    # Level 7: tokens 3, 0 and 1 joined.
    tmp['level_7_sw'] = tmp.index.map(lambda x: w_df.loc[(7, x.split('_')[3] + '_' + x.split('_')[0] + '_' + x.split('_')[1])].scaled_weight)
    # Level 8: tokens 3, 4 and 0 joined.
    tmp['level_8_sw'] = tmp.index.map(lambda x: w_df.loc[(8, x.split('_')[3] + '_' + x.split('_')[4] + '_' + x.split('_')[0])].scaled_weight)
    # Level 9: tokens 3, 4, 0 and 1 joined.
    tmp['level_9_sw'] = tmp.index.map(lambda x: w_df.loc[(9, x.split('_')[3] + '_' + x.split('_')[4] + '_' + x.split('_')[0] + '_' + x.split('_')[1])].scaled_weight)
    # Level 10: tokens 0, 1 and 2 joined.
    tmp['level_10_sw'] = tmp.index.map(lambda x: w_df.loc[(10, x.split('_')[0] + '_' + x.split('_')[1] + '_' + x.split('_')[2])].scaled_weight)
    # Level 11: tokens 0 through 3 joined.
    tmp['level_11_sw'] = tmp.index.map(lambda x: w_df.loc[(11, x.split('_')[0] + '_' + x.split('_')[1] + '_' + x.split('_')[2] + '_' + x.split('_')[3])].scaled_weight)
    # Sum the series' own scaled weight plus its share at the 11 higher levels.
    self.w_12['total_scaled_weight'] = tmp[['scaled_weight', 'level_1_sw', 'level_2_sw', 'level_3_sw', 'level_4_sw',
                                            'level_5_sw', 'level_6_sw', 'level_7_sw', 'level_8_sw', 'level_9_sw',
                                            'level_10_sw', 'level_11_sw']].sum(axis=1)
# **Example use of the WRMSSE evaluator**
# * **Test period**: Days 1914 - 1941, same as the competition validation period, so we should get the same score here as we do if we submit the same predictions to kaggle (confirmed).
# * **Prediction model**: simple baseline of predicting sales to be the same as the previous 28 days.
start_test = 1914  # first day of the 28-day test window
e = WRMSSE(PATH_DATA_RAW, start_test=start_test)
e.add_total_scaled_weight()
e.w_12
#hide
show_doc(WRMSSE.score)
# Baseline predictions: repeat the 28 days immediately preceding the test window.
preds = e.df_stv.loc[:, f'd_{start_test - 28}': f'd_{start_test - 1}'].values
e.score(preds, model_name='same_as_last_month', fast=False)
#hide
show_doc(WRMSSE.plot_scores)
fig, ax = e.plot_scores()
#hide
show_doc(WRMSSE.dump_scores)
e.dump_scores(PATH_DATA)
pd.read_csv(PATH_DATA + '/scores.csv')
#hide
show_doc(WRMSSE.make_sub)
e.make_sub()
pd.read_csv('sub_' + e.model_name + '.csv')
# Submit this file and see that it scores the same on the kaggle public leaderboard
# !rm {'sub_' + e.model_name + '.csv'} {PATH_DATA + '/scores.csv'}
# ## Using weights for custom objective functions and metrics
# These static methods will help us create custom metrics and evaluation functions for lightgbm training
#hide
show_doc(WRMSSE.get_weighted_mse_feval)
#hide
show_doc(WRMSSE.get_weighted_mae_feval)
#hide
show_doc(WRMSSE.get_weighted_mse_fobj)
#hide
show_doc(WRMSSE.get_weighted_mae_fobj)
#hide
from nbdev.export import notebook2script; notebook2script()
|
02_WRMSSE_metric.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_palette('magma')
import numpy as np
plt.style.use('aa_paper')  # NOTE(review): custom local matplotlib style sheet
# %matplotlib inline
# Target-gene constants used to build file paths and plot labels below.
gene = 'HSPB1'
gene_lower = gene.lower()  # lowercase form used in output file names
chrom = 'chr7'
start = 76302558  # gene start coordinate
end = 76304301    # gene end coordinate
# + [markdown] variables={"chrom": "chr7", "end": "76304301", "gene": "HSPB1", "gene_lower": "hspb1", "start": "76302558"}
# Note: this notebook relies on the Jupyter extension [Python Markdown](https://github.com/ipython-contrib/jupyter_contrib_nbextensions/tree/master/src/jupyter_contrib_nbextensions/nbextensions/python-markdown) to properly display the commands below, and in other markdown cells.
#
# This notebook describes our process of designing optimal guides for allele-specific excision for the gene *{{gene}}*. *{{gene}}* is a gene located on {{chrom}}.
#
# # Identify variants to target
#
# Identify exhaustive list of targetable variant pairs in the gene with 1000 Genomes data for excision maximum limit = 10kb for the paper.
#
# `python ~/projects/AlleleAnalyzer/scripts/ExcisionFinder.py -v /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/1000genomes_analysis/get_gene_list/gene_list_hg38.tsv {{gene}} /pollard/data/projects/AlleleAnalyzer_data/1kgp_data/hg38_analysis/1kgp_annotated_variants_by_chrom/{{chrom}}_annotated.h5 10000 SpCas9,SaCas9 /pollard/data/genetics/1kg/phase3/hg38/ALL.{{chrom}}_GRCh38.genotypes.20170504.bcf /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_ --window=5000 --exhaustive`
#
# ## Generate arcplot input for all populations together and for each superpopulation.
#
# `python ~/projects/AlleleAnalyzer/plotting_scripts/gen_arcplot_input.py /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_exh.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_ALL`
#
# `parallel " python ~/projects/AlleleAnalyzer/plotting_scripts/gen_arcplot_input.py /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_exh.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_{} --sample_legend=/pollard/data/projects/AlleleAnalyzer_data/1kgp_data/1kg_allsamples.tsv --pop={} " ::: AFR AMR EAS EUR SAS`
#
# ### Plot arcplots together to demonstrate the different patterns of sharing.
#
# `python ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/src/superpops_for_arcplot_merged.py ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_ ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_`
#
# `Rscript ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/src/arcplot_superpops_for_paper.R ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_all_pops_arcplot_input.tsv ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_filt20_allpops 20 {{start}} {{end}} 5000 {{gene}}`
#
# # Set Cover
#
# ## Use set cover to identify top 5 variant pairs
#
# `python ~/projects/AlleleAnalyzer/scripts/optimize_ppl_covered.py --type=max_probes 5 ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_exh.tsv ~/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_set_cover_5_pairs`
# -
# Variant pairs chosen by the set cover optimization (up to 5 pairs).
set_cover_top_pairs = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_set_cover_5_pairs_pairs_used.txt',
                                  sep='\t')
set_cover_top_pairs
# ## Population coverage for set cover pairs
def ppl_covered(guides_used_df, cohort_df):
    """Return the rows of cohort_df whose variant pairs are both covered by
    the variants (var1 or var2) appearing in guides_used_df."""
    covered_vars = guides_used_df['var1'].tolist() + guides_used_df['var2'].tolist()
    return cohort_df.query(
        '(var1 in @covered_vars) and (var2 in @covered_vars)'
    ).copy()
# NOTE(review): `global` at module level is a no-op; kept as-is.
global pairs_to_ppl
# Exhaustive list of targetable variant pairs with the individuals they cover.
pairs_to_ppl = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_var_pairs_exh.tsv',
                           sep='\t', low_memory=False)
ptp_sc_5 = ppl_covered(set_cover_top_pairs, pairs_to_ppl)
ptp_sc_5.head()
# # Top 5
#
# ## Extract top 5 pairs by population coverage
# The 5 most-shared pairs by overall population coverage (naive baseline).
top_five_top_pairs = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_arcplot_ALL.tsv',
                                 sep='\t').sort_values(by='percent_pop_covered', ascending=False).head().reset_index(drop=True)
ptp_top_5 = ppl_covered(top_five_top_pairs[['var1','var2']], pairs_to_ppl)
# Demonstrate the difference in population coverages between top 5 shared pairs and set cover identified pairs.
top_five_top_pairs
# # Make arcplots for set cover and top 5 pairs
#
# Make file of set cover pairs for use with arcplot plotting script.
# +
# set cover
# Re-read the exhaustive pair list and keep only the rows matching the
# set-cover-selected pairs, for use with the arcplot plotting script.
exh = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_var_pairs_exh.tsv',
                  sep='\t', low_memory=False)
exh_sc = []
for ix, row in set_cover_top_pairs.iterrows():
    var1 = row['var1']
    var2 = row['var2']
    exh_sc.append(pd.DataFrame(exh.query('(var1 == @var1) and (var2 == @var2)')))
exh_sc_df = pd.concat(exh_sc)
exh_sc_df.to_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_var_pairs_exh_sc.tsv',
                 sep='\t', index=False)
# +
# top 5
# Same filtering as above, but for the naive top-5 most-shared pairs.
exh = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_var_pairs_exh.tsv',
                  sep='\t', low_memory=False)
exh_tf = []
for ix, row in top_five_top_pairs.iterrows():
    var1 = row['var1']
    var2 = row['var2']
    exh_tf.append(pd.DataFrame(exh.query('(var1 == @var1) and (var2 == @var2)')))
exh_tf_df = pd.concat(exh_tf)
exh_tf_df.to_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/{gene_lower}_var_pairs_exh_tf.tsv',
                 sep='\t', index=False)
# + [markdown] variables={"end": "76304301", "gene": "HSPB1", "gene_lower": "hspb1", "start": "76302558"}
# ### Set cover
#
# Make input arcplot-formatted:
#
# `python ~/projects/AlleleAnalyzer/plotting_scripts/gen_arcplot_input.py /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_exh_sc.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_set_cover_ALL`
#
# Make arcplot:
#
# `Rscript ~/projects/AlleleAnalyzer/plotting_scripts/arcplot_generic.R /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_set_cover_ALL.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_set_cover_ALL 0 {{start}} {{end}} 5000 {{gene}}`
#
# ### Top 5
#
# Make input arcplot-formatted:
#
# `python ~/projects/AlleleAnalyzer/plotting_scripts/gen_arcplot_input.py /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_var_pairs_exh_tf.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_top_five_ALL`
#
# Make arcplot:
#
# `Rscript ~/projects/AlleleAnalyzer/plotting_scripts/arcplot_generic.R /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_top_five_ALL.tsv /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/{{gene_lower}}_arcplot_top_five_ALL 0 {{start}} {{end}} 5000 {{gene}}`
# -
# # Compare coverage
# +
def cov_cat(row):
    """Categorize a sample row by which approach(es) cover it
    (uses module-level ptp_top_5 and ptp_sc_5)."""
    sample = row['sample']
    in_top5 = sample in ptp_top_5['ind'].tolist()
    in_sc = sample in ptp_sc_5['ind'].tolist()
    if in_top5 and in_sc:
        return 'Both'
    if in_top5:
        return 'Top 5'
    if in_sc:
        return 'Set Cover'
    return 'Neither'
# NOTE(review): `global` at module level is a no-op; kept as-is.
global inds
# 1000 Genomes sample sheet: one row per individual with population labels.
inds = pd.read_csv('/pollard/data/projects/AlleleAnalyzer_data/1kgp_data/1kg_allsamples.tsv',
                   sep='\t')
inds_cov = inds.copy()
# Flag each individual as covered by the set-cover and/or top-5 pairs.
inds_cov['AlleleAnalyzer'] = inds['sample'].isin(ptp_sc_5['ind'])
inds_cov['Top 5'] = inds['sample'].isin(ptp_top_5['ind'])
inds_cov['Coverage'] = inds_cov.apply(lambda row: cov_cat(row), axis=1)
global superpop_dict
# Expand superpopulation codes to display names for plotting.
superpop_dict = {
    'AMR':'Admixed American',
    'AFR':'African',
    'SAS':'South Asian',
    'EAS':'East Asian',
    'EUR':'European'
}
sns.set_palette('Dark2', n_colors=3)
fig, ax = plt.subplots(figsize=(2.8, 1.8))
# Covered vs not-covered counts per superpopulation (set-cover approach).
sns.countplot(y='superpop', hue='AlleleAnalyzer', data=inds_cov.replace(superpop_dict).replace({
    True:'Covered',
    False:'Not covered'
}).sort_values(by=['superpop','Coverage']))
plt.xlabel('Number of individuals')
plt.ylabel('Super Populations')
plt.xticks(rotation=0)
ax.legend(loc='upper right',prop={'size': 9},
          frameon=False,
          borderaxespad=0.1)
ax.set_xlim([0,600]) # 600 often works but can be tweaked per gene
plt.title('AlleleAnalyzer')
# -
# Same per-superpopulation coverage plot for the naive top-5 pairs.
sns.set_palette('Dark2', n_colors=3)
fig, ax = plt.subplots(figsize=(2.8, 1.8))
sns.countplot(y='superpop', hue='Top 5', data=inds_cov.replace(superpop_dict).replace({
    True:'Covered',
    False:'Not covered'
}).sort_values(by=['superpop','Coverage']))
plt.xlabel('Number of individuals')
plt.ylabel('Super Populations')
plt.xticks(rotation=0)
ax.legend(loc='upper right',prop={'size': 9},
          frameon=False,
          borderaxespad=0.1)
ax.set_xlim([0,600]) # 600 often works but can be tweaked per gene
plt.title('Top 5')
# # Design and score sgRNAs for variants included in Set Cover and Top 5 pairs
#
# ## Set Cover
# ### Make BED files for positions for each variant pair
# +
# BED file of the set-cover variant positions (one row per variant),
# used as input to the sgRNA design script.
set_cover_bed = pd.DataFrame()
set_cover_bed['start'] = set_cover_top_pairs['var1'].tolist() + set_cover_top_pairs['var2'].tolist()
set_cover_bed['end'] = set_cover_top_pairs['var1'].tolist() + set_cover_top_pairs['var2'].tolist()
set_cover_bed['region'] = set_cover_bed.index
set_cover_bed['chrom'] = f'{chrom}'
set_cover_bed = set_cover_bed[['chrom','start','end','region']]
set_cover_bed.to_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/set_cover_pairs.bed',
                     sep='\t', index=False, header=False)
# + [markdown] variables={"chrom": "chr7", "gene": "HSPB1", "gene_lower": "hspb1"}
# ### Design sgRNAs
#
# `python ~/projects/AlleleAnalyzer/scripts/gen_sgRNAs.py -v /pollard/data/genetics/1kg/phase3/hg38/ALL.{{chrom}}_GRCh38.genotypes.20170504.bcf /pollard/data/projects/AlleleAnalyzer_data/1kgp_data/hg38_analysis/1kgp_annotated_variants_by_chrom/{{chrom}}_annotated.h5 /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/set_cover_pairs.bed /pollard/data/projects/AlleleAnalyzer_data/pam_sites_hg38/ /pollard/data/vertebrate_genomes/human/hg38/hg38/hg38.fa /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/guides_set_cover_{{gene_lower}} SpCas9,SaCas9 20 --bed --sim -d --crispor=hg38`
# -
# ## Top 5
# ### Make BED files for positions for each variant pair
# +
# BED file of the top-5 variant positions, mirroring the set-cover BED above.
top_five_bed = pd.DataFrame()
top_five_bed['start'] = top_five_top_pairs['var1'].tolist() + top_five_top_pairs['var2'].tolist()
top_five_bed['end'] = top_five_top_pairs['var1'].tolist() + top_five_top_pairs['var2'].tolist()
top_five_bed['region'] = top_five_bed.index
top_five_bed['chrom'] = f'{chrom}'
top_five_bed = top_five_bed[['chrom','start','end','region']]
top_five_bed.to_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/top_five_pairs.bed',
                    sep='\t', index=False, header=False)
# + [markdown] variables={"chrom": "chr7", "gene": "HSPB1", "gene_lower": "hspb1"}
# ### Design sgRNAs
#
# `python ~/projects/AlleleAnalyzer/scripts/gen_sgRNAs.py -v /pollard/data/genetics/1kg/phase3/hg38/ALL.{{chrom}}_GRCh38.genotypes.20170504.bcf /pollard/data/projects/AlleleAnalyzer_data/1kgp_data/hg38_analysis/1kgp_annotated_variants_by_chrom/{{chrom}}_annotated.h5 /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/top_five_pairs.bed /pollard/data/projects/AlleleAnalyzer_data/pam_sites_hg38/ /pollard/data/vertebrate_genomes/human/hg38/hg38/hg38.fa /pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{{gene}}/guides_top_five_{{gene_lower}} SpCas9,SaCas9 20 --bed --sim -d --crispor=hg38`
# -
# # Reanalyze coverage at positions with at least 1 sgRNA with predicted specificity score > threshold
#
# ## Set Cover
# +
# sgRNAs designed for the set-cover variant positions, with specificity scores.
sc_grnas = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/guides_set_cover_{gene_lower}.tsv',
                       sep='\t')
def get_pairs(pairs_df, grna_df, min_score=0):
    """Return rows of pairs_df where both variants have at least one sgRNA
    whose predicted specificity score (ref or alt allele) is >= min_score."""
    specific = grna_df.query('(scores_ref >= @min_score) or (scores_alt >= @min_score)')
    positions = specific['variant_position'].astype(int).unique().tolist()
    return pairs_df.query('(var1 in @positions) and (var2 in @positions)').copy()
def plot_coverage(orig_pairs, grnas, min_score_list, xlim, legend_pos='lower right', sc=True):
    """Plot per-superpopulation counts of individuals covered at each minimum
    sgRNA specificity score threshold.

    Uses module-level `pairs_to_ppl`, `inds`, and `superpop_dict`; `sc`
    switches the title between the set-cover and top-5 variants.
    Returns the seaborn countplot axes.
    """
    # NOTE(review): `label` is computed but never used below.
    if sc:
        label = 'AlleleAnalyzer'
    else:
        label = 'Top 5'
    inds_cov_df_list = []
    # One coverage table per score threshold, then stacked for plotting.
    for min_score in min_score_list:
        pairs_filt = get_pairs(orig_pairs, grnas, min_score = min_score)
        ptp = ppl_covered(pairs_filt, pairs_to_ppl)
        inds_cov = inds.copy()
        inds_cov['Coverage'] = inds['sample'].isin(ptp['ind'])
        inds_cov['Minimum Specificity Score'] = min_score
        inds_cov_df_list.append(inds_cov)
    # Keep only covered individuals; one row per (individual, threshold).
    inds_cov = pd.concat(inds_cov_df_list).query('Coverage').drop_duplicates()
    fig, ax = plt.subplots(figsize=(3.8, 5.8))
    p = sns.countplot(y='superpop', hue='Minimum Specificity Score',
                      data=inds_cov.replace(superpop_dict).sort_values(by=['superpop']), palette='magma')
    plt.xlabel('Number of individuals')
    plt.ylabel('Super Populations')
    plt.legend(loc=legend_pos,prop={'size': 9},
               frameon=False,
               borderaxespad=0.1,
               title='Minimum score')
    ax.set_xlim([0,xlim])
    if sc:
        plt.title(f'AlleleAnalyzer coverage at various \nminimum score thresholds, {gene}')
    else:
        plt.title(f'Top 5 sites at various \nminimum score thresholds, {gene}')
    return p
# -
set_cover_top_pairs.head()
# Distribution of reference-allele specificity scores per variant position.
sns.swarmplot(x='variant_position', y='scores_ref', data=sc_grnas)
plt.xticks(rotation=90)
# Coverage at thresholds 0-90 for the set-cover pairs; save for the paper.
p = plot_coverage(set_cover_top_pairs, sc_grnas, list(range(0, 100, 10)), 600, 'lower right')
p.get_figure().savefig(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/sc_coverage_all.pdf',
                       dpi=300, bbox_inches='tight')
def plot_overall(orig_pairs, grnas, max_y, cohort_size=2504):
    """Bar plot of overall cohort coverage vs. minimum specificity score.

    Parameters
    ----------
    orig_pairs : DataFrame of variant pairs (var1/var2 columns).
    grnas : DataFrame of designed sgRNAs with specificity scores.
    max_y : upper limit for the y axis (percent).
    cohort_size : total number of individuals in the cohort. Defaults to
        2504 (the 1000 Genomes phase 3 sample count, previously a
        hard-coded magic number), so existing calls are unchanged.

    Returns the seaborn barplot axes.
    """
    filters = list(range(0,100,10))
    plot_vals = {}
    # Percent of the cohort covered at each minimum-score threshold.
    for filt in filters:
        pairs_filt = get_pairs(orig_pairs, grnas, min_score = filt)
        ptp = ppl_covered(pairs_filt, pairs_to_ppl)
        plot_vals[filt] = 100.0 * (len(ptp['ind'].unique()) / float(cohort_size))
    plot_vals_df = pd.DataFrame.from_dict(plot_vals, orient='index')
    plot_vals_df['Minimum Score'] = plot_vals_df.index
    plot_vals_df.columns = ['% 1KGP Covered','Minimum Score']
    fig, ax = plt.subplots(figsize=(3.8, 2.8))
    p = sns.barplot(x='Minimum Score', y='% 1KGP Covered',
                    data=plot_vals_df, palette='magma')
    plt.title(f'Overall 1KGP Coverage with Filtering\n by Predicted Specificity Score, {gene}')
    plt.xlabel('Minimum Score Threshold')
    ax.set_ylim([0,max_y])
    return(p)
# Overall coverage vs. score threshold for the set-cover pairs.
sc_overall_plot = plot_overall(set_cover_top_pairs, sc_grnas, 60)
sc_overall_plot.get_figure().savefig(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/1kgp_cov_overall_set_cover.pdf',
                                     dpi=300, bbox_inches='tight')
# ## Top 5
# Repeat the coverage analyses for the naive top-5 pairs.
tf_grnas = pd.read_csv(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/guides_top_five_{gene_lower}.tsv',
                       sep='\t')
p = plot_coverage(top_five_top_pairs, tf_grnas, list(range(0, 100, 10)), 600, 'lower right', sc=False)
p.get_figure().savefig(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/tf_coverage_all.pdf',
                       dpi=300, bbox_inches='tight')
tf_overall_plot = plot_overall(top_five_top_pairs, tf_grnas, 70)
tf_overall_plot.get_figure().savefig(f'/pollard/home/kathleen/projects/AlleleAnalyzer/manuscript_analyses/set_cover_analysis/{gene}/1kgp_cov_overall_top_five.pdf',
                                     dpi=300, bbox_inches='tight')
|
manuscript_analyses/set_cover_analysis/set cover analysis for sgRNA pair design in the gene HSPB1 updated.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Introduction to `darts`
# In this notebook, we will go over the main functionalities of the library: how to build and manipulate time series, train forecasting models, make predictions, evaluate metrics, backtest models and ensemble several models.
#
# As a toy example, we will use the well known [monthly airline passengers dataset](https://github.com/jbrownlee/Datasets/blob/master/monthly-airline-passengers.csv).
# + tags=[]
# fix python path if working locally
from utils import fix_pythonpath_if_working_locally
fix_pythonpath_if_working_locally()
# + tags=[]
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
import sys
import pandas as pd
import numpy as np
from datetime import datetime
import time
import matplotlib.pyplot as plt
from darts import TimeSeries
from darts.models import (
NaiveSeasonal,
NaiveDrift,
Prophet,
ExponentialSmoothing,
ARIMA,
AutoARIMA,
StandardRegressionModel,
Theta,
FFT
)
from darts.metrics import mape, mase
from darts.utils.statistics import check_seasonality, plot_acf, plot_residuals_analysis
import warnings
warnings.filterwarnings("ignore")
import logging
logging.disable(logging.CRITICAL)
# -
# ## Read data and build a `TimeSeries`
# A `TimeSeries` simply represents a univariate or multivariate time series, with a proper time index. It is a wrapper around a `pandas.DataFrame`, and it can be built in a few different ways:
# * From an entire Pandas `DataFrame` directly
# * From a time index and an array of corresponding values
# * From a subset of Pandas `DataFrame` columns, indicating which are the time column and the values columns. This is what we will do here:
# +
# Load the monthly airline passengers data and wrap it in a darts TimeSeries.
df = pd.read_csv('AirPassengers.csv', delimiter=",")
series = TimeSeries.from_dataframe(df, 'Month', ['#Passengers'])
series.plot()
# -
# ## Creating a training and validation series
# First, let's split our `TimeSeries` into a training and a validation series. Note: in general, it is also a good practice to keep a test series aside and never touch it until the end of the process. Here, we just build a training and a test series for simplicity.
#
# The training series will be a `TimeSeries` containing values until January 1958 (excluded), and the validation series a `TimeSeries` containing the rest:
train, val = series.split_before(pd.Timestamp('19580101'))
train.plot(label='training')
val.plot(label='validation')
plt.legend();
# ## Playing with toy models
# There is a collection of "naive" baseline models in `darts`, which can be very useful to get an idea of the bare minimum accuracy that one could expect. For example, the `NaiveSeasonal(K)` model always "repeats" the value that occurred `K` time steps ago.
#
# In its most naive form, when `K=1`, this model simply always repeats the last value of the training series:
# +
# Baseline: NaiveSeasonal with K=1 simply repeats the last training value.
naive_model = NaiveSeasonal(K=1)
naive_model.fit(train)
naive_forecast = naive_model.predict(36)
series.plot(label='actual')
naive_forecast.plot(label='naive forecast (K=1)')
plt.legend();
# -
# It's very easy to fit models and produce predictions on `TimeSeries`. All the models have a `fit()` and a `predict()` function. This is similar to [https://scikit-learn.org/](https://scikit-learn.org/), except that it is specific to time series. The `fit()` function takes in argument the training time series on which to fit the model, and the `predict()` function takes in argument the number of time steps (after the end of the training series) over which to forecast.
#
# ### Inspect Seasonality
# Our model above is perhaps a bit too naive. We can already improve by exploiting the seasonality in the data. It seems quite obvious that the data has a yearly seasonality, which we can confirm by looking at the auto-correlation function (ACF), and highlighting the lag `m=12`:
plot_acf(train, m = 12, alpha = .05)
# The ACF presents a spike at x = 12, which suggests a yearly seasonality trend (highlighted in red). The blue zone determines the significance of the statistics for a confidence level of alpha = 5%. In cases where we are unsure, we can also run a statistical check of seasonality for each candidate period `m`:
# + tags=[]
# Test every candidate period from 2 to 24 months for significant seasonality.
for m in range(2, 25):
    is_seasonal, period = check_seasonality(train, m=m, alpha=.05)
    if is_seasonal:
        print('There is seasonality of order {}.'.format(period))
# -
# ### A less naive model
# Let's try the `NaiveSeasonal` model again with a seasonality of 12:
# +
# Repeat the value from 12 months earlier, exploiting the yearly seasonality.
seasonal_model = NaiveSeasonal(K=12)
seasonal_model.fit(train)
seasonal_forecast = seasonal_model.predict(36)
series.plot(label='actual')
seasonal_forecast.plot(label='naive forecast (K=12)')
plt.legend();
# -
# This is better, but we are still missing the trend. Fortunately, there is also another naive baseline model capturing the trend, which is called `NaiveDrift`. This model will simply produce linear predictions, with a slope that is determined by the first and last values of the training set:
# +
drift_model = NaiveDrift()
drift_model.fit(train)
drift_forecast = drift_model.predict(36)
# Add drift (trend) and seasonal forecasts; subtract the last training value
# once so the combined forecast starts at the right offset.
combined_forecast = drift_forecast + seasonal_forecast - train.last_value()
series.plot()
combined_forecast.plot(label='combined')
drift_forecast.plot(label='drift')
plt.legend();
# -
# What happened in the last cell? We simply fit a naive drift model, and add its forecast to the seasonal forecast we had previously. We also substract the last value of the training set to the result, so that the resulting combined forecast starts off with the right offset.
#
# This looks already like a fairly decent forecast, and we did not use any non-naive model yet! In fact - any model should be able to beat this. But hey, what's the error we are getting here? Let's see what we'll have to beat:
# + tags=[]
print("Mean absolute percentage error for the combined naive drift + seasonal: {:.2f}%.".format(
    mape(series, combined_forecast)))
# -
# ## Quickly try a few more models
# `darts` is built to make it easy to train and validate several models in a unified way. Let's train a few more and compute their respective MAPE on the validation set:
# + tags=[]
def eval_model(model):
    """Fit *model* on the training series and report its MAPE on the validation set."""
    model.fit(train)
    val_forecast = model.predict(len(val))
    score = mape(val, val_forecast)
    print('model {} obtains MAPE: {:.2f}%'.format(model, score))
# Score each out-of-the-box model on the validation split.
eval_model(ExponentialSmoothing())
eval_model(Prophet())
eval_model(AutoARIMA())
eval_model(Theta())
# -
# Here, we only built these models with their default parameters. We can probably do better if we fine-tune them to our problem. Let's try with the Theta method.
# ## The Theta method
# The model `Theta` contains an implementation of Assimakopoulos and Nikolopoulos' Theta method. This method has known great success, particularly in the M3-competition.
#
# Though the value of the Theta parameter is often set to 0 in applications, our implementation supports a variable value for parameter tuning purposes. Let's try to find a good value for Theta:
# +
# Search for the best theta parameter, by trying 50 different values
thetas = 2 - np.linspace(-10, 10, 50)
best_mape = float('inf')
best_theta = 0
# Grid search: keep the theta with the lowest validation-set MAPE.
for theta in thetas:
    model = Theta(theta)
    model.fit(train)
    pred_theta = model.predict(len(val))
    res = mape(val, pred_theta)
    if res < best_mape:
        best_mape = res
        best_theta = theta
# + tags=[]
# Refit with the winning theta and report its validation MAPE.
best_theta_model = Theta(best_theta)
best_theta_model.fit(train)
pred_best_theta = best_theta_model.predict(len(val))
print('The MAPE is: {:.2f}, with theta = {}.'.format(mape(val, pred_best_theta), best_theta))
# -
train.plot(label='train')
val.plot(label='true')
pred_best_theta.plot(label='prediction')
plt.legend();
# We can observe that the model with `best_theta` is so far the best we have, in terms of MAPE.
# ## Backtesting: simulate historical forecasting
# So at this point we have a model that performs well on our validation set, and that's good. But how can we know the performance we *would have obtained* if we *had been using this model* historically?
#
# Backtesting simulates predictions that would have been obtained historically with a given model. It can take a while to produce, since the model is re-fit every time the simulated prediction time advances.
#
# Such simulated forecasts are always defined with respect to a *forecast horizon*, which is the number of time steps that separate the prediction time from the forecast time. In the example below, we simulate forecasts done for 3 months in the future (compared to prediction time).
# + tags=[]
best_theta_model = Theta(best_theta)
# Simulated historical forecasts: the model is re-fit at every step and always
# predicts 3 months ahead, starting from Jan 1955.
historical_fcast_theta = best_theta_model.backtest(series, start=pd.Timestamp('19550101'),
                                                  forecast_horizon=3, verbose=True)
# -
# Let's see what this backtest forecast looks like. You can see it produces more accurate predictions than the one-off prediction done above, because here the model is re-fit every month.
series.plot(label='data')
historical_fcast_theta.plot(label='backtest 3-months ahead forecast (Theta)')
plt.title('MAPE = {:.2f}%'.format(mape(historical_fcast_theta, series)))
plt.legend();
# Let's look at the fitted value residuals of our current `Theta` model, i.e. the difference between the 1-step forecasts at every point in time obtained by fitting the model on all previous points, and the actual observed values.
plot_residuals_analysis(best_theta_model.residuals(series))
# We can see that the distribution has a mean that is slightly larger than 0. This means that our `Theta` model is biased. We can also make out a large ACF value at lag equal to 12, which indicates that the residuals contain information that was not used by the model.
#
# Could we maybe do better with a simple `ExponentialSmoothing` model?
# + tags=[]
# Same backtest protocol, this time with exponential smoothing.
model_es = ExponentialSmoothing()
historical_fcast_es = model_es.backtest(series, start=pd.Timestamp('19550101'),
                                        forecast_horizon=3, verbose=True)
series.plot(label='data')
historical_fcast_es.plot(label='backtest 3-months ahead forecast (Exp. Smoothing)')
plt.title('MAPE = {:.2f}%'.format(mape(historical_fcast_es, series)))
plt.legend()
# -
# This is much better! We get a mean absolute percentage error of 4.07% when backtesting with a 3-months forecast horizon in this case.
plot_residuals_analysis(model_es.residuals(series))
# The residual analysis also reflects an improved performance in that we now have a distribution of the residuals centred at value 0, and the ACF values, although not insignificant, have lower magnitudes.
#
# ## Ensembling several predictions
# *Ensembling* is about combining the forecasts produced by several models, in order to obtain a final -- and hopefully better forecast.
#
# For instance, in our example of a "less naive" model above, we manually combined a naive seasonal model with a naive drift model. Here, we will try to find such combinations in an automated way, using `RegressionModel`s. A regression model is a model that predicts a *target* time series from a bunch of *features* time series. If the features time series are themselves obtained from forecasting models, their future (predicted) values can be combined using the regression model to obtain a final forecast.
#
# Here, we will first compute the historical predictions of two naive seasonal models (with 6 and 12 months seasonality) and a naive drift model. To compute the historical predictions, we can simply reuse the `backtest()` method:
# + tags=[]
models = [NaiveSeasonal(6), NaiveSeasonal(12), NaiveDrift()]
# Historical (backtest) forecasts of each naive model; these become the
# feature series for the regression ensemble below.
model_predictions = [m.backtest(series, start=pd.Timestamp('19570101'), forecast_horizon=6, verbose=True)
                     for m in models]
# -
# Now that we have the historical forecasts *that we would have obtained* from a couple of models, we can train a `StandardRegressionModel`, in order to learn in a supervised way how to best combine the features time series (our 3 forecasts) into the target series that we are trying to predict.
#
# By default the `StandardRegressionModel` will fit a linear regression for predicting the target series from some features series. If you want something different than linear regression, `StandardRegressionModel` can wrap around any scikit-learn regression model.
# + tags=[]
""" We build the regression model, and tell it to use the 12 preceding points to fit the regression
"""
regr_model = StandardRegressionModel(train_n_points=12)
""" Our target series is what we want to predict (the actual data)
    It has to have the same time index as the features series:
"""
series_target = series.slice_intersect(model_predictions[0])
""" Here we backtest our regression model
"""
ensemble_pred = regr_model.backtest(model_predictions, series_target,
                                    start=pd.Timestamp('19580101'), forecast_horizon=3, verbose=True)
# -
# Finally, let's see how good the regression performs, compared to the original forecasts:
# + tags=[]
plt.figure(figsize=(8,5))
series.plot(label='actual')
# Plot and score each individual naive forecast over the ensemble's window.
for i, m in enumerate(models):
    model_predictions[i].plot(label=str(m))
    # intersect last part, to compare all the methods over the duration of the ensemble forecast
    model_pred = model_predictions[i].slice_intersect(ensemble_pred)
    mape_model = mape(series, model_pred)
    print('MAPE Error for {}: {:.2f}%'.format(m, mape_model))
print('MAPE Error ensemble: {:.2f}%'.format(mape(series, ensemble_pred)))
ensemble_pred.plot(label='Ensemble')
print('\nCoefficients of the features time series:')
for i, m in enumerate(models):
    print('Learned coefficient for {}: {:.2f}'.format(m, regr_model.model.coef_[0][i]))
plt.legend();
# -
# That's quite nice: by just combining 3 naive models (two seasonal repetitions and a linear trend) using a linear regression, we get a decent-looking ensemble model, which is better than any of the sub-models, with a MAPE of 5.30%.
#
# A couple of interesting things to observe:
# * Note how the seasonal model for `K=6` and the naive drift model have an incorrect phase compared to the original signal (due to the original signal having a true seasonality of 12). Despite this, the ensembling is able to learn coefficients that compensate for this effect. Removing either of the two models results in worse performance.
# * Note how the regression (ensemble) forecast starts off 12 months after the individual models forecasts -- that is because the regression model needs 12 data points to fit the weights coefficients of the linear regression.
# ## FFT and RNNs
# If you'd like to try models based on Fast Fourier Transform or Recurrent Neural Networks, we recommend that you go over the `FFT-examples.ipynb` and `RNN-examples.ipynb` notebooks, respectively.
# ## A final word of caution
# So is Theta, exponential smoothing, or a linear regression of naive models the best approach for predicting the future number of airline passengers? Well, at this point it's still hard to say exactly which one is best. Our time series is small, and our validation set is even smaller. In such cases, it's very easy to overfit the whole forecasting exercise to such a small validation set. That's especially true if the number of available models and their degrees of freedom is high; so always take results with a grain of salt (especially on small datasets), and apply the scientific method before making any kind of forecast!
|
examples/darts-intro.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: fbd
# language: python
# name: fbd
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="8afnuTDfKV78" outputId="b4e8f69d-5d7c-4a50-8b73-ddcce5da6cff"
# !pip install ipynb
# + id="WdJqa87rDCMP"
import numpy as np
import os
from time import time
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import seaborn as sns
import ipynb
sns.set(style="darkgrid")
import warnings
#warnings.simplefilter(action='ignore', category=IntegrationWarning)
# + id="VmdW-JjoDCMP"
from ipynb.fs.full.closed_form_solution import f, p, p1, p2, call_price
# + id="Huf5H-5MDCMP"
start = time()
# Set the Heston model parameters
sigma = 0.61   # volatility of variance (vol-of-vol)
theta = 0.019  # long-run variance level
kappa = 6.21   # mean-reversion speed of the variance process
mu = 0.5       # NOTE(review): defined but not used in the visible code — confirm intent
rho = -0.5     # correlation between price and variance shocks
generator = np.random.default_rng()
n = [250, 400] # n time periods
m = [x**2 for x in n] # number of simulations
# + id="YMWqqN0EwaLN"
# Grid of contract/market parameters swept over below.
K = [1.5, 2.5]             # strikes
S0 = [2, 3]                # initial stock prices
V0 = [0.01, 0.07]          # initial variances
int_rates = [0, 0.05]      # risk-free rates
time_maturity = [1, 2, 5]  # maturities in years
# + [markdown] id="5ncexc_zDCMP"
# Stock prices simulation:
# + id="wTzm8k-CB7Mc"
def stock_price_generator(T, n, m, r, S0, k, V0, sigma, theta, kappa, rho, generator):
    """Simulate m Heston paths with n Euler steps and return the terminal prices.

    Parameters: T maturity (years), n time steps, m paths, r risk-free rate,
    S0 initial price, k strike (accepted but unused — kept for call-site
    compatibility), V0 initial variance, sigma vol-of-vol, theta long-run
    variance, kappa mean-reversion speed, rho price/variance correlation,
    generator a numpy Generator. Returns an ndarray of shape (m,) with S_T.
    """
    step = T / n
    scale = np.sqrt(step)
    # Correlated Brownian increments. The variance shocks are drawn before the
    # independent component so the generator is consumed in a fixed order.
    shocks_v = generator.normal(size=(m, n)) * scale
    shocks_ind = generator.normal(size=(m, n)) * scale
    shocks_s = rho * shocks_v + np.sqrt(1.0 - rho ** 2) * shocks_ind

    # Euler time evolution of (price, variance).
    prices = np.empty((m, n + 1))
    prices[:, 0] = S0
    variance = np.ones(m) * V0
    for idx in range(n):
        var_incr = kappa * (theta - variance) * step + sigma * np.sqrt(variance) * shocks_v[:, idx]
        price_incr = r * prices[:, idx] * step + np.sqrt(variance) * prices[:, idx] * shocks_s[:, idx]
        # Full truncation: keep the variance non-negative after each step.
        variance = np.clip(variance + var_incr, a_min=0.0, a_max=None)
        prices[:, idx + 1] = prices[:, idx] + price_incr

    return prices[:, -1]
# + id="XHEjPkXxDCMV"
#s = stock_price_generator (, n ,m, r, S0, k, V0, sigma, theta, kappa, rho)
# + [markdown] id="Eb4toCbKDCMY"
# Plots stock prices simulations:
# + id="xfLgG8WQFcSX"
#plot_mc (s,,n)
# + [markdown] id="inZlNsAGDCMZ"
# Payoff:
# + id="zS-Nk3amDCMZ"
def find_expected_payoff(stock_paths, k, r, T):
    """Discounted Monte-Carlo estimate of a European call price.

    Parameters: stock_paths array-like of terminal prices (one per simulation),
    k strike, r risk-free rate, T maturity in years.
    Returns exp(-r*T) * mean(max(S_T - k, 0)).
    """
    # Vectorised max(S_T - k, 0) instead of a Python-level list comprehension.
    payoff = np.maximum(np.asarray(stock_paths) - k, 0.0)
    expected_payoff = payoff.mean()
    # Discount back to today; a no-op when r == 0.
    return expected_payoff * np.exp(-r * T)
# + id="zzIA3nNBDCMZ"
#c = find_expected_payoff(s, k, r, ) # in case r=0, this step is useless
#c
# + id="WLbryx_TwaLP"
df = pd.DataFrame(columns=['S0', 'K', 'V0', 'T', 'r', 'n', 'm', 'closed_solution', 'mc_price', 'ST_std'])
# + colab={"base_uri": "https://localhost:8080/"} id="xtezYZ0JB7Mi" outputId="5a386d8e-0294-4c05-9b91-a96239a258fb"
# Fill the results table: for every parameter combination, compute the
# closed-form Heston price and a Monte-Carlo estimate with matching (n, m).
for s0 in S0:
    print(f's0 {s0}')
    for k in K:
        print(f'k {k}')
        for v0 in V0:
            print(f'v0 {v0}')
            for t in time_maturity:
                print(f't {t}')
                for r in int_rates:
                    # Analytical closed-form solution (the benchmark).
                    sol = call_price(kappa, theta, sigma, rho, v0, r, t, s0, k)
                    for nn, mm in zip(n, m):
                        # Monte-Carlo terminal stock prices for this grid point.
                        ST = stock_price_generator(
                            t, nn, mm, r, s0, k, v0, sigma, theta, kappa, rho,
                            generator)
                        mc_price = find_expected_payoff(ST, k, r, t)
                        # Bug fix: 'V0' previously appeared twice in this dict;
                        # the duplicate key silently overwrote the first entry.
                        new_row = {
                            'S0': s0,
                            'K': k,
                            'V0': v0,
                            'T': t,
                            'r': r,
                            'n': nn,
                            'm': mm,
                            'closed_solution': sol,
                            'mc_price': mc_price,
                            'ST_std': np.std(ST)
                        }
                        # DataFrame.append was deprecated and removed in pandas 2.0;
                        # append the row via .loc enlargement instead.
                        df.loc[len(df)] = new_row
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="MuFhFqL8B7Mj" outputId="78e2bbc8-98e4-4020-fdd0-e47505175a21"
df.head()
# + id="JCexvtyaDCMZ"
# Persist the full benchmark table for the antithetic-variates pass below.
df.to_csv("solution_complete_mc.csv")
# + id="fWtlM-iCDCMY"
def plot_mc(s, T, n, m):
    """Plot a sample of simulated price paths and the terminal-price histogram.

    Parameters
    ----------
    s : ndarray, shape (n_sims, n + 1) — simulated price paths.
    T : float — maturity in years (x-axis extent).
    n : int — number of time steps per path.
    m : int — number of simulations; accepted for call-site compatibility but unused.
    """
    # Setup figure
    plt.figure(figsize=(8, 6))
    # noinspection PyTypeChecker
    ax_lines = plt.axes()
    maxx = np.max(s)
    minn = np.min(s)
    # Make the line plots for a subset of paths (at most 40, never more than available;
    # previously a fixed 40 could index past the end of a smaller simulation).
    t = np.linspace(0, T, num=n + 1)
    ns = min(40, s.shape[0])
    for i in range(ns):
        ax_lines.plot(t, s[i, :], lw=1.0)
    ax_lines.set(xlabel='Years', ylabel='St', title='Price Simulations')
    ax_lines.set_xlim((0, T))
    ax_lines.set_ylim((minn, maxx))
    # Mean terminal value across the whole x-range.
    # Bug fixes: the line previously stopped at x = 1.0 regardless of T, and
    # lw was passed as the string '2' where matplotlib expects a number; the
    # 'mean' label was also never shown because legend() was never called.
    mean_st = s[:, -1].mean()
    ax_lines.plot([0.0, T], [mean_st, mean_st], lw=2.0, ls="--", label='mean')
    ax_lines.legend()
    plt.show()
    # Plot stock price (terminal value) distribution.
    plt.figure(figsize=(6, 5))
    # NOTE(review): the bin range 1.4-3 assumes S0 around 2 — confirm for other S0.
    bins = np.arange(1.4, 3, .04)
    plt.hist(s[:, -1], bins=bins)
    plt.xlabel('ST')
    plt.ylabel('Number')
    plt.title('Stock Prices distribution')
    plt.show()
# + id="H_H-4HQxwaLR" colab={"base_uri": "https://localhost:8080/", "height": 202} outputId="0ab1728a-6bf4-48dd-eb1b-2cb1f1ee2249"
# Reload the persisted benchmark table for the antithetic-variates comparison.
solution_mc = pd.read_csv('solution_complete_mc.csv')
solution_mc.head()
# + colab={"base_uri": "https://localhost:8080/"} id="Wp7QEbEwCa2O" outputId="35c47fd4-add0-44bc-ef90-e8ba54e5f344"
len(solution_mc)
# + [markdown] id="opF1tO88B7Mm"
# # AV
# + id="frMqk84RB7Mn"
def stock_price_generator_av(T, n, m, r, S0, k, V0, sigma, theta, kappa, rho, separate=False):
    """Heston Monte-Carlo with antithetic variates.

    Simulates m/2 primary paths plus m/2 antithetic paths driven by the
    negated Brownian increments. Returns the concatenated terminal prices,
    or the two full path arrays (primary, antithetic) when separate=True.

    Bug fix: the antithetic paths previously reused the *same* increments as
    the primary paths, so both path sets were identical and the variance
    reduction was a no-op; they now use the negated increments, as the
    antithetic-variates method requires.
    """
    dt = T / n
    m = int(m / 2)  # with av we can do half of the simulations
    n = int(n)
    # Brownian increments (drawn once; the antithetic set is their negation).
    # NOTE(review): uses the module-level `generator` rather than a parameter.
    dw_v = generator.normal(size=(m, n)) * np.sqrt(dt)
    dw_i = generator.normal(size=(m, n)) * np.sqrt(dt)
    dw_s = rho * dw_v + np.sqrt(1.0 - rho ** 2) * dw_i
    # Perform time evolution of primary (s, v) and antithetic (s_ant, v_ant) paths.
    s = np.empty((m, n + 1))
    s[:, 0] = S0
    s_ant = np.empty((m, n + 1))
    s_ant[:, 0] = S0
    v = np.ones(m) * V0
    v_ant = np.ones(m) * V0
    for t in range(n):
        dv = kappa * (theta - v) * dt + sigma * np.sqrt(v) * dw_v[:, t]
        dv_ant = kappa * (theta - v_ant) * dt + sigma * np.sqrt(v_ant) * (-dw_v[:, t])
        ds = r * s[:, t] * dt + np.sqrt(v) * s[:, t] * dw_s[:, t]
        ds_ant = r * s_ant[:, t] * dt + np.sqrt(v_ant) * s_ant[:, t] * (-dw_s[:, t])
        # Full truncation keeps both variance processes non-negative.
        v = np.clip(v + dv, a_min=0.0, a_max=None)
        v_ant = np.clip(v_ant + dv_ant, a_min=0.0, a_max=None)
        s[:, t + 1] = s[:, t] + ds
        s_ant[:, t + 1] = s_ant[:, t] + ds_ant
    if separate:
        return s, s_ant
    return np.concatenate((s, s_ant))[:, -1]
# + colab={"base_uri": "https://localhost:8080/"} id="awYBKxaHB7Mn" outputId="cd4d7b94-1700-4618-e978-39f26058fd25"
mc_price_av = []
ST_av_std = []
i = 0
# Re-price every grid point of the MC results table with antithetic variates.
# NOTE(review): this loop rebinds the module-level names T, n, m, r, S0, K, V0
# (defined as parameter lists earlier) to scalars — re-running earlier cells
# after this one would use the clobbered values.
for row in solution_mc.index:
    i = i + 1
    if i % 10 == 0:
        print(i)  # progress indicator every 10 rows
    T = solution_mc['T'][row]
    n = solution_mc['n'][row]
    m = solution_mc['m'][row]
    r = solution_mc['r'][row]
    S0 = solution_mc['S0'][row]
    K = solution_mc['K'][row]
    V0 = solution_mc['V0'][row]
    ST_ant = stock_price_generator_av(T, n, m, r, S0, K, V0, sigma, theta, kappa, rho)
    ST_av_std.append(np.std(ST_ant))
    mc_price_av.append(find_expected_payoff(ST_ant, K, r, T))
# + id="YGbgYFa6B7Mn"
# Attach the antithetic-variates results to the benchmark table.
solution_mc['mc_price_av'] = mc_price_av
solution_mc['ST_av_std'] = ST_av_std
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="E2Pfft7-Li8F" outputId="3f097481-1a2a-41a0-e1bf-daee531a2bf3"
solution_mc.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="595D6ZFKuUU4" outputId="74983106-0a23-431a-df48-084f95cf4aaa"
# Compare terminal-price standard deviations: plain MC vs antithetic variates.
standard_deviation = solution_mc[['ST_std', 'ST_av_std']]
standard_deviation.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 202} id="IJw2Ge8ZB7Mo" outputId="c4c46123-597a-4a09-8f08-0bbeacf6c891"
prices = solution_mc[['closed_solution', 'mc_price', 'mc_price_av']]
prices.head()
# + id="ShQNfQ8wMPyx"
# Root of the summed squared pricing errors vs the closed-form benchmark.
# NOTE(review): despite the name, this is a root *sum* of squares, not an RMSE
# (there is no division by the number of rows) — confirm which was intended.
rmse_mc = np.sqrt(((prices.closed_solution - prices.mc_price)**2).sum())
rmse_mc_av = np.sqrt(((prices.closed_solution - prices.mc_price_av)**2).sum())
# + colab={"base_uri": "https://localhost:8080/"} id="3YQXsAoUMdzP" outputId="607e83c8-7d4a-4718-cdd0-76fa6cc26376"
rmse_mc
# + colab={"base_uri": "https://localhost:8080/"} id="tR51zBvfMgb5" outputId="4432caba-508a-4544-e78d-85dde50164c7"
rmse_mc_av
# + id="ZfcJTEC-M1jh"
# Overwrite the saved table with the antithetic-variates columns included.
solution_mc.to_csv("solution_complete_mc.csv")
# + id="Tq5kbgwJNdN6"
|
old (to ignore)/complete data/complete_mc.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/priyanshgupta1998/Natural-language-processing-NLP-/blob/master/jatana_assignment.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="2PS-FsQ2LBgw" colab_type="text"
# #Read Excel (.xlsx) file using python
# + id="Rae4ZK6TW8Zf" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 463} outputId="f7aacf0c-e8fd-417e-deea-ba1517287ac0"
# !wget https://www.dropbox.com/s/3boyfjsjmi4dpzp/sampledata.xlsx
# + id="ZYBg2OoZGYeI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 194} outputId="4d652586-b6ff-4ec7-9a9d-2e8760d1d516"
# !pip install xlrd
import xlrd
import time
import pandas as pd
# !pip install contractions
import contractions
import nltk
nltk.download('punkt')
nltk.download('averaged_perceptron_tagger')
import requests
url = 'http://translate.google.com/translate_a/t'
import numpy as np
from textblob import TextBlob
# To open Workbook
df = xlrd.open_workbook('sampledata.xlsx')
sheet = df.sheet_by_index(0)
# + id="XaRutxUWWzG3" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 72} outputId="de27abd9-9b16-433f-a321-f554a5c472f2"
print(sheet , sheet.nrows ,sheet.ncols)
sheet.row_values(5)[0]
# + id="KCCzAMNpW5XQ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1494} outputId="b3641905-3da1-439e-9086-15f19cda719e"
# Print a few sample rows (rows 4-7) of the sheet's first column,
# separated by a horizontal rule for readability.
for i in range(4,8):
    print(sheet.cell_value(i,0))
    print('\n----------------------------------------------------------------------------------------------------------\n')
# + id="gR__CEhTXP4B" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 336} outputId="0e719b8f-60a7-4fc3-da16-87aad4b077a4"
# Find all the contractions (short forms) such as "don't" / "it's" in the sheet.
import numpy as np
import re
liss = []
for i in range(1,sheet.nrows):
    #print(sheet.cell_value(i,0))
    refine = sheet.row_values(i)[0]
    refine = refine.strip().replace('\n', '')
    # Words containing an interior apostrophe, e.g. don't / I'm.
    add = re.findall(r"\w+'\w+", refine)
    if len(add)>0:
        liss.append(add)
# Flatten the per-row lists and count the distinct contractions found.
unique_cont = []
for i in range(len(liss)):
    for word in liss[i]:
        unique_cont.append(word)
print(len(unique_cont) , np.array(unique_cont[:30]))
print(len(np.unique(unique_cont)))
np.unique(unique_cont)
# + [markdown] id="4G52NaW5_JnN" colab_type="text"
# https://textblob.readthedocs.io/en/dev/quickstart.html
# + [markdown] id="I4vZIKmMkkhY" colab_type="text"
# #Extract data from the xlsx file
# + id="AV33AqZLKuBZ" colab_type="code" outputId="8f822f41-a08a-46e1-ec7f-9d2ea2b9e899" colab={"base_uri": "https://localhost:8080/", "height": 161}
# Remove '\n' and expand contractions (e.g. "don't" -> "do not"); also
# normalise ':' and '..' into sentence-friendly punctuation.
data = []
for line in range(1,sheet.nrows):
    rep = sheet.row_values(line)[0].strip().replace('\n\n', ' ')
    rep = rep.strip().replace(':', '.')
    rep = rep.strip().replace('..', '. ')
    data.append(contractions.fix(rep))
print(len(data))
print(data[:5])
# array(['de', 'en', 'es', 'fr', 'it', 'ja', 'nl', 'pl', 'ru', 'zh-CN'],
#       dtype='<U5')
# POS-tag the first few cleaned documents with TextBlob.
for i in range(5):
    result = TextBlob(data[i])
    print(len(result.tags) , result.tags)
# Tokenize dataset: collect just the words from the (word, tag) pairs.
all_app = []
for i in range(5):
    result = TextBlob(data[i])
    app = []
    for j in range(len(result.tags)):
        app.append(result.tags[j][0])
    all_app.append(app)
# + [markdown] id="lVD7i9eqlFEG" colab_type="text"
# #for sentence tokenize to csv
# + id="C-jLklWiODcL" colab_type="code" colab={}
import time
import pandas as pd
# Sentence-tokenize every document and persist one row per document.
sentences = pd.DataFrame(columns=['sentence_no','Sentence_token'] )
for j in range(len(data)):
    doc = []
    sentence = nltk.sent_tokenize(data[j])
    doc.append(sentence)
    # Row = label + a single-element list holding that document's sentence list.
    sentences.loc[j] = ['sentence_' + str(j+1)] + doc
sentences.to_csv('sentence_token.csv' , index= False)
# + id="vB5eYLvvt3i5" colab_type="code" outputId="3bcafada-b7ac-4e51-d5fc-0f70e3902789" colab={"base_uri": "https://localhost:8080/", "height": 215}
sent = pd.read_csv('sentence_token.csv')
print(sent.shape)
sent.head()
# + [markdown] id="hynPQnhHlYS-" colab_type="text"
# #for language detection to csv
# + id="6Hf4fWUflQ1g" colab_type="code" colab={}
# import time
# t=[]
# for i in range(len(data)):
# blob = TextBlob(data[i])
# t.append(blob.detect_language())
# time.sleep(2)
# t.to_csv('language_detection.csv' , index= False)
# + id="-7OrLtMUt8mA" colab_type="code" outputId="0dde1515-df21-456e-f520-1493b350621e" colab={"base_uri": "https://localhost:8080/", "height": 221}
lang = pd.read_csv('language.csv')
print(lang.shape)
lang.head()
# + [markdown] id="1PZekYiSliCG" colab_type="text"
# #extract the data from tokenize.csv and convert all into textblob datatype
# + id="uaKjM4Gvk5Nc" colab_type="code" outputId="ef5d0ca0-4be0-4f4c-8bf9-08adebf9d7a2" colab={"base_uri": "https://localhost:8080/", "height": 125}
import zipfile
from google.colab import drive
# Mount Google Drive to read the previously saved sentence tokens.
drive.mount('/content/drive/')
sentences = pd.read_csv('/content/drive/My Drive/sentence_token.csv')
k = sentences.Sentence_token
from ast import literal_eval
# The CSV stores each sentence list as its string repr; parse back into lists.
sent = []
for i in range(len(k)):
    sent.append(literal_eval(k[i]))
# l=[]
# for i in range(0,600,7):
#   l.append(i)
# #print(len(l))
# #print(l)
# Wrap every sentence in a TextBlob so language detection / translation
# can be applied per sentence later.
all_blob = []
for i in range(len(sent)):
    kaju = []
    exam = sent[i]
    for j in range(len(exam)):
        blob = TextBlob(exam[j])
        kaju.append(blob)
    all_blob.append(kaju)
# + [markdown] id="IxFR8mWBe-Pi" colab_type="text"
# #textBlob.translate() has limitations of 1000 calls/day
# `TextBlob internally uses Terry Yin's google-translate-python which now uses MyMemory instead of Google Translate.`
#
# `According to MyMemory: API usage limits: Free, anonymous usage is limited to 1000 calls/day (Or in one Session).`
#
# Here we have total 6298 calls (sentences). So I decided to split the dataset into chunks of size less than or equal to 1000.
# >set ----> sentences (calls)
# 0:96 ----->999
# 96:190 ----->998
# 190:286------>997
# 286:381----> 999
# 381:471----->990
# 471:566---->995
# 566:596------>320
# ### Make small chunks
# + id="PVwJUibsl1Av" colab_type="code" colab={}
# 0:96 , 96:190 ,190:286 , 286:381 , 381:471 ,471:566, 566:596
# 999, 998 , 997, 999, 990 , 995 ,320
# df = all_blob[566:596]
# add = 0
# for i in range(len(df)):
# add+=len(df[i])
# add
# + id="JB3CVzvfUgtJ" colab_type="code" colab={}
# Seventh of seven ~1000-call chunks (see the API-limit note above this cell):
# translate each non-English sentence of documents 566:596 to English.
df = all_blob[566:596]
final = pd.DataFrame(columns=['doc_no' , 'document'])
for i in range(len(df)):
    abs_data = []
    for j in range(len(df[i])):
        if(len(df[i][j])>2):
            # NOTE(review): TextBlob.detect_language()/translate() relied on a
            # free web API and are removed in newer textblob releases — confirm
            # the pinned version before re-running.
            if(df[i][j].detect_language()!='en'):
                d = df[i][j].translate(to='en')
                abs_data.append(str(d))
            else:
                abs_data.append(str(df[i][j]))
        else:
            # Very short "sentences" (<= 2 chars) are kept as-is; detection
            # is unreliable on them.
            abs_data.append(str(df[i][j]))
    doc = []
    doc.append(abs_data)
    final.loc[i] = ['document_' + str(i+1)] + doc
final.to_csv('final7.csv' , index=False)
# + [markdown] id="iK5FfOZ5mUv_" colab_type="text"
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
#
# ---
#
#
# + id="5j0inUyVq39E" colab_type="code" outputId="b243cb7b-340c-4adc-93a6-fdfe135328b1" colab={"resources": {"http://localhost:8080/nbextensions/google.colab/files.js": {"data": "Ly8gQ29weXJpZ2h0IDIwMTcgR29vZ2xlIExMQwovLwovLyBMaWNlbnNlZCB1bmRlciB0aGUgQXBhY2hlIExpY2Vuc2UsIFZlcnNpb24gMi4wICh0aGUgIkxpY2Vuc2UiKTsKLy8geW91IG1heSBub3QgdXNlIHRoaXMgZmlsZSBleGNlcHQgaW4gY29tcGxpYW5jZSB3aXRoIHRoZSBMaWNlbnNlLgovLyBZb3UgbWF5IG9idGFpbiBhIGNvcHkgb2YgdGhlIExpY2Vuc2UgYXQKLy8KLy8gICAgICBodHRwOi8vd3d3LmFwYWNoZS5vcmcvbGljZW5zZXMvTElDRU5TRS0yLjAKLy8KLy8gVW5sZXNzIHJlcXVpcmVkIGJ5IGFwcGxpY2FibGUgbGF3IG9yIGFncmVlZCB0byBpbiB3cml0aW5nLCBzb2Z0d2FyZQovLyBkaXN0cmlidXRlZCB1bmRlciB0aGUgTGljZW5zZSBpcyBkaXN0cmlidXRlZCBvbiBhbiAiQVMgSVMiIEJBU0lTLAovLyBXSVRIT1VUIFdBUlJBTlRJRVMgT1IgQ09ORElUSU9OUyBPRiBBTlkgS0lORCwgZWl0aGVyIGV4cHJlc3Mgb3IgaW1wbGllZC4KLy8gU2VlIHRoZSBMaWNlbnNlIGZvciB0aGUgc3BlY2lmaWMgbGFuZ3VhZ2UgZ292ZXJuaW5nIHBlcm1pc3Npb25zIGFuZAovLyBsaW1pdGF0aW9ucyB1bmRlciB0aGUgTGljZW5zZS4KCi8qKgogKiBAZmlsZW92ZXJ2aWV3IEhlbHBlcnMgZm9yIGdvb2dsZS5jb2xhYiBQeXRob24gbW9kdWxlLgogKi8KKGZ1bmN0aW9uKHNjb3BlKSB7CmZ1bmN0aW9uIHNwYW4odGV4dCwgc3R5bGVBdHRyaWJ1dGVzID0ge30pIHsKICBjb25zdCBlbGVtZW50ID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnc3BhbicpOwogIGVsZW1lbnQudGV4dENvbnRlbnQgPSB0ZXh0OwogIGZvciAoY29uc3Qga2V5IG9mIE9iamVjdC5rZXlzKHN0eWxlQXR0cmlidXRlcykpIHsKICAgIGVsZW1lbnQuc3R5bGVba2V5XSA9IHN0eWxlQXR0cmlidXRlc1trZXldOwogIH0KICByZXR1cm4gZWxlbWVudDsKfQoKLy8gTWF4IG51bWJlciBvZiBieXRlcyB3aGljaCB3aWxsIGJlIHVwbG9hZGVkIGF0IGEgdGltZS4KY29uc3QgTUFYX1BBWUxPQURfU0laRSA9IDEwMCAqIDEwMjQ7Ci8vIE1heCBhbW91bnQgb2YgdGltZSB0byBibG9jayB3YWl0aW5nIGZvciB0aGUgdXNlci4KY29uc3QgRklMRV9DSEFOR0VfVElNRU9VVF9NUyA9IDMwICogMTAwMDsKCmZ1bmN0aW9uIF91cGxvYWRGaWxlcyhpbnB1dElkLCBvdXRwdXRJZCkgewogIGNvbnN0IHN0ZXBzID0gdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKTsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIC8vIENhY2hlIHN0ZXBzIG9uIHRoZSBvdXRwdXRFbGVtZW50IHRvIG1ha2UgaXQgYXZhaWxhYmxlIGZvciB0aGUgbmV4dCBjYWxsCiAgLy8gdG8gdXBsb2FkRmlsZXNDb250a
W51ZSBmcm9tIFB5dGhvbi4KICBvdXRwdXRFbGVtZW50LnN0ZXBzID0gc3RlcHM7CgogIHJldHVybiBfdXBsb2FkRmlsZXNDb250aW51ZShvdXRwdXRJZCk7Cn0KCi8vIFRoaXMgaXMgcm91Z2hseSBhbiBhc3luYyBnZW5lcmF0b3IgKG5vdCBzdXBwb3J0ZWQgaW4gdGhlIGJyb3dzZXIgeWV0KSwKLy8gd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIGFzeW5jaHJvbm91cyBzdGVwcyBhbmQgdGhlIFB5dGhvbiBzaWRlIGlzIGdvaW5nCi8vIHRvIHBvbGwgZm9yIGNvbXBsZXRpb24gb2YgZWFjaCBzdGVwLgovLyBUaGlzIHVzZXMgYSBQcm9taXNlIHRvIGJsb2NrIHRoZSBweXRob24gc2lkZSBvbiBjb21wbGV0aW9uIG9mIGVhY2ggc3RlcCwKLy8gdGhlbiBwYXNzZXMgdGhlIHJlc3VsdCBvZiB0aGUgcHJldmlvdXMgc3RlcCBhcyB0aGUgaW5wdXQgdG8gdGhlIG5leHQgc3RlcC4KZnVuY3Rpb24gX3VwbG9hZEZpbGVzQ29udGludWUob3V0cHV0SWQpIHsKICBjb25zdCBvdXRwdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQob3V0cHV0SWQpOwogIGNvbnN0IHN0ZXBzID0gb3V0cHV0RWxlbWVudC5zdGVwczsKCiAgY29uc3QgbmV4dCA9IHN0ZXBzLm5leHQob3V0cHV0RWxlbWVudC5sYXN0UHJvbWlzZVZhbHVlKTsKICByZXR1cm4gUHJvbWlzZS5yZXNvbHZlKG5leHQudmFsdWUucHJvbWlzZSkudGhlbigodmFsdWUpID0+IHsKICAgIC8vIENhY2hlIHRoZSBsYXN0IHByb21pc2UgdmFsdWUgdG8gbWFrZSBpdCBhdmFpbGFibGUgdG8gdGhlIG5leHQKICAgIC8vIHN0ZXAgb2YgdGhlIGdlbmVyYXRvci4KICAgIG91dHB1dEVsZW1lbnQubGFzdFByb21pc2VWYWx1ZSA9IHZhbHVlOwogICAgcmV0dXJuIG5leHQudmFsdWUucmVzcG9uc2U7CiAgfSk7Cn0KCi8qKgogKiBHZW5lcmF0b3IgZnVuY3Rpb24gd2hpY2ggaXMgY2FsbGVkIGJldHdlZW4gZWFjaCBhc3luYyBzdGVwIG9mIHRoZSB1cGxvYWQKICogcHJvY2Vzcy4KICogQHBhcmFtIHtzdHJpbmd9IGlucHV0SWQgRWxlbWVudCBJRCBvZiB0aGUgaW5wdXQgZmlsZSBwaWNrZXIgZWxlbWVudC4KICogQHBhcmFtIHtzdHJpbmd9IG91dHB1dElkIEVsZW1lbnQgSUQgb2YgdGhlIG91dHB1dCBkaXNwbGF5LgogKiBAcmV0dXJuIHshSXRlcmFibGU8IU9iamVjdD59IEl0ZXJhYmxlIG9mIG5leHQgc3RlcHMuCiAqLwpmdW5jdGlvbiogdXBsb2FkRmlsZXNTdGVwKGlucHV0SWQsIG91dHB1dElkKSB7CiAgY29uc3QgaW5wdXRFbGVtZW50ID0gZG9jdW1lbnQuZ2V0RWxlbWVudEJ5SWQoaW5wdXRJZCk7CiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gZmFsc2U7CgogIGNvbnN0IG91dHB1dEVsZW1lbnQgPSBkb2N1bWVudC5nZXRFbGVtZW50QnlJZChvdXRwdXRJZCk7CiAgb3V0cHV0RWxlbWVudC5pbm5lckhUTUwgPSAnJzsKCiAgY29uc3QgcGlja2VkUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBpbnB1dEVsZW1lbnQuYWRkRXZlbnRMaXN0ZW5lcignY2hhbmdlJywgKGUpID0+I
HsKICAgICAgcmVzb2x2ZShlLnRhcmdldC5maWxlcyk7CiAgICB9KTsKICB9KTsKCiAgY29uc3QgY2FuY2VsID0gZG9jdW1lbnQuY3JlYXRlRWxlbWVudCgnYnV0dG9uJyk7CiAgaW5wdXRFbGVtZW50LnBhcmVudEVsZW1lbnQuYXBwZW5kQ2hpbGQoY2FuY2VsKTsKICBjYW5jZWwudGV4dENvbnRlbnQgPSAnQ2FuY2VsIHVwbG9hZCc7CiAgY29uc3QgY2FuY2VsUHJvbWlzZSA9IG5ldyBQcm9taXNlKChyZXNvbHZlKSA9PiB7CiAgICBjYW5jZWwub25jbGljayA9ICgpID0+IHsKICAgICAgcmVzb2x2ZShudWxsKTsKICAgIH07CiAgfSk7CgogIC8vIENhbmNlbCB1cGxvYWQgaWYgdXNlciBoYXNuJ3QgcGlja2VkIGFueXRoaW5nIGluIHRpbWVvdXQuCiAgY29uc3QgdGltZW91dFByb21pc2UgPSBuZXcgUHJvbWlzZSgocmVzb2x2ZSkgPT4gewogICAgc2V0VGltZW91dCgoKSA9PiB7CiAgICAgIHJlc29sdmUobnVsbCk7CiAgICB9LCBGSUxFX0NIQU5HRV9USU1FT1VUX01TKTsKICB9KTsKCiAgLy8gV2FpdCBmb3IgdGhlIHVzZXIgdG8gcGljayB0aGUgZmlsZXMuCiAgY29uc3QgZmlsZXMgPSB5aWVsZCB7CiAgICBwcm9taXNlOiBQcm9taXNlLnJhY2UoW3BpY2tlZFByb21pc2UsIHRpbWVvdXRQcm9taXNlLCBjYW5jZWxQcm9taXNlXSksCiAgICByZXNwb25zZTogewogICAgICBhY3Rpb246ICdzdGFydGluZycsCiAgICB9CiAgfTsKCiAgaWYgKCFmaWxlcykgewogICAgcmV0dXJuIHsKICAgICAgcmVzcG9uc2U6IHsKICAgICAgICBhY3Rpb246ICdjb21wbGV0ZScsCiAgICAgIH0KICAgIH07CiAgfQoKICBjYW5jZWwucmVtb3ZlKCk7CgogIC8vIERpc2FibGUgdGhlIGlucHV0IGVsZW1lbnQgc2luY2UgZnVydGhlciBwaWNrcyBhcmUgbm90IGFsbG93ZWQuCiAgaW5wdXRFbGVtZW50LmRpc2FibGVkID0gdHJ1ZTsKCiAgZm9yIChjb25zdCBmaWxlIG9mIGZpbGVzKSB7CiAgICBjb25zdCBsaSA9IGRvY3VtZW50LmNyZWF0ZUVsZW1lbnQoJ2xpJyk7CiAgICBsaS5hcHBlbmQoc3BhbihmaWxlLm5hbWUsIHtmb250V2VpZ2h0OiAnYm9sZCd9KSk7CiAgICBsaS5hcHBlbmQoc3BhbigKICAgICAgICBgKCR7ZmlsZS50eXBlIHx8ICduL2EnfSkgLSAke2ZpbGUuc2l6ZX0gYnl0ZXMsIGAgKwogICAgICAgIGBsYXN0IG1vZGlmaWVkOiAkewogICAgICAgICAgICBmaWxlLmxhc3RNb2RpZmllZERhdGUgPyBmaWxlLmxhc3RNb2RpZmllZERhdGUudG9Mb2NhbGVEYXRlU3RyaW5nKCkgOgogICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAgICAnbi9hJ30gLSBgKSk7CiAgICBjb25zdCBwZXJjZW50ID0gc3BhbignMCUgZG9uZScpOwogICAgbGkuYXBwZW5kQ2hpbGQocGVyY2VudCk7CgogICAgb3V0cHV0RWxlbWVudC5hcHBlbmRDaGlsZChsaSk7CgogICAgY29uc3QgZmlsZURhdGFQcm9taXNlID0gbmV3IFByb21pc2UoKHJlc29sdmUpID0+IHsKICAgICAgY29uc3QgcmVhZGVyID0gbmV3IEZpbGVSZWFkZXIoKTsKICAgICAgcmVhZGVyLm9ubG9hZCA9IChlK
SA9PiB7CiAgICAgICAgcmVzb2x2ZShlLnRhcmdldC5yZXN1bHQpOwogICAgICB9OwogICAgICByZWFkZXIucmVhZEFzQXJyYXlCdWZmZXIoZmlsZSk7CiAgICB9KTsKICAgIC8vIFdhaXQgZm9yIHRoZSBkYXRhIHRvIGJlIHJlYWR5LgogICAgbGV0IGZpbGVEYXRhID0geWllbGQgewogICAgICBwcm9taXNlOiBmaWxlRGF0YVByb21pc2UsCiAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgYWN0aW9uOiAnY29udGludWUnLAogICAgICB9CiAgICB9OwoKICAgIC8vIFVzZSBhIGNodW5rZWQgc2VuZGluZyB0byBhdm9pZCBtZXNzYWdlIHNpemUgbGltaXRzLiBTZWUgYi82MjExNTY2MC4KICAgIGxldCBwb3NpdGlvbiA9IDA7CiAgICB3aGlsZSAocG9zaXRpb24gPCBmaWxlRGF0YS5ieXRlTGVuZ3RoKSB7CiAgICAgIGNvbnN0IGxlbmd0aCA9IE1hdGgubWluKGZpbGVEYXRhLmJ5dGVMZW5ndGggLSBwb3NpdGlvbiwgTUFYX1BBWUxPQURfU0laRSk7CiAgICAgIGNvbnN0IGNodW5rID0gbmV3IFVpbnQ4QXJyYXkoZmlsZURhdGEsIHBvc2l0aW9uLCBsZW5ndGgpOwogICAgICBwb3NpdGlvbiArPSBsZW5ndGg7CgogICAgICBjb25zdCBiYXNlNjQgPSBidG9hKFN0cmluZy5mcm9tQ2hhckNvZGUuYXBwbHkobnVsbCwgY2h1bmspKTsKICAgICAgeWllbGQgewogICAgICAgIHJlc3BvbnNlOiB7CiAgICAgICAgICBhY3Rpb246ICdhcHBlbmQnLAogICAgICAgICAgZmlsZTogZmlsZS5uYW1lLAogICAgICAgICAgZGF0YTogYmFzZTY0LAogICAgICAgIH0sCiAgICAgIH07CiAgICAgIHBlcmNlbnQudGV4dENvbnRlbnQgPQogICAgICAgICAgYCR7TWF0aC5yb3VuZCgocG9zaXRpb24gLyBmaWxlRGF0YS5ieXRlTGVuZ3RoKSAqIDEwMCl9JSBkb25lYDsKICAgIH0KICB9CgogIC8vIEFsbCBkb25lLgogIHlpZWxkIHsKICAgIHJlc3BvbnNlOiB7CiAgICAgIGFjdGlvbjogJ2NvbXBsZXRlJywKICAgIH0KICB9Owp9CgpzY29wZS5nb29nbGUgPSBzY29wZS5nb29nbGUgfHwge307CnNjb3BlLmdvb2dsZS5jb2xhYiA9IHNjb3BlLmdvb2dsZS5jb2xhYiB8fCB7fTsKc2NvcGUuZ29vZ2xlLmNvbGFiLl9maWxlcyA9IHsKICBfdXBsb2FkRmlsZXMsCiAgX3VwbG9hZEZpbGVzQ29udGludWUsCn07Cn0pKHNlbGYpOwo=", "ok": true, "headers": [["content-type", "application/javascript"]], "status": 200, "status_text": ""}}, "base_uri": "https://localhost:8080/", "height": 112}
from google.colab import files
files.upload()
# + id="UZ7Na-htp1Me" colab_type="code" outputId="73ea6d2b-de6b-4e00-ec6b-030964a4edf4" colab={"base_uri": "https://localhost:8080/", "height": 212}
# first-method
# import zipfile
# with zipfile.ZipFile('English_dataset.zip', 'r') as zipObj:
# zipObj.extractall('home/')
#second-method
# !unzip English_dataset.zip
# + [markdown] id="3RPh9lYloktc" colab_type="text"
# #Import multiple csv files into pandas and concatenate into one DataFrame
# `I would like to read several csv files from a directory into pandas and concatenate them into one big DataFrame. `
# + id="eDkUIkXprsCc" colab_type="code" outputId="a9592c36-c79c-4b34-d4c7-9194cda33efe" colab={"base_uri": "https://localhost:8080/", "height": 141}
import pandas as pd
import glob
# Collect every per-part CSV (final*.csv) under the dataset directory.
path = r'English_dataset' # use your path
all_files = glob.glob(path + "/final*.csv")
all_files
# + id="xvOVmrJHWXeK" colab_type="code" colab={}
# Read each CSV into a DataFrame, stack them row-wise into one frame,
# and persist the combined result as final.csv.
li = []
for filename in all_files:
    df = pd.read_csv(filename, index_col=None, header=0)
    li.append(df)
final = pd.concat(li, axis=0, ignore_index=True)
final.to_csv('final.csv' , index= False)
# + id="j6C2cmvXfBAE" colab_type="code" outputId="d5397fd3-dc1a-40ec-cc5a-27464201dd22" colab={"base_uri": "https://localhost:8080/", "height": 34}
from ast import literal_eval
# Each 'document' cell holds a Python-list literal serialized as a string;
# literal_eval safely parses it back into a list of sentences.
k = final.document
word = []
for i in range(len(k)):
  word.append(literal_eval(k[i]))
len(word)
# + id="b_Kjf0QwuHp0" colab_type="code" outputId="7939e704-8616-4e1e-db68-dbd6fd335e9f" colab={"base_uri": "https://localhost:8080/", "height": 52}
import nltk
from nltk.tokenize import word_tokenize
nltk.download('punkt')
# Flatten every document's sentences into one token list per document and
# store the result (one row per document) in word_token.csv.
documents = pd.DataFrame(columns=['doc_no','word_token'] )
for i in range(len(word)):
  liss = []
  for j in range(len(word[i])):
    liss += word_tokenize(word[i][j])
  # wrap the token list so it occupies a single DataFrame cell
  words = []
  words.append(liss)
  documents.loc[i] = ['document_' + str(i+1)] + words
documents.to_csv('word_token.csv' , index= False)
# + [markdown] id="BAxjFXnGBZih" colab_type="text"
# #Noise removing step
# + [markdown] id="wKvYgNgREMQo" colab_type="text"
# https://polyglot.readthedocs.io/en/latest/Download.html
#
# 'Dutch' 'English' 'French' 'German' 'Italian' 'Japanese' 'Polish' 'Russian' 'Spanish'
# + id="yAbZjxthfA9C" colab_type="code" outputId="4ef898e5-8266-46a8-86b0-582bac296ec9" colab={"base_uri": "https://localhost:8080/", "height": 107}
nltk.download('stopwords')
from nltk.corpus import stopwords
# Show which languages NLTK ships stopword lists for.
print(len(stopwords.fileids()))
print(stopwords.fileids())
# + id="AnGKXN4wfA6A" colab_type="code" outputId="385ae6d3-4361-4976-9a0a-371626ec5a42" colab={"base_uri": "https://localhost:8080/", "height": 34}
# Combined stopword list covering all the corpus languages at once;
# stopwords.words() accepts a list of language fileids.
stops= []
li = ['dutch', 'english', 'french', 'german' ,'italian' ,'russian', 'spanish']
stops +=stopwords.words(li)
len(stops)
# + id="NWG6XIGOxVVr" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 107} outputId="3fe6ed41-f551-4220-cefc-3fd1105e9ce5"
# `c` lists noise tokens observed in the corpus (proper names, brands, and
# non-English words); `k` is a same-length list giving, position by position,
# the replacement the author wants (identical entries mean "keep as-is").
c = ['abuabdo', 'aleisa', 'alessandra', 'alesya', 'amazfit', 'ameet', 'andrea', 'anja', 'annya', 'arti', 'assistent', 'avonturia', 'azzurro', 'bajram', 'bene',
     'berto', 'bichon', 'bijenkorf', 'bugfixes', 'caed', 'capelleveen', 'cara', 'carmen', 'carola', 'carra', 'carrefour', 'catalogue', 'caterina', 'cenk', 'chantal',
     'christl', 'ciao', 'clarisse', 'claude', 'claudio', 'clubcard', 'cod', 'continente', 'cristina', 'cumprimentos', 'daniele', 'daria', 'dario', 'delhaize', 'denis',
     'dini', 'dirk', 'dobrota', 'elena', 'eleonora', 'ellen', 'elly', 'emanuela', 'emanuele', 'emmanuelle', 'ernestina', 'fabrizio', 'farmaciile', 'fausto', 'federico',
     'finder', 'finta', 'fotis', 'francesca', 'francesco', 'gabriele', 'gaelah', 'geessien', 'geiken', 'gerhard', 'giancarlo', 'gianni', 'giovanni', 'giuseppe', 'globus',
     'gmbh', 'godfried', 'graziano', 'greta', 'guido', 'hafenstraße', 'hanneke', 'hans', 'harma', 'haver', 'heijn', 'heinz', 'henri', 'hollenstein', 'hooi', 'iclou',
     'ikram', 'ilyas', 'indonesia', 'instal', 'intermarché', 'irina', 'isabelle', 'ismael', 'italiano', 'jacinthe', 'jam', 'jannie', 'jean', 'joachim', 'jola', 'josée',
     'karen', 'karin', 'kees', 'kirssia', 'klantenpas', 'lacour', 'leclerc', 'lewy', 'liliana', 'lina', 'lisa', 'luciana', 'luigi', 'mannheim', 'manuela', 'marco',
     'marg', 'margreet', 'maria', 'marius', 'marleen', 'marouf', 'martine', 'massimo', 'matej', 'matteo', 'maurizio', 'mecherouh', 'mehran', 'meißner', 'michel', 'mieke',
     'mirella', 'mirko', 'modifier', 'mohammed', 'morgane', 'myriam', 'míra', 'nahid', 'natalia', 'obligate', 'opitz', 'ordert', 'ornella', 'parlo', 'paulina', 'pedro',
     'peppe', 'pesce', 'petitti', 'philippe', 'pierre', 'piet', 'pietro', 'pilar', 'plonie', 'plus', 'purtroppo', 'ralf', 'reint', 'renate', 'ricardo', 'riccardo',
     'roberto', 'rodrigo', 'rolf', 'romania', 'ronak', 'roswitha', 'ryon', 'sabina', 'saludo', 'saluti', 'sanne', 'schäfer', 'sergio', 'simone', 'situate', 'smartwatch',
     'solveig', 'tanti', 'tizen', 'tiziana', 'tomas', 'torben', 'trenitalia', 'unite', 'vincenza', 'virginie', 'vitaminclub', 'vodafone', 'wacker', 'wezyk', 'wojtek',
     'yanusya', 'ying', 'yves', 'łukasz']
# English/normalized counterparts of `c`, aligned by position.
k = ['abudhabi', 'aleza', 'alessandra', 'alesya', 'amazfit', 'ameet', 'andrea', 'anja', 'annya', 'arti', 'assistent', 'adventuria', 'light blue', 'Eid', 'well',
     'berto', 'bichon', 'beehive', 'bugfixes', 'was found', 'capelleveen', 'way', 'carmen', 'carola', 'carra', 'carrefour', 'catalogue', 'caterina', 'cenk', 'chantal',
     'christl', 'ciao', 'clarisse', 'claude', 'claudio', 'clubcard', 'cod', 'continent', 'cristina', 'best regards', 'daniele', 'daria', 'dario', 'delhaize', 'denis',
     'dini', 'dirk', 'kindness', 'elena', 'eleonora', 'ellen', 'elly', 'emanuela', 'emanuele', 'emmanuelle', 'ernestina', 'fabrizio', 'pharmacies', 'auspicious', 'federico',
     'finder', 'fake', 'fotis', 'francesca', 'francesco', 'gabriele', 'bro', 'see', 'geiken', 'gerhard', 'giancarlo', 'gianni', 'giovanni', 'giuseppe', 'globus',
     'gmbh', 'godfried', 'graziano', 'greta', 'guido', 'hafenstrasse', 'hanneke', 'hans', 'mourn', 'to have', 'hey', 'heinz', 'henri', 'hollenstein', 'hay', 'icloud',
     'ikram', 'ilyas', 'indonesia', 'instal', 'intermarket', 'irina', 'isabelle', 'ismael', 'italiano', 'jacinthe', 'jam', 'jannie', 'jean', 'joachim', 'jola', 'joseph',
     'karen', 'karin', 'kees', 'cherry', 'klantenpas', 'lacour', 'leclerc', 'lewy', 'liliana', 'lina', 'lisa', 'luciana', 'luigi', 'mannheim', 'manuela', 'marco', 'marg',
     'margreet', 'maria', 'marius', 'marleen', 'marouf', 'martine', 'massimo', 'matej', 'matteo', 'maurizio', 'mecherouh', 'mehran', 'meissner', 'michel', 'mieke', 'mirella',
     'mirko', 'modifier', 'mohammed', 'morgane', 'myriam', 'míra', 'nahid', 'natalia', 'obligate', 'opitz', 'ordert', 'ornella', 'parlo', 'paulina', 'pedro', 'peppe',
     'fish', 'little ones', 'philippe', 'pierre', 'piet', 'pietro', 'pilar', 'yield', 'plus', 'Unfortunately', 'ralf', 'restaraunts', 'renate', 'ricardo', 'riccardo',
     'roberto', 'rodrigo', 'rolf', 'romania', 'ronak', 'roswitha', 'ryon', 'sabina', 'greeting', 'regards', 'sanne', 'shepherd', 'sergio', 'simone', 'situate',
     'smartwatch', 'solveig', 'many', 'tizen', 'tiziana', 'tomas', 'torben', 'trenitalia', 'unite', 'vincenza', 'virginie', 'vitaminclub', 'vodafone', 'wacker',
     'wavy line', 'wojtek', 'I am', 'should', 'yves', 'luke']
# Sanity check: number of items unique to `c` (expected to be 40, see below).
print(len(set(c) - set(k)))
# `c + k` filtered to items NOT shared by both lists yields the c-only items
# (in c order) followed by the k-only items (in k order); l1/l2 split them.
# NOTE(review): the hard-coded [:40] split and the positional pairing below
# only hold if each list contributes exactly 40 unique items in matching
# order — fragile if either list is edited; verify the printed count stays 40.
l1 = [i for i in c + k if i not in c or i not in k][:40]
l2 = [i for i in c + k if i not in c or i not in k][40:]
print(l1)
print(l2)
# Translation map: foreign/noise token -> English replacement.
dic = dict()
for i in range(len(l1)):
  dic[l1[i]] = l2[i]
print(dic)
# + id="O1qPcR1pk5Ko" colab_type="code" outputId="233995d4-6d5a-4d92-86d2-a6da8ad761e7" colab={"base_uri": "https://localhost:8080/", "height": 70}
nltk.download('stopwords')
from nltk.corpus import stopwords
import re
from ast import literal_eval
# Noise-removal pass over every document: lowercase, mask emails/URLs,
# strip punctuation/digits/extra whitespace, translate noise words via `dic`,
# then drop short (<=3 chars) tokens and stopwords. Order of the regex
# substitutions matters (emails/URLs must be masked before \W is removed).
k = final.document
datasets=[]
for j in range(len(k)):
  dataset = literal_eval(k[j])
  for i in range(len(dataset)):
    dataset[i] = dataset[i].lower()
    dataset[i] = re.sub(r'[a-zA-Z0-9-_.]+@[a-zA-Z0-9-_.]+','email',dataset[i]) #will replace email addresses with the token 'email'
    dataset[i] = re.sub(r'https?://(?:[-\w.]|(?:%[\da-fA-F]{2}))+','http',dataset[i]) #will replace http(s) URLs with the token 'http'
    dataset[i] = re.sub(r'\W',' ',dataset[i])# will remove non-word characters like #,*,%,. etc
    dataset[i] = re.sub(r'\d',' ',dataset[i])#will remove digits
    dataset[i] = re.sub(r'\s+',' ',dataset[i])#will remove extra spaces
    words = nltk.word_tokenize(dataset[i])
    #print(words)
    new = []
    for word in words:
      # translate known noise words to their English replacements
      for key,value in dic.items():
        if(word==key):
          word = value
      # keep only tokens longer than 3 chars that are not stopwords
      if(len(word)>3):
        if word not in stops:
          new.append(word)
    dataset[i] = ' '.join(new)
  #print(len(dataset) , dataset)
  datasets.append(dataset)
len(datasets)
# + [markdown] id="6bzfaTESI8v_" colab_type="text"
# #Text Normalization:
# + id="vmtEuLiWH60Z" colab_type="code" outputId="3248ba2d-5046-40bb-b800-c9e1031a17bd" colab={"base_uri": "https://localhost:8080/", "height": 70}
nltk.download('wordnet')
from nltk.stem.wordnet import WordNetLemmatizer
# Normalize each token to its verb lemma (pos='v'), sentence by sentence.
# NOTE: `datasets` sub-lists are mutated in place, so stem_data aliases them.
lem = WordNetLemmatizer() #lemma object
stem_data = []
for j in range(len(datasets)):
  dataset = datasets[j]
  for i in range(len(dataset)):
    words = nltk.word_tokenize(dataset[i])
    words = [lem.lemmatize(word,pos='v') for word in words]
    dataset[i] = ' '.join(words)
  #print(len(dataset) , dataset)
  stem_data.append(dataset)
len(stem_data)
# + id="EqlzEBBJLtw4" colab_type="code" colab={}
# Drop empty sentences from every cleaned document and persist one row per
# document (doc label + list of non-empty sentences) to clean_text.csv.
all_clean = pd.DataFrame(columns=['doc_no','documents'] )
for p in range(len(stem_data)):
  data =stem_data[p]
  c =[]
  for i in range(len(data)):
    if(len(data[i])>0):
      c.append(data[i])
  # wrap the sentence list so it occupies a single DataFrame cell
  clean_data =[]
  clean_data.append(c)
  all_clean.loc[p] = ['document_' + str(p+1)] + clean_data
all_clean.to_csv('clean_text.csv' , index= False)
# + [markdown] id="p01HzK-cSYp4" colab_type="text"
# #Feature Engineering
# + id="zNa4yx_6P61c" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="92d0c16d-3ea0-46e4-ceb9-8da5710749c0"
# Re-load the cleaned corpus and build one flat token list per document;
# the CSV round-trip stringifies the lists, hence literal_eval again.
df = pd.read_csv('clean_text.csv')
from ast import literal_eval
k =df.documents
all_token=[]
for j in range(len(k)):
  dataset = literal_eval(k[j])
  #print(dataset)
  token =[]
  for i in range(len(dataset)):
    token+=nltk.word_tokenize(dataset[i])
  all_token.append(token)
len(all_token)
# + id="8kfPII-OR2Sl" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="a71a760e-ac2f-4527-e1d2-e9ca39633672"
print(all_token[:1])
# + id="krbnGG-DXO5p" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="3724f4d8-2f7c-4b04-aa91-f87b1acefb23"
# Collapse each cleaned document into a single space-joined string —
# the input shape the sklearn vectorizers below expect.
clean_data =[]
for p in range(len(stem_data)):
  data =stem_data[p]
  c =[]
  for i in range(len(data)):
    if(len(data[i])>0):
      c +=data[i].split()
  clean_data.append(' '.join(c))
len(clean_data)
# + id="HVh8k07cXauy" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 125} outputId="21eb1267-ba73-4a58-a294-b21249625755"
clean_data[0:5]
# + [markdown] id="Xg8s8vGCBbqI" colab_type="text"
# # Perform Topic modeling Using machine learning library
# + [markdown] id="4MyWHUi58BUJ" colab_type="text"
# ###1. Bag of words model:
# + id="wjRADnBdFqqZ" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 178} outputId="b5d8f2ba-4373-48c2-9867-08c528bff29e"
# Bag-of-words features: raw token counts per document.
from sklearn.feature_extraction.text import CountVectorizer
# fix: ngram_range was (0,1); a lower bound of 0 would request 0-grams, which
# don't exist. With max_n=1 sklearn happened to behave like (1,1) anyway, so
# (1,1) states the actual intent (unigrams only) without the invalid bound.
count_vect = CountVectorizer(ngram_range=(1,1)) #in scikit-learn
final_counts = count_vect.fit_transform(clean_data)
# NOTE(review): get_feature_names() was removed in scikit-learn >= 1.2 in
# favour of get_feature_names_out() — confirm the pinned sklearn version.
print(len(count_vect.get_feature_names()) , count_vect.get_feature_names())
print(final_counts.toarray())
# + [markdown] id="KMa5Dcfe8JvK" colab_type="text"
# ###2.TF-IDF Vectorizer
# + id="MNSpZKVrRDZk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 178} outputId="4fc6b173-9e3b-48a9-a137-464698b8754f"
# TF-IDF features (used by NMF below, which works on weighted counts).
from sklearn.feature_extraction.text import TfidfVectorizer
vector = TfidfVectorizer()
tfidf = vector.fit_transform(clean_data)
# NOTE(review): get_feature_names() is removed in scikit-learn >= 1.2
# (get_feature_names_out() replaces it) — confirm the pinned version.
tfidf_feature_names = vector.get_feature_names()
print(len(tfidf_feature_names) , tfidf_feature_names )
print(tfidf.toarray())
# + id="bRwln7KSw40a" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 178} outputId="42a3a1bb-68d6-41a5-8442-a590e75c14c6"
# LDA can only use raw term counts because it is a probabilistic graphical
# model over word occurrences, so a separate count matrix is built here.
from sklearn.feature_extraction.text import CountVectorizer
tf_vectorizer = CountVectorizer()
tf = tf_vectorizer.fit_transform(clean_data)
tf_feature_names = tf_vectorizer.get_feature_names()
print(len(tf_feature_names) , tf_feature_names)
print(tf.toarray())
# + id="nSEIKr4JFqeK" colab_type="code" colab={}
from sklearn.decomposition import NMF, LatentDirichletAllocation
# Both models extract the same number of topics, seeded for reproducibility.
no_topics = 20
# Run NMF on the TF-IDF matrix (nndsvd init suits sparse factorization).
nmf = NMF(n_components=no_topics, random_state=1, alpha=.1, l1_ratio=.5, init='nndsvd').fit(tfidf)
# Run LDA on raw counts with online (mini-batch) variational Bayes.
lda = LatentDirichletAllocation(n_components=no_topics, max_iter=5, learning_method='online', learning_offset=50.,random_state=0).fit(tf)
# + id="Rw2YXN-kiljY" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="859baae8-6da8-4ac0-c4ef-fc0c7fd41ab3"
nmf
# + id="GKWv0vrrilgN" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 141} outputId="7b3122f8-ae67-4b31-f30e-6e76eb7bdb90"
lda
# + id="EPgmgG_Ueg4V" colab_type="code" colab={}
def display_topics(model, feature_names, no_top_words):
    """Print each topic's index and its highest-weighted terms.

    Works for any fitted model exposing `components_` (one weight vector
    per topic, e.g. sklearn NMF/LDA); `feature_names` maps column index
    to term string.
    """
    for idx, weights in enumerate(model.components_):
        # Sort ascending, reverse, and keep the heaviest no_top_words terms.
        heaviest = weights.argsort()[::-1][:no_top_words]
        terms = [feature_names[t] for t in heaviest]
        print("Topic %d:" % (idx))
        print(" ".join(terms))
no_top_words = 10
# + id="1ss1u_s8eg19" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 727} outputId="f591b608-672b-428a-ac55-b540e30ce53d"
#Using NMF: top terms per topic from the TF-IDF factorization
display_topics(nmf, tfidf_feature_names, no_top_words)
# + id="m_rQdmOzegzq" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 727} outputId="ea7172c4-e1e1-4d91-cd2f-e0eb87b35c78"
#Using LDA: top terms per topic from the count-based model
display_topics(lda, tf_feature_names, no_top_words)
# + [markdown] id="p9r0E4nxBzGM" colab_type="text"
# # Perform Topic modeling Using Gensim library
# + id="-xqspx6tZlr3" colab_type="code" colab={}
# !pip install pyLDAvis
# + id="SI2n0NwregvH" colab_type="code" colab={}
# Gensim
import gensim
import gensim.corpora as corpora
from gensim.utils import simple_preprocess
from gensim.models import CoherenceModel
from gensim import corpora, models
# spacy for lemmatization
import spacy
# Plotting tools
import pyLDAvis
pyLDAvis.enable_notebook()
import pyLDAvis.gensim # don't skip this
import matplotlib.pyplot as plt
# %matplotlib inline
from nltk.tokenize import RegexpTokenizer
from nltk.stem.porter import PorterStemmer
# + id="Qxr3JaShPiG5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 54} outputId="19f6034d-4399-4522-cd61-d913c52ce2c4"
# Create Dictionary: maps every token in the corpus to an integer id.
data_lemmatized = all_token
id2word = corpora.Dictionary(data_lemmatized)
# Create Corpus
texts = data_lemmatized
# Term Document Frequency: each document becomes a list of (id, count) pairs.
corpus = [id2word.doc2bow(text) for text in texts]
# View
print(corpus[:1])
# + id="U0k11DdSSR0S" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 70} outputId="19a08a42-e3bf-410d-9fad-6e01e922bcd3"
# Inspect the dictionary: its type, vocabulary size, and the token at id 0.
print(type(id2word))
print(len(id2word))
id2word[0]
# + id="aC3PoFq4SrmI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1438} outputId="5c550333-2890-40d3-a3e8-b1523e943d32"
# Human readable format of corpus (term-frequency): ids mapped back to words.
[[(id2word[id], freq) for id, freq in cp] for cp in corpus[:1]]
# + id="40ybD-GBSrjU" colab_type="code" colab={}
# Build LDA model: 20 topics, fixed seed, 10 full passes over the corpus
# in chunks of 100 documents; alpha='auto' learns the document-topic prior.
lda_model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                            id2word=id2word,
                                            num_topics=20,
                                            random_state=100,
                                            update_every=1,
                                            chunksize=100,
                                            passes=10,
                                            alpha='auto',
                                            per_word_topics=True)
# + id="q6_6CE-PSrfR" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 1438} outputId="e5e70ef5-58d1-4162-dc16-ccf30de24f1e"
# Print the keyword mix of each of the topics.
from pprint import pprint  # fix: `pprint` was called here without ever being imported
pprint(lda_model.print_topics())
# Apply the trained model to the corpus to get per-document topic information.
# NOTE(review): gensim appears to evaluate this lazily on iteration — confirm.
doc_lda = lda_model[corpus]
# + id="VyZvddcrSrcj" colab_type="code" colab={}
# Compute Perplexity
print('\nPerplexity: ', lda_model.log_perplexity(corpus))  # a measure of how good the model is. lower the better.
# Compute Coherence Score (c_v: higher is better; needs the tokenized texts)
coherence_model_lda = CoherenceModel(model=lda_model, texts=data_lemmatized, dictionary=id2word, coherence='c_v')
coherence_lda = coherence_model_lda.get_coherence()
print('\nCoherence Score: ', coherence_lda)
# + id="gcH9UmFNSrZ1" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 861} outputId="9c0cd543-821d-483e-fe97-7f738b191ddd"
# Visualize the topics as an interactive inter-topic distance map in the notebook.
pyLDAvis.enable_notebook()
vis = pyLDAvis.gensim.prepare(lda_model, corpus, id2word)
vis
# + id="QQ9rcDqsSq80" colab_type="code" colab={}
# + id="f1TK4fHHSq54" colab_type="code" colab={}
# + id="IXN4yaZcSq3X" colab_type="code" colab={}
|
jatana_assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] graffitiCellId="id_6z92v7o"
# # Implement a stack using an array
#
# In this notebook, we'll look at one way to implement a stack. First, check out the walkthrough for an overview, and then you'll get some practice implementing it for yourself.
# + [markdown] graffitiCellId="id_dr7cfv3"
# 
# + [markdown] graffitiCellId="id_yyfq2a2"
# Below we'll go through the implementation step by step. Each step has a walkthrough and also a solution. We recommend that you first watch the walkthrough, and then try to write the code on your own.
#
# When you first try to remember and write out the code for yourself, this effort helps you understand and remember the ideas better. At the same time, it's normal to get stuck and need a refresher—so don't hesitate to use the *Show Solution* buttons when you need them.
# + [markdown] graffitiCellId="id_cz4u9pc"
# ## Functionality
# Our goal will be to implement a `Stack` class that has the following behaviors:
#
# 1. `push` - adds an item to the top of the stack
# 2. `pop` - removes an item from the top of the stack (and returns the value of that item)
# 3. `size` - returns the size of the stack
# 4. `top` - returns the value of the item at the top of stack (without removing that item)
# 5. `is_empty` - returns `True` if the stack is empty and `False` otherwise
# + [markdown] graffitiCellId="id_n4ehw9p"
# ## 1. Create and initialize the `Stack` class
# First, have a look at the walkthrough:
# + [markdown] graffitiCellId="id_n4ehw9p"
# In the cell below:
# * Define a class named `Stack` and add the `__init__` method
# * Initialize the `arr` attribute with an array containing 10 elements, like this: `[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]`
# * Initialize the `next_index` attribute
# * Initialize the `num_elements` attribute
# + graffitiCellId="id_gi9yplo"
class Stack:
    """Array-backed stack: pre-allocated storage plus bookkeeping counters."""

    def __init__(self, initial_size = 10):
        # Zero-filled backing array of the requested capacity.
        self.arr = [0] * initial_size
        # Index of the slot the next push will fill.
        self.next_index = 0
        # Number of values currently held.
        self.num_elements = 0
# + [markdown] graffitiCellId="id_gb12el5"
# Let's check that the array is being initialized correctly. We can create a `Stack` object and access the `arr` attribute, and we should see our ten-element array:
# + graffitiCellId="id_od8d7ju"
# Smoke test: a fresh Stack exposes a ten-element zeroed backing array.
foo = Stack()
print(foo.arr)
print("Pass" if foo.arr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0] else "Fail")
# + [markdown] graffitiCellId="id_uddtqok"
# ## 2. Add the `push` method
# Next, we need to define our `push` method, so that we have a way of adding elements to the top of the stack.
# + [markdown] graffitiCellId="id_uddtqok"
# Now give it a try for yourself. Here are the key things to include:
# * The method will need to have a parameter for the value that you want to push
# * Remember that `next_index` will have the index for where the value should be added
# * Once you've added the value, you'll want to increment both `next_index` and `num_elements`
# + graffitiCellId="id_qx4z3nq"
class Stack:
    """Array-backed stack (fixed capacity at this stage) with push support."""

    def __init__(self, initial_size = 10):
        self.arr = [0] * initial_size   # zero-filled backing storage
        self.next_index = 0             # slot the next push will fill
        self.num_elements = 0           # how many items are on the stack

    def push(self, data):
        """Store *data* in the next free slot and advance both counters."""
        self.arr[self.next_index] = data
        self.next_index += 1
        self.num_elements += 1
# + [markdown] graffitiCellId="id_alax6u5"
# Let's test it by creating a stack object and pushing an item onto the stack:
# + graffitiCellId="id_qmycsda"
# Smoke test: pushing places the value in slot 0 of the backing array.
foo = Stack()
foo.push("Test!")
print(foo.arr)
print("Pass" if foo.arr[0] == "Test!" else "Fail")
# + [markdown] graffitiCellId="id_kyztmk5"
# ## 3. Handle full capacity
#
# Great, the `push` method seems to be working fine! But we know that it's not done yet. If we keep pushing items onto the stack, eventually we will run out of room in the array. Currently, that will cause an `Index out of range` error. In order to avoid a stack overflow, we need to check the capacity of the array before pushing an item to the stack. And if the array is full, we need to increase the array size before pushing the new element.
# + [markdown] graffitiCellId="id_kyztmk5"
# First, define the `_handle_stack_capacity_full` method:
# * Define an `old_arr` variable and assign it the current (full) array
# * Create a new (larger) array and assign it to `arr`.
# * Iterate over the values in the old array and copy them to the new array.
#
# Then, in the `push` method:
# * Add a conditional to check if the array is full; if it is, call the `_handle_stack_capacity_full`
# + graffitiCellId="id_m0mkufb"
class Stack:
    """Array-backed stack that doubles its backing storage when full."""

    def __init__(self, initial_size = 10):
        self.arr = [0] * initial_size   # zero-filled backing storage
        self.next_index = 0             # slot the next push will fill
        self.num_elements = 0           # how many items are on the stack

    def push(self, data):
        """Place *data* on top of the stack, growing the array if needed."""
        if self.next_index == len(self.arr):
            print("Out of space! Increasing array capacity ...")
            self._handle_stack_capacity_full()
        self.arr[self.next_index] = data
        self.next_index += 1
        self.num_elements += 1

    def _handle_stack_capacity_full(self):
        """Swap in a backing array twice as large, keeping existing items."""
        previous = self.arr
        self.arr = [0] * (2 * len(previous))
        self.arr[:len(previous)] = previous
# + [markdown] graffitiCellId="id_wo3cqbd"
# We can test this by pushing items onto the stack until we exceed the original capacity. Let's try it and see if we get an error, or if the array size gets increased like we want it to.
# + graffitiCellId="id_6ineceb"
# Push past the initial capacity of 10 to verify the array doubles to 20.
foo = Stack()
foo.push(1)
foo.push(2)
foo.push(3)
foo.push(4)
foo.push(5)
foo.push(6)
foo.push(7)
foo.push(8)
foo.push(9)
foo.push(10) # The array is now at capacity!
foo.push(11) # This one should cause the array to increase in size
print(foo.arr) # Let's see what the array looks like now!
print("Pass" if len(foo.arr) == 20 else "Fail") # If we successfully doubled the array size, it should now be 20.
# + [markdown] graffitiCellId="id_wyoe5v1"
# ## 4. Add the `size` and `is_empty` methods
#
# Next, we need to add a couple of simple methods:
# * Add a `size` method that returns the current size of the stack
# * Add an `is_empty` method that returns `True` if the stack is empty and `False` otherwise
#
# (This one is pretty straightforward, so there's no walkthrough—but there's still solution code below if you should need it.)
# + graffitiCellId="id_w5j566l"
class Stack:
    """Array-backed, auto-resizing stack with size/is_empty queries."""

    def __init__(self, initial_size = 10):
        self.arr = [0] * initial_size   # zero-filled backing storage
        self.next_index = 0             # slot the next push will fill
        self.num_elements = 0           # how many items are on the stack

    def push(self, data):
        """Place *data* on top of the stack, doubling the array when full."""
        if self.next_index == len(self.arr):
            print("Out of space! Increasing array capacity ...")
            self._handle_stack_capacity_full()
        self.arr[self.next_index] = data
        self.next_index += 1
        self.num_elements += 1

    def size(self):
        """Return the number of items currently stored."""
        return self.num_elements

    def is_empty(self):
        """Return True when no items are stored."""
        return self.num_elements == 0

    def _handle_stack_capacity_full(self):
        """Swap in a backing array twice as large, keeping existing items."""
        previous = self.arr
        self.arr = [0] * (2 * len(previous))
        self.arr[:len(previous)] = previous
# + [markdown] graffitiCellId="id_tz4nkd7"
# Let's test the new methods:
# + graffitiCellId="id_ciu8abs"
# Verify size()/is_empty() before and after a push.
foo = Stack()
print(foo.size()) # Should return 0
print(foo.is_empty()) # Should return True
foo.push("Test") # Let's push an item onto the stack and check again
print(foo.size()) # Should return 1
print(foo.is_empty()) # Should return False
# + [markdown] graffitiCellId="id_00pq8v4"
# ## 5. Add the `pop` method
#
# The last thing we need to do is add the `pop` method.
# + [markdown] graffitiCellId="id_00pq8v4"
# The method needs to:
# * Check if the stack is empty and, if it is, return `None`
# * Decrement `next_index` and `num_elements`
# * Return the item that is being "popped"
# + graffitiCellId="id_5iq1jsn"
class Stack:
    """Array-backed stack with automatic resizing.

    Implements the full interface promised at the top of this notebook:
    push, pop, size, is_empty — plus top, which the functionality list
    describes but the original class never provided.
    """

    def __init__(self, initial_size = 10):
        self.arr = [0 for _ in range(initial_size)]
        self.next_index = 0       # slot the next push will fill
        self.num_elements = 0     # how many items are on the stack

    def push(self, data):
        """Add *data* to the top of the stack, doubling the array when full."""
        if self.next_index == len(self.arr):
            print("Out of space! Increasing array capacity ...")
            self._handle_stack_capacity_full()
        self.arr[self.next_index] = data
        self.next_index += 1
        self.num_elements += 1

    def pop(self):
        """Remove and return the top item, or None if the stack is empty."""
        if self.is_empty():
            # Fix: the original also reset next_index here, but push/pop keep
            # next_index == num_elements at all times, so it is already 0.
            return None
        self.next_index -= 1
        self.num_elements -= 1
        return self.arr[self.next_index]

    def top(self):
        """Return (without removing) the top item, or None if empty."""
        if self.is_empty():
            return None
        return self.arr[self.next_index - 1]

    def size(self):
        """Return the number of items currently on the stack."""
        return self.num_elements

    def is_empty(self):
        """Return True if the stack holds no items."""
        return self.num_elements == 0

    def _handle_stack_capacity_full(self):
        """Double the backing array and copy the existing elements across."""
        old_arr = self.arr
        self.arr = [0 for _ in range( 2* len(old_arr))]
        for index, element in enumerate(old_arr):
            self.arr[index] = element
# + [markdown] graffitiCellId="id_v2r2b2x"
# Let's test the `pop` method:
# + graffitiCellId="id_0ktsfz8"
# Verify pop returns the pushed value, then None once the stack is empty.
foo = Stack()
foo.push("Test") # We first have to push an item so that we'll have something to pop
print(foo.pop()) # Should return the popped item, which is "Test"
print(foo.pop()) # Should return None, since there's nothing left in the stack
# + [markdown] graffitiCellId="id_8do0njo"
# Done!
|
array_strings/ipynb/array_stack.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Data analysis
# ### Importing needed libraries
# !pip install descartes
# !pip install geopandas
# !pip install country_converter
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import pandas
import geopandas as gpd
from matplotlib import style
import country_converter as coco
# ### Loading a CSV file in Python
# Load the scan results; `total` (all collected domains) is the denominator
# for every percentage computed below.
data = pandas.read_csv('data.csv', sep=',')
data
#check the column names
data.columns.values
#get the number of collected domains
index = data.index
total = len(index)
print(total)
# ### Distribution of domain names across name servers
#settings of plotting
plt.style.use('seaborn-deep')
plt.rcParams.update({'font.size': 10})
#grouping the data by NS IPs (counts domains per name-server IP)
ns = data.groupby('IP of NS')
ns_counter = ns.size().to_frame('size')
ns_counter
#sorting the results and counting the percentage distribution
result_ns = ns_counter.sort_values('size', ascending=False)
result_ns['percent'] = result_ns['size']/total*100
result_ns=result_ns.reset_index()
result_ns.index += 1
result_ns_table = result_ns.head(25)
print(result_ns_table)
result_ns_table['percent'].sum()
# +
#printing the graph: cumulative percentage covered by the top-N name servers
ns_pl = result_ns.cumsum()
ns_pl.plot(y='percent', kind='line',
           figsize=(5, 4), legend=False, style='b.-')
#plt.title(" Distribution of domains across name servers", y=1.01, fontsize=15)
plt.ylabel("Percentage of vulnerable domains", labelpad=15, fontsize=12)
plt.xlabel("Number of authoritative name servers", labelpad=15, fontsize=12);
# -
# ### Distribution of domain names across autonomous systems
#grouping the data by NS ASNs (counts domains per autonomous system)
ASN_dns = data.groupby('ASN(NS)')
ASN_dns_counter = ASN_dns.size().to_frame('size')
ASN_dns_counter
#sorting the results and counting the percentage distribution
result_asn = ASN_dns_counter.sort_values('size', ascending=False)
result_asn['percent'] = result_asn['size']/total*100
result_asn = result_asn.reset_index()
result_asn.index += 1
print(result_asn)
result_table = result_asn.head()
result_table['percent'].sum()
asn_pl = result_asn.cumsum().head(94)
print(asn_pl)
#printing the graph: cumulative percentage covered by the top-N ASNs
asn_pl = result_asn.cumsum()
asn_pl.plot(y='percent', use_index=True, kind='line',
            figsize=(5, 4), legend=False, style='b.-')
#plt.title("Distribution of domains across autonomous systems", y=1.01, fontsize=15)
plt.ylabel("Percentage of vulnerable domains", labelpad=15, fontsize=12)
plt.xlabel("Number of autonomous systems", labelpad=15, fontsize=12);
# ### Distribution of domains across autonomous systems based on web servers
# Extract the ASN number from the 'ASN(WEB)' column (text after the first ':')
# and repeat the distribution analysis for web-server autonomous systems.
data['ASN_web'] = data['ASN(WEB)'].str.split(':').str[1]
ASN_web = data.groupby('ASN_web')
ASN_web_counter = ASN_web.size().to_frame('size')
result_web = ASN_web_counter.sort_values('size', ascending=False)
# NOTE(review): tail(1016) / head(46) are dataset-specific row counts — they
# will silently be wrong on a different data.csv; confirm against the data.
result_web = result_web.tail(1016)
result_web['percent'] = result_web['size']/total*100
result_table_web = result_web.head(46)
result_web = result_web.reset_index()
print(result_table_web)
result_table_web['size'].sum()
asn_pl = result_web.cumsum()
asn_pl.plot(y='percent', use_index=True, kind='line',
            figsize=(10, 8), legend=False, style='b.-')
plt.title("Distribution of domains across autonomous systems based on web servers", y=1.01, fontsize=12)
plt.ylabel("Percentage of vulnerable domains", labelpad=15, fontsize=10)
plt.xlabel("Number of autonomous systems", labelpad=15, fontsize=10);
# ### Distribution of domains across autonomous systems based on WHOIS records
# Distribution of domains per WHOIS organization (top 40 shown).
whois_org = data.groupby('Whois org')
whois_org_counter = whois_org.size().to_frame('size')
result_whois_org = whois_org_counter.sort_values('size', ascending=False)
result_whois_org['percent'] = result_whois_org['size']/total*100
result_whois_org_table = result_whois_org.head(40)
print(result_whois_org_table)
result_whois_org['size'].sum()
# ### Analysis of DNS records
# #### A records
# Per-record-type frequency tables: group by the record column, count rows,
# sort descending, and express each count as a share of `total`.
# (groupby drops rows where the record column is NaN, i.e. record absent.)
ipv4 = data.groupby('IPv4(WEB)')
ipv4_counter = ipv4.size().to_frame('size')
result_ipv4 = ipv4_counter.sort_values('size', ascending=False)
result_ipv4['percent'] = result_ipv4['size']/total*100
result_ipv4=result_ipv4.reset_index()
result_ipv4_table = result_ipv4.head(249)
print(result_ipv4_table)
result_ipv4['size'].sum()
# #### AAAA records
ipv6 = data.groupby('IPv6')
ipv6_counter = ipv6.size().to_frame('size')
result_ipv6 = ipv6_counter.sort_values('size', ascending=False)
result_ipv6['percent'] = result_ipv6['size']/total*100
result_ipv6_table = result_ipv6.head(249)
print(result_ipv6_table)
result_ipv6['size'].sum()
# #### CNAME records
cname = data.groupby('CNAME')
cname_counter = cname.size().to_frame('size')
result_cname = cname_counter.sort_values('size', ascending=False)
result_cname['percent'] = result_cname['size']/total*100
result_cname_table = result_cname.head()
print(result_cname_table)
result_cname['size'].sum()
# #### A, AAAA, CNAME records - information on web servers
# (joint grouping: only rows with all three record columns present survive)
web = data.groupby(['IPv4(WEB)', 'IPv6', 'CNAME'])
web_counter = web.size().to_frame('size')
result_web = web_counter.sort_values('size', ascending=False)
result_web['percent'] = result_web['size']/total*100
result_web_table = result_web.head(249)
print(result_web_table)
result_web['size'].sum()
# #### AXFR records
# Same frequency/percentage pattern repeated for the remaining record types
# (AXFR, DNSSEC-related keys and signatures, MX, NS, TXT).
axfr = data.groupby('AXFR')
axfr_counter = axfr.size().to_frame('size')
result_axfr = axfr_counter.sort_values('size', ascending=False)
result_axfr['percent'] = result_axfr['size']/total*100
result_axfr_table = result_axfr.head()
print(result_axfr_table)
result_axfr['size'].sum()
# #### DNSKEY records
DNSKEY = data.groupby('DNSKEY')
DNSKEY_counter = DNSKEY.size().to_frame('size')
result_DNSKEY = DNSKEY_counter.sort_values('size', ascending=False)
result_DNSKEY['percent'] = result_DNSKEY['size']/total*100
result_DNSKEY_table = result_DNSKEY.head()
print(result_DNSKEY_table)
result_DNSKEY['size'].sum()
# #### DS records
DS = data.groupby('DS')
DS_counter = DS.size().to_frame('size')
result_DS = DS_counter.sort_values('size', ascending=False)
result_DS['percent'] = result_DS['size']/total*100
result_DS_table = result_DS.head()
print(result_DS_table)
result_DS['size'].sum()
# #### KEY records
KEY = data.groupby('KEY')
KEY_counter = KEY.size().to_frame('size')
result_KEY = KEY_counter.sort_values('size', ascending=False)
result_KEY['percent'] = result_KEY['size']/total*100
result_KEY_table = result_KEY.head()
print(result_KEY_table)
result_KEY['size'].sum()
# #### RRSIG records
RRSIG = data.groupby('RRSIG')
RRSIG_counter = RRSIG.size().to_frame('size')
result_RRSIG = RRSIG_counter.sort_values('size', ascending=False)
result_RRSIG['percent'] = result_RRSIG['size']/total*100
result_RRSIG_table = result_RRSIG.head()
print(result_RRSIG_table)
result_RRSIG['size'].sum()
# #### MX records
MX = data.groupby('MX')
MX_counter = MX.size().to_frame('size')
result_MX = MX_counter.sort_values('size', ascending=False)
result_MX['percent'] = result_MX['size']/total*100
result_MX_table = result_MX
print(result_MX_table)
result_MX['size'].sum()
# #### NS records
NS_1 = data.groupby('NS.1')
NS_1_counter = NS_1.size().to_frame('size')
result_NS_1 = NS_1_counter.sort_values('size', ascending=False)
result_NS_1['percent'] = result_NS_1['size']/total*100
result_NS_1_table = result_NS_1.head()
print(result_NS_1_table)
result_NS_1['size'].sum()
# #### TXT records
TXT = data.groupby(['TXT'])
TXT_counter = TXT.size().to_frame('size')
result_TXT = TXT_counter.sort_values('size', ascending=False)
result_TXT['percent'] = result_TXT['size']/total*100
result_TXT_table = result_TXT
print(result_TXT_table)
result_TXT['size'].sum()
# #### TXT and MX records - information on mail servers
TXT = data.groupby(['TXT','MX'])
TXT_counter = TXT.size().to_frame('size')
result_TXT = TXT_counter.sort_values('size', ascending=False)
result_TXT['percent'] = result_TXT['size']/total*100
result_TXT_table = result_TXT
print(result_TXT_table)
result_TXT['size'].sum()
# ### Lexical data
# #### TLD
# Distribution of domains per top-level domain.
TLD = data.groupby('TLD')
TLD_counter = TLD.size().to_frame('size')
result_TLD = TLD_counter.sort_values('size', ascending=False)
result_TLD['percent'] = result_TLD['size']/total*100
result_TLD = result_TLD.reset_index()
result_TLD_table = result_TLD.tail(50)
print(result_TLD_table)
result_TLD['size'].sum()
def group_lower_ranking_values(column, quantile=0.99, df=None):
    """Collapse rare values of *column* into a single 'Other' label.

    Counts rows per distinct value of ``column``; every value whose count
    falls strictly below the given count *quantile* (default: the 99th
    percentile, matching the original behaviour) is relabelled 'Other',
    and the per-column non-null counts are recomputed on the relabelled
    frame.

    Parameters
    ----------
    column : str
        Column of the frame to bucket.
    quantile : float, optional
        Count quantile below which values are merged into 'Other'.
    df : pandas.DataFrame, optional
        Frame to operate on; defaults to the notebook-global ``data``.

    Returns
    -------
    pandas.DataFrame
        Per-column non-null counts indexed by the relabelled values.
    """
    frame = data if df is None else df
    rating_counts = frame.groupby(column).agg('count')
    first_col = rating_counts.columns[0]
    pct_value = rating_counts[first_col].quantile(quantile)
    # Values whose count is strictly below the quantile get collapsed.
    values_below_pct_value = set(
        rating_counts[first_col].loc[lambda s: s < pct_value].index)
    # Vectorised relabelling instead of the previous row-wise apply():
    # identical result, one C-level pass over the column rather than one
    # Python call per row.
    relabeled = frame.copy()
    relabeled[column] = relabeled[column].where(
        ~relabeled[column].isin(values_below_pct_value), 'Other')
    return relabeled.groupby(column).agg('count')
# Pie chart of the TLD distribution, with the long tail merged into 'Other'.
rating_grouped = group_lower_ranking_values('TLD')
print(rating_grouped)
# +
# NOTE(review): plt and GridSpec are imported in a later cell (the CA pie
# chart cell); this cell only works after those imports ran -- confirm
# cell execution order in the original notebook.
plt.figure(1, figsize=(20,10))
the_grid = GridSpec(2, 2)
plt.subplot(the_grid[0, 1], aspect=1)
# 'Domain' is the first count column; slice labels are the (grouped) TLDs.
type_show_ids = plt.pie(rating_grouped['Domain'], labels=rating_grouped.index, autopct='%1.1f%%', shadow=True)
plt.show()
# -
# #### Level of domain
domain = data.groupby('Level of domain')
domain_counter = domain.size().to_frame('size')
result_domain = domain_counter.sort_values('size', ascending=False)
result_domain['percent'] = result_domain['size']/total*100
result_domain_table = result_domain.head()
print(result_domain_table)
result_domain['size'].sum()
# #### Subdomain check
# NOTE(review): 'research.' looks like a flag column marking domains with a
# "research." subdomain -- confirm against the data-collection step.
subd = data.groupby('research.')
subd_counter = subd.size().to_frame('size')
result_subd = subd_counter.sort_values('size', ascending=False)
result_subd['percent'] = result_subd['size']/total*100
result_subd_table = result_subd.head()
print(result_subd_table)
result_subd['size'].sum()
# ### Rankings
# #### General list
# Joint grouping over all five popularity rankings at once.
# NOTE(review): 'ALexa daily' (capital L) is the actual column name in the
# data; it cannot be renamed here without changing the source CSV.
d = data.groupby(['ALexa daily','Alexa global','Majestic','Umbrella','TRANCO'])
d_counter = d.size().to_frame('size')
result_d = d_counter.sort_values('size', ascending=False)
result_d['percent'] = result_d['size']/total*100
result_d_table = result_d.head()
print(result_d_table)
result_d['size'].sum()
# #### Alexa daily
# Ranking sections below sort by the rank value ascending (best ranks first).
al_d = data.groupby('ALexa daily')
al_d_counter = al_d.size().to_frame('size')
result_al_d = al_d_counter.sort_values('ALexa daily', ascending=True)
result_al_d['percent'] = result_al_d['size']/total*100
result_al_d_table = result_al_d.head()
print(result_al_d_table)
result_al_d['size'].sum()
# #### Alexa global
al = data.groupby('Alexa global')
al_counter = al.size().to_frame('size')
result_al = al_counter.sort_values('Alexa global', ascending=True)
result_al['percent'] = result_al['size']/total*100
result_al_table = result_al.head()
print(result_al_table)
result_al['size'].sum()
# #### Majestic
maj = data.groupby('Majestic')
maj_counter = maj.size().to_frame('size')
result_maj = maj_counter.sort_values('Majestic', ascending=True)
result_maj['percent'] = result_maj['size']/total*100
result_maj_table = result_maj.head()
print(result_maj_table)
result_maj['size'].sum()
# #### Cisco Umbrella
umb = data.groupby('Umbrella')
umb_counter = umb.size().to_frame('size')
result_umb = umb_counter.sort_values('Umbrella', ascending=True)
result_umb['percent'] = result_umb['size']/total*100
result_umb_table = result_umb.head()
print(result_umb_table)
result_umb['size'].sum()
# #### TRANCO
tr = data.groupby('TRANCO')
tr_counter = tr.size().to_frame('size')
result_tr = tr_counter.sort_values('TRANCO', ascending=True)
result_tr['percent'] = result_tr['size']/total*100
result_tr_table = result_tr.head()
print(result_tr_table)
result_tr['size'].sum()
# Export the rows sharing one specific TRANCO rank for manual inspection.
some_rows = data[(data['TRANCO'] == 9912)]
some_rows.to_csv('tranco3')
# ### Blacklists
# #### SafeBrowsing
# Verdict columns store stringified lists (e.g. "['MALWARE']").
sbd = data.groupby('SafeBrowsing(domain)')
sbd_counter = sbd.size().to_frame('size')
result_sbd = sbd_counter.sort_values('size', ascending=False)
result_sbd['percent'] = result_sbd['size']/total*100
result_sbd_table = result_sbd.head()
print(result_sbd_table)
result_sbd['size'].sum()
# Bare `some_rows` expressions below only display in a notebook.
some_rows = data[(data['SafeBrowsing(domain)'] == "['MALWARE']")]
some_rows
some_rows = data[(data['SafeBrowsing(domain)'] == "['SOCIAL_ENGINEERING']")]
some_rows
sbs = data.groupby('SafeBrowsing(subdomain)')
sbs_counter = sbs.size().to_frame('size')
print(sbs_counter)
result_sbs = sbs_counter.sort_values('size', ascending=False)
result_sbs['percent'] = result_sbs['size']/total*100
result_sbs_table = result_sbs.head()
print(result_sbs_table)
result_sbs['size'].sum()
some_rows = data[(data['SafeBrowsing(subdomain)'] == "['MALWARE']")]
some_rows
some_rows = data[(data['SafeBrowsing(subdomain)'] == "['SOCIAL_ENGINEERING']")]
some_rows
# #### VirusTotal
# 'VirustotalAPI' is presumably the number of engines flagging the domain
# -- TODO confirm against the collection script.
VirustotalAPI = data.groupby('VirustotalAPI')
VirustotalAPI_counter = VirustotalAPI.size().to_frame('size')
result_VirustotalAPI = VirustotalAPI_counter.sort_values('size', ascending=False)
result_VirustotalAPI['percent'] = result_VirustotalAPI['size']/total*100
result_VirustotalAPI_table = result_VirustotalAPI.head(40)
print(result_VirustotalAPI_table)
result_VirustotalAPI['size'].sum()
# Domains with at least one detection.
some_rows = data[(data['VirustotalAPI'] > 0)]
some_rows
# ### Fingerprinting
# #### OS
OS = data.groupby('OS')
OS_counter = OS.size().to_frame('size')
result_OS = OS_counter.sort_values('size', ascending=False)
result_OS['percent'] = result_OS['size']/total*100
result_OS_table = result_OS.head()
print(result_OS_table)
result_OS['size'].sum()
# #### Enrichment of data from banners
# 'ban' holds the text following "FreeBSD" in the banner (NaN when absent).
data['ban'] = data['Banners'].str.split('FreeBSD').str[1]
Banners = data.groupby(['ban','IP of NS','Server'])
Banners_counter = Banners.size().to_frame('size')
result_Banners = Banners_counter.sort_values('size', ascending=False)
result_Banners['percent'] = result_Banners['size']/total*100
result_Banners_table = result_Banners.head(50)
result_Banners['percent'].sum()
# Same enrichment, this time for the text following "Windows" (overwrites 'ban').
data['ban'] = data['Banners'].str.split('Windows').str[1]
Banners = data.groupby(['ban','IP of NS','Server'])
Banners_counter = Banners.size().to_frame('size')
result_Banners = Banners_counter.sort_values('size', ascending=False)
result_Banners['percent'] = result_Banners['size']/total*100
result_Banners_table = result_Banners.head(50)
result_Banners['percent'].sum()
# #### DNS server
SERVER = data.groupby('Server')
SERVER_counter = SERVER.size().to_frame('size')
result_SERVER = SERVER_counter.sort_values('size', ascending=False)
result_SERVER['percent'] = result_SERVER['size']/total*100
result_SERVER_table = result_SERVER.head()
print(result_SERVER_table)
result_SERVER['size'].sum()
# ### SSL/TLS information
# #### SSL/TLS
# NOTE(review): `ssl` shadows the stdlib ssl module name in this namespace.
ssl = data.groupby(['SSL/TLS'])
ssl_counter = ssl.size().to_frame('size')
result_ssl = ssl_counter.sort_values('size', ascending=False)
result_ssl['percent'] = result_ssl['size']/total*100
result_ssl_table = result_ssl.head(100)
print(result_ssl_table)
result_ssl['size'].sum()
# #### CA
CA = data.groupby('CA')
CA_counter = CA.size().to_frame('size')
result_CA = CA_counter.sort_values('size', ascending=False)
# NOTE(review): 4202 is a magic number -- presumably the count of domains
# that actually presented a certificate (so percentages are relative to
# that subset, not `total`); confirm and consider naming the constant.
result_CA['percent'] = result_CA['size']/4202*100
result_CA_table = result_CA.head(20)
print(result_CA_table)
result_CA['size'].sum()
def group_lower_ranking_v(column, quantile=0.98, df=None):
    """Collapse rare values of *column* into 'Other' (98th-percentile cut).

    Near-duplicate of ``group_lower_ranking_values`` with a lower default
    cut-off; kept as a separate function for backward compatibility with
    existing calls, but now parameterised the same way.

    Parameters
    ----------
    column : str
        Column of the frame to bucket.
    quantile : float, optional
        Count quantile below which values are merged into 'Other'.
    df : pandas.DataFrame, optional
        Frame to operate on; defaults to the notebook-global ``data``.

    Returns
    -------
    pandas.DataFrame
        Per-column non-null counts indexed by the relabelled values.
    """
    frame = data if df is None else df
    rating_counts = frame.groupby(column).agg('count')
    first_col = rating_counts.columns[0]
    pct_value = rating_counts[first_col].quantile(quantile)
    # Values whose count is strictly below the quantile get collapsed.
    values_below_pct_value = set(
        rating_counts[first_col].loc[lambda s: s < pct_value].index)
    # Vectorised relabelling (replaces the previous per-row apply()).
    relabeled = frame.copy()
    relabeled[column] = relabeled[column].where(
        ~relabeled[column].isin(values_below_pct_value), 'Other')
    return relabeled.groupby(column).agg('count')
# +
# Pie chart of certificate authorities, rare CAs merged into 'Other'.
rating_grouped = group_lower_ranking_v('CA')
print(rating_grouped)
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
plt.figure(1, figsize=(20,10))
the_grid = GridSpec(2, 2)
plt.subplot(the_grid[0, 1], aspect=1, title='CA')
type_show_ids = plt.pie(rating_grouped['Domain'], labels=rating_grouped.index, autopct='%1.1f%%', shadow=True)
plt.show()
# -
# ### Registrar
REG = data.groupby('REG')
REG_counter = REG.size().to_frame('size')
result_REG = REG_counter.sort_values('size', ascending=False)
result_REG['percent'] = result_REG['size']/total*100
result_REG_table = result_REG.head()
print(result_REG_table)
result_REG['size'].sum()
# ### Country(WEB)/Country(NS)
# World geometry for choropleth maps; `gpd` (geopandas) is imported in an
# earlier cell.
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
# #### Distribution of domains based on location of DNS servers
country_dns = data.groupby('Country(NS)')
country_dns_counter = country_dns.size().to_frame('size')
country = country_dns_counter.sort_values('size', ascending=False)
country = country.reset_index()
# NOTE(review): fixes the name of row 1 by position so it matches the
# naturalearth 'name' column -- fragile if the data ordering changes.
country.loc[1,'Country(NS)'] = "United States of America"
country['percent'] = country['size']/total*100
result_country_dns_table = country.head()
print(result_country_dns_table)
country['size'].sum()
co=country[['Country(NS)','size']]
world=world.merge(co,left_on='name',right_on='Country(NS)',how='outer')
world.plot(column='size', cmap='tab20b',figsize=(20,10),legend=True,missing_kwds={'color': 'lightgrey'})
# #### Distribution of domains based on location of WEB servers
country_web = data.groupby('Country(WEB)')
country_web_counter = country_web.size().to_frame('size')
country_w = country_web_counter.sort_values('size', ascending=False)
country_w['percent'] = country_w['size']/total*100
country_w = country_w.reset_index()
result_country_web_table = country_w.head(100)
print(result_country_web_table)
country_w['size'].sum()
# NOTE(review): positional fixes of malformed country codes; these rows were
# presumably identified by inspecting the printout above -- fragile if the
# underlying data changes.
country_w.loc[9,'Country(WEB)'] = "GB"
country_w.loc[14,'Country(WEB)'] = "CH"
country_w.loc[21,'Country(WEB)'] = "AT"
country_w.loc[23,'Country(WEB)'] = "MY"
# add country name by applying the convert method
# `coco` (country_converter) is imported in an earlier cell.
country_w['country'] = country_w['Country(WEB)'].apply(lambda x: coco.convert(names=x, to='name_short', not_found=None))
country_w.loc[1,'country'] = "United States of America"
co_w=country_w[['country','size']]
# Reload a fresh world frame (the previous merge added columns to `world`).
world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
world=world.merge(co_w,left_on='name',right_on='country',how='outer')
world.plot(column='size', cmap='tab20b',figsize=(20,10),legend=True,missing_kwds={'color': 'lightgrey'})
# ### Metadata
md = data.groupby('Metadata')
md_counter = md.size().to_frame('size')
result_md = md_counter.sort_values('size', ascending=False)
result_md['percent'] = result_md['size']/total*100
result_md_table = result_md.head()
print(result_md_table)
result_md['size'].sum()
# ### Sublist3r
Sublist3r = data.groupby('Sublist3r')
Sublist3r_counter = Sublist3r.size().to_frame('size')
result_Sublist3r = Sublist3r_counter.sort_values('size', ascending=False)
result_Sublist3r['percent'] = result_Sublist3r['size']/total*100
result_Sublist3r_table = result_Sublist3r.head()
print(result_Sublist3r_table)
result_Sublist3r['size'].sum()
# ### TheHarvester
# The four cells below share the `em_h`/`result_em_h` names; each rebinds them.
# #### emails
em_h = data.groupby('theharvester-emails')
em_h_counter = em_h.size().to_frame('size')
result_em_h = em_h_counter.sort_values('size', ascending=False)
result_em_h['percent'] = result_em_h['size']/total*100
result_em_h_table = result_em_h.head()
print(result_em_h_table)
result_em_h['size'].sum()
# #### hosts
em_h = data.groupby('theharvester-hosts')
em_h_counter = em_h.size().to_frame('size')
result_em_h = em_h_counter.sort_values('size', ascending=False)
result_em_h['percent'] = result_em_h['size']/total*100
result_em_h_table = result_em_h.head()
print(result_em_h_table)
result_em_h['size'].sum()
# #### IP addresses
em_h = data.groupby('theharvester-ips')
em_h_counter = em_h.size().to_frame('size')
result_em_h = em_h_counter.sort_values('size', ascending=False)
result_em_h['percent'] = result_em_h['size']/total*100
result_em_h_table = result_em_h.head()
print(result_em_h_table)
result_em_h['size'].sum()
# #### Employees
em_h = data.groupby('theharvester-people')
em_h_counter = em_h.size().to_frame('size')
result_em_h = em_h_counter.sort_values('size', ascending=False)
result_em_h['percent'] = result_em_h['size']/total*100
result_em_h_table = result_em_h.head()
print(result_em_h_table)
result_em_h['size'].sum()
# ### WEB technologies
# #### WhatWeb
ww = data.groupby('WhatWeb')
ww_counter = ww.size().to_frame('size')
result_ww = ww_counter.sort_values('size', ascending=False)
result_ww['percent'] = result_ww['size']/total*100
result_ww_table = result_ww.head()
print(result_ww_table)
result_ww['size'].sum()
# #### Wappalazer
# 'ww' column: text after "Apache" in the Wappalazer fingerprint string.
data['ww'] = data['Wappalazer'].str.split('Apache').str[1]
ww = data.groupby('ww')
ww_counter = ww.size().to_frame('size')
result_ww = ww_counter.sort_values('size', ascending=False)
# NOTE(review): 7000 is a magic denominator here instead of `total`
# -- presumably the count of domains with a fingerprint; confirm.
result_ww['percent'] = result_ww['size']/7000*100
result_ww_table = result_ww.head()
print(result_ww_table)
result_ww['percent'].sum()
|
data analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
# Hidden Markov Model
# ===================
#
# In this example, we will follow [1] to construct a semi-supervised Hidden Markov
# Model for a generative model whose observations are words and whose latent
# variables are categories. Instead of automatically marginalizing all discrete latent
# variables (as in [2]), we will use the "forward algorithm" (which exploits the
# conditional independence of a Markov model - see [3]) to iteratively do this
# marginalization.
#
# The semi-supervised problem is chosen instead of an unsupervised one because it
# is hard to make the inference work for an unsupervised model (see the
# discussion [4]). On the other hand, this example also illustrates the usage of
# JAX's `lax.scan` primitive. The primitive will greatly improve compiling for the
# model.
#
# **References:**
#
# 1. https://mc-stan.org/docs/2_19/stan-users-guide/hmms-section.html
# 2. http://pyro.ai/examples/hmm.html
# 3. https://en.wikipedia.org/wiki/Forward_algorithm
# 4. https://discourse.pymc.io/t/how-to-marginalized-markov-chain-with-categorical/2230
#
# +
import argparse
import os
import time
import matplotlib.pyplot as plt
import numpy as onp
from scipy.stats import gaussian_kde
from jax import lax, random
import jax.numpy as np
from jax.scipy.special import logsumexp
import numpyro
import numpyro.distributions as dist
from numpyro.infer import MCMC, NUTS
def simulate_data(rng_key, num_categories, num_words, num_supervised_data, num_unsupervised_data):
    """Draw a random HMM and simulate a word/category sequence from it.

    The transition and emission matrices are sampled from Dirichlet priors,
    then a single chain of length ``num_supervised_data +
    num_unsupervised_data`` is rolled out, restarting from the uniform
    initial distribution at the supervised/unsupervised boundary. Returns
    the priors, the true transition/emission probabilities, the supervised
    categories and words, and the unsupervised words. RNG keys are split
    sequentially, so statement order here determines the exact samples.
    """
    rng_key, rng_key_transition, rng_key_emission = random.split(rng_key, 3)
    transition_prior = np.ones(num_categories)
    emission_prior = np.repeat(0.1, num_words)
    # One Dirichlet row per source category / per emitting category.
    transition_prob = dist.Dirichlet(transition_prior).sample(key=rng_key_transition,
                                                              sample_shape=(num_categories,))
    emission_prob = dist.Dirichlet(emission_prior).sample(key=rng_key_emission,
                                                          sample_shape=(num_categories,))
    # Uniform initial-state distribution.
    start_prob = np.repeat(1. / num_categories, num_categories)
    categories, words = [], []
    for t in range(num_supervised_data + num_unsupervised_data):
        rng_key, rng_key_transition, rng_key_emission = random.split(rng_key, 3)
        # Restart the chain at t == 0 and at the supervised/unsupervised
        # boundary, so the two segments begin from the start distribution.
        if t == 0 or t == num_supervised_data:
            category = dist.Categorical(start_prob).sample(key=rng_key_transition)
        else:
            category = dist.Categorical(transition_prob[category]).sample(key=rng_key_transition)
        word = dist.Categorical(emission_prob[category]).sample(key=rng_key_emission)
        categories.append(category)
        words.append(word)
    # split into supervised data and unsupervised data
    categories, words = np.stack(categories), np.stack(words)
    supervised_categories = categories[:num_supervised_data]
    supervised_words = words[:num_supervised_data]
    unsupervised_words = words[num_supervised_data:]
    return (transition_prior, emission_prior, transition_prob, emission_prob,
            supervised_categories, supervised_words, unsupervised_words)
def forward_one_step(prev_log_prob, curr_word, transition_log_prob, emission_log_prob):
    """Advance the forward algorithm by one observation, in log space.

    ``prev_log_prob[i]`` is the log joint probability of the observations
    so far with the chain in state i; the return value is the same
    quantity after additionally transitioning and observing ``curr_word``.
    """
    # joint[i, j] = prev[i] + log p(j | i) + log p(curr_word | j)
    joint = prev_log_prob[:, None] + transition_log_prob + emission_log_prob[:, curr_word]
    # Marginalise the previous state (axis 0).
    return logsumexp(joint, axis=0)


def forward_log_prob(init_log_prob, words, transition_log_prob, emission_log_prob):
    """Run the forward algorithm over ``words`` starting from ``init_log_prob``.

    Uses ``lax.scan`` rather than a Python loop: a plain loop unrolls
    during tracing and makes compilation and inference very slow.
    """
    def step(carry, word):
        carry = forward_one_step(carry, word, transition_log_prob, emission_log_prob)
        return carry, np.zeros((0,))

    final_log_prob, _ = lax.scan(step, init_log_prob, words)
    return final_log_prob
def semi_supervised_hmm(transition_prior, emission_prior,
                        supervised_categories, supervised_words,
                        unsupervised_words):
    """NumPyro model for the semi-supervised HMM.

    The supervised segment conditions directly on the observed categories
    and words; for the unsupervised segment the categories are marginalised
    out with the forward algorithm and the resulting log probability is
    injected via ``numpyro.factor``.
    """
    num_categories, num_words = transition_prior.shape[0], emission_prior.shape[0]
    transition_prob = numpyro.sample('transition_prob', dist.Dirichlet(
        np.broadcast_to(transition_prior, (num_categories, num_categories))))
    emission_prob = numpyro.sample('emission_prob', dist.Dirichlet(
        np.broadcast_to(emission_prior, (num_categories, num_words))))
    # models supervised data;
    # here we don't make any assumption about the first supervised category, in other words,
    # we place a flat/uniform prior on it.
    numpyro.sample('supervised_categories', dist.Categorical(transition_prob[supervised_categories[:-1]]),
                   obs=supervised_categories[1:])
    numpyro.sample('supervised_words', dist.Categorical(emission_prob[supervised_categories]),
                   obs=supervised_words)
    # computes log prob of unsupervised data
    transition_log_prob = np.log(transition_prob)
    emission_log_prob = np.log(emission_prob)
    # The first unsupervised word only contributes its emission term.
    init_log_prob = emission_log_prob[:, unsupervised_words[0]]
    log_prob = forward_log_prob(init_log_prob, unsupervised_words[1:],
                                transition_log_prob, emission_log_prob)
    # Marginalise the final hidden state.
    log_prob = logsumexp(log_prob, axis=0, keepdims=True)
    # inject log_prob to potential function
    numpyro.factor('forward_log_prob', log_prob)
def print_results(posterior, transition_prob, emission_prob):
    """Print posterior quantiles (p25/p50/p75) next to the true transition
    and emission probabilities, one row per matrix entry."""
    header = semi_supervised_hmm.__name__ + ' - TRAIN'
    columns = ['', 'ActualProb', 'Pred(p25)', 'Pred(p50)', 'Pred(p75)']
    header_format = '{:>20} {:>10} {:>10} {:>10} {:>10}'
    row_format = '{:>20} {:>10.2f} {:>10.2f} {:>10.2f} {:>10.2f}'
    print('\n', '=' * 20 + header + '=' * 20, '\n')
    print(header_format.format(*columns))
    # Identical report for both parameter matrices, so fold the two
    # hand-written loops into one pass over (name, true-values) pairs.
    for name, true_probs in (('transition', transition_prob), ('emission', emission_prob)):
        quantiles = onp.quantile(posterior[name + '_prob'], [0.25, 0.5, 0.75], axis=0)
        for i in range(true_probs.shape[0]):
            for j in range(true_probs.shape[1]):
                idx = '{}[{},{}]'.format(name, i, j)
                print(row_format.format(idx, true_probs[i, j], *quantiles[:, i, j]), '\n')
def main(args):
    """Simulate HMM data, run NUTS on the semi-supervised model, then print
    and plot the posterior over transition probabilities."""
    print('Simulating data...')
    (transition_prior, emission_prior, transition_prob, emission_prob,
     supervised_categories, supervised_words, unsupervised_words) = simulate_data(
        random.PRNGKey(1),
        num_categories=args.num_categories,
        num_words=args.num_words,
        num_supervised_data=args.num_supervised,
        num_unsupervised_data=args.num_unsupervised,
    )
    print('Starting inference...')
    rng_key = random.PRNGKey(2)
    start = time.time()
    kernel = NUTS(semi_supervised_hmm)
    # Progress bar is disabled during Sphinx docs builds (env var set there).
    mcmc = MCMC(kernel, args.num_warmup, args.num_samples,
                progress_bar=False if "NUMPYRO_SPHINXBUILD" in os.environ else True)
    mcmc.run(rng_key, transition_prior, emission_prior, supervised_categories,
             supervised_words, unsupervised_words)
    samples = mcmc.get_samples()
    print_results(samples, transition_prob, emission_prob)
    print('\nMCMC elapsed time:', time.time() - start)
    # make plots
    fig, ax = plt.subplots(1, 1)
    x = onp.linspace(0, 1, 101)
    # One KDE curve per transition-matrix entry, labelled with its true value.
    for i in range(transition_prob.shape[0]):
        for j in range(transition_prob.shape[1]):
            ax.plot(x, gaussian_kde(samples['transition_prob'][:, i, j])(x),
                    label="trans_prob[{}, {}], true value = {:.2f}"
                    .format(i, j, transition_prob[i, j]))
    ax.set(xlabel="Probability", ylabel="Frequency",
           title="Transition probability posterior")
    ax.legend()
    # NOTE(review): the figure is saved before tight_layout() is applied, so
    # the layout adjustment does not affect the saved PDF -- confirm intent.
    plt.savefig("hmm_plot.pdf")
    plt.tight_layout()
if __name__ == '__main__':
    # This example is pinned to a specific numpyro release.
    assert numpyro.__version__.startswith('0.2.4')
    parser = argparse.ArgumentParser(description='Semi-supervised Hidden Markov Model')
    parser.add_argument('--num-categories', default=3, type=int)
    parser.add_argument('--num-words', default=10, type=int)
    parser.add_argument('--num-supervised', default=100, type=int)
    parser.add_argument('--num-unsupervised', default=500, type=int)
    parser.add_argument('-n', '--num-samples', nargs='?', default=1000, type=int)
    parser.add_argument('--num-warmup', nargs='?', default=500, type=int)
    parser.add_argument("--num-chains", nargs='?', default=1, type=int)
    parser.add_argument('--device', default='cpu', type=str, help='use "cpu" or "gpu".')
    args = parser.parse_args()
    numpyro.set_platform(args.device)
    numpyro.set_host_device_count(args.num_chains)
    main(args)
|
numpyro/_downloads/83a510f7be054c2e2c361f98237ae6b0/hmm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Born Again Training:
# ## Does re-labeling training/validation positives with model predictions in first round of training reduce overfitting to positives in upsampled batches in second round of training?
# Relevant: https://arxiv.org/pdf/1805.04770.pdf
# ## Outline<a name='outline'>
# <ol>
# <li><a href=#1>Input data</a></li>
# <li><a href=#2>Train Model</a></li>
# <li><a href=#3>Performance and Interpretation</a></li>
# <li><a href=#4>Relabel Training/Validation Positives</a></li>
# <li><a href=#5>Retrain the model from early stopping phase</a></li>
# <li><a href=#6>Performance and Interpretation on Round 2 </a></li>
# <li><a href=#9>Conclusions</a></li>
# </ol>
# +
# Making sure our results are reproducible
from numpy.random import seed
seed(1234)
# NOTE(review): tensorflow.set_random_seed is the TF 1.x API (TF 2.x renamed
# it to tf.random.set_seed) -- confirm the pinned TensorFlow version.
from tensorflow import set_random_seed
set_random_seed(1234)
#housekeeping
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import warnings
# Silence library deprecation chatter in the notebook output.
warnings.filterwarnings('ignore')
# -
# ## Inputs <a name='1'>
# <a href=#outline>Home</a>
#
# Seqdataloader was used to generate genome-wide regression labels for the SPI TF Chip-seq dataset (see Tutorial 5), yielding these files:
# ```
# SPI1.train.regression.hdf5
# SPI1.valid.regression.hdf5
# SPI1.test.regression.hdf5
# ```
# Set of bins with non-zero coverage values:
from seqdataloader import *
# +
# 1) Training set: every chromosome except the validation (chr1), test
#    (chr2, chr19) and chrY chromosomes.
train_set_params={
    'task_list':"SPI1.task.tsv",
    'outf':"train.positives.regression.hdf5",
    'store_values_above_thresh':0,
    'output_type':'hdf5',
    'chrom_sizes':'hg19.chrom.sizes',
    'chroms_to_exclude':['chr1','chr2','chr19','chrY'],
    'bin_stride':50,
    'left_flank':400,
    'right_flank':400,
    'bin_size':200,
    'threads':4,
    'subthreads':4,
    'allow_ambiguous':False,
    'labeling_approach':'peak_summit_in_bin_regression'
}
genomewide_labels(train_set_params)
#2) Validation set: Chromosome 1
valid_set_params={'task_list':"SPI1.task.tsv",
    'outf':"valid.positives.regression.hdf5",
    'store_values_above_thresh':0,
    'output_type':'hdf5',
    'chrom_sizes':'hg19.chrom.sizes',
    'chroms_to_keep':'chr1',
    'bin_stride':50,
    'left_flank':400,
    'right_flank':400,
    'bin_size':200,
    'threads':1,
    'subthreads':4,
    'allow_ambiguous':True,
    'labeling_approach':'peak_summit_in_bin_regression'
}
genomewide_labels(valid_set_params)
#3) Test set: Chromosomes 2, 19
test_set_params={
    'task_list':"SPI1.task.tsv",
    'outf':"test.positives.regression.hdf5",
    'store_values_above_thresh':0,
    'output_type':'hdf5',
    'chrom_sizes':'hg19.chrom.sizes',
    'chroms_to_keep':['chr2','chr19'],
    'bin_stride':50,
    'left_flank':400,
    'right_flank':400,
    'bin_size':200,
    'threads':2,
    'subthreads':4,
    'allow_ambiguous':False,
    'labeling_approach':'peak_summit_in_bin_regression'
}
genomewide_labels(test_set_params)
# -
# -
# ## Performance of SPI1 regression model from tutorial 5 <a name='1'>
# <a href=#outline>Home</a>
from dragonn.generators import *
from keras.models import load_model
#Start at best epoch from early stopping
spi1_regression_model=load_model('SPI1.regression.model.hdf5')
# ## Performance and Interpretation<a name='3'>
# <a href=#outline>Home</a>
# Test-set generator: no upsampling / no reverse-complement augmentation so
# predictions line up one-to-one with the labels in the hdf5.
spi1_test_regression_gen=DataGenerator("SPI1.test.regression.hdf5",
                                       "hg19.genome.fa.gz",
                                       upsample=False,
                                       add_revcomp=False,
                                       batch_size=1000,
                                       tasks=['SPI1'])
spi1_test_regression_predictions=spi1_regression_model.predict_generator(spi1_test_regression_gen,
                                                                         max_queue_size=5000,
                                                                         workers=40,
                                                                         use_multiprocessing=True,
                                                                         verbose=1)
spi1_test_regression_truth=spi1_test_regression_gen.data
#Calculate spearman and pearson correlation between truth labels and predictions
from scipy.stats import pearsonr, spearmanr
corr_pearson=pearsonr(spi1_test_regression_truth,spi1_test_regression_predictions)
corr_spearman=spearmanr(spi1_test_regression_truth,spi1_test_regression_predictions)
print("Pearson correlation on test set:"+str(corr_pearson))
print("Spearman correlation on test set:"+str(corr_spearman))
import matplotlib.pyplot as plt
#normalize the truth values for more direct comparison
spi1_test_regression_truth_normed=(spi1_test_regression_truth['SPI1']-min(spi1_test_regression_truth['SPI1']))/(max(spi1_test_regression_truth['SPI1'])-min(spi1_test_regression_truth['SPI1']))
plt.scatter(spi1_test_regression_truth_normed, spi1_test_regression_predictions, alpha=0.01)
plt.xlabel("Truth (normalized)")
plt.ylabel("Predicted")
plt.title("SPI1 regression model predictions on test set vs truth")
plt.show()
#Sanity-check that the model is learning the SPI1 motif by running DeepLIFT on True Positives with high confidence (>0.9)
#get the true positive predictions
# NOTE(review): the filter actually keeps rows where truth*prediction > 2,
# not prediction > 0.9 as the comment above says -- confirm the intended cut.
true_pos=spi1_test_regression_truth[(spi1_test_regression_truth.values*spi1_test_regression_predictions)>2]
true_pos.shape
true_pos.sort_values(by="SPI1").tail()
from dragonn.utils import one_hot_from_bed
deep_lift_input=one_hot_from_bed([i for i in true_pos.index],"/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa")
deep_lift_input.shape
from dragonn.tutorial_utils import deeplift
deep_lift_scores=deeplift(spi1_regression_model,deep_lift_input,target_layer_idx=-1)
from dragonn.tutorial_utils import plot_seq_importance
plot_seq_importance(deep_lift_scores[0],deep_lift_input[0])
# Zoom into a central 100bp window of the first true positive.
plot_seq_importance(deep_lift_scores[0].squeeze()[550:650],deep_lift_input[0].squeeze()[550:650])
#get the model predictions on the positives in train set (needed for relabeling)
pos_train_predict_gen=DataGenerator("SPI1.train.positives.regression.hdf5", "/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa",upsample=False,add_revcomp=False, batch_size=1000)
pos_train_predictions=spi1_regression_model.predict_generator(pos_train_predict_gen,
                                                              max_queue_size=5000,
                                                              workers=50,
                                                              use_multiprocessing=True,
                                                              verbose=1)
#get the model predictions on the validation set (needed for relabeling)
pos_valid_predict_gen=DataGenerator("SPI1.valid.positives.regression.hdf5", "/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa",upsample=False,add_revcomp=False, batch_size=1000)
pos_valid_predictions=spi1_regression_model.predict_generator(pos_valid_predict_gen,
                                                              max_queue_size=5000,
                                                              workers=50,
                                                              use_multiprocessing=True,
                                                              verbose=1)
#Store the predictions
import h5py
predictions=h5py.File("predictions.hdf5",'w')
predictions.create_dataset("pos_valid",data=pos_valid_predictions)
predictions.create_dataset("pos_train",data=pos_train_predictions)
predictions.close()
# ## Relabel training/validation positives <a name='5'>
# <a href=#outline>Home</a>
import pandas as pd
train_set=pd.read_hdf("SPI1.train.regression.hdf5",index_col=[0,1,2])
validation_set=pd.read_hdf("SPI1.valid.regression.hdf5",index_col=[0,1,2])
train_set_pos=pd.read_hdf("SPI1.train.positives.regression.hdf5",index_col=[0,1,2])
validation_set_pos=pd.read_hdf("SPI1.valid.positives.regression.hdf5",index_col=[0,1,2])
#0.1 was the minimum value observed in a positive bin
# Clip predictions from below so relabelled positives never fall under the
# original positive threshold.
pos_train_predictions[pos_train_predictions<0.1]=0.1
pos_valid_predictions[pos_valid_predictions<0.1]=0.1
train_set.shape
pos_train_predictions.shape
#the dataframe size is not an even multiple of batch size, so we truncate some of the trailing values
pos_train_index=train_set_pos.index
pos_valid_index=validation_set_pos.index
pos_train_index.shape
# Wrap the raw prediction arrays with the positive-bin index so that
# DataFrame.update can align them against the full label frames.
pos_train_predictions=pd.DataFrame(data=pos_train_predictions,index=pos_train_index,columns=['SPI1'])
pos_valid_predictions=pd.DataFrame(data=pos_valid_predictions,index=pos_valid_index,columns=['SPI1'])
#update the positive values in place
train_set.update(pos_train_predictions)
validation_set.update(pos_valid_predictions)
#Store the updated training & validation labels to hdf5
train_set.to_hdf("SPI1.train.relabeled.hdf5",key='data',mode='w',format='table')
validation_set.to_hdf("SPI1.valid.relabeled.hdf5",key='data',mode='w',format='table')
# ## Continue training model starting from early stopping state with relabeled set of labels <a name='6'>
# <a href=#outline>Home</a>
#create the generator for keras training. Guarantee 30% positives in each batch
train_gen2=DataGenerator("SPI1.train.relabeled.hdf5","/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa",upsample_ratio=0.3, upsample_thresh=0.1)
#create the generator for keras validation. Guarantee 30% positives in each batch
valid_gen2=DataGenerator("SPI1.valid.relabeled.hdf5","/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa",upsample_ratio=0.3, upsample_thresh=0.1)
#create test set generator for prediction
test_gen2=DataGenerator("SPI1.test.regression.hdf5", "/mnt/data/annotations/by_release/hg19.GRCh37/hg19.genome.fa",upsample=False,add_revcomp=False, batch_size=1000)
# Continue training the SPI1 model (the original comment said "CTCF model",
# a leftover from another tutorial).
## use the keras fit_generator function to train the model with early stopping after 3 epochs
# NOTE(review): only History() is passed as a callback -- no EarlyStopping is
# actually configured despite the comment above; confirm intent. History also
# needs to be importable here (presumably re-exported by dragonn.generators).
history2=spi1_regression_model.fit_generator(train_gen2,
                                             validation_data=valid_gen2,
                                             steps_per_epoch=10000,
                                             validation_steps=5000,
                                             epochs=10,
                                             verbose=1,
                                             use_multiprocessing=True,
                                             workers=50,
                                             max_queue_size=100,
                                             callbacks=[History()])
loss2=pd.DataFrame(history2.history)
loss2['epoch']=list(loss2.index)
#Plot the loss curves
from plotnine import *
subset2 = pd.melt(loss2, id_vars=['epoch'], value_vars=['loss', 'val_loss'])
p2 = ggplot(subset2, aes(x='epoch', y='value', color='variable'))
(p2 + geom_line()
 + geom_point()
 + scale_color_manual(['r', 'b'])
)
# ## Performance and Interpretation on Round 2 <a name='7'>
# <a href=#outline>Home</a>
spi1_test_regression_gen=DataGenerator("SPI1.test.regression.hdf5",
                                       "hg19.genome.fa.gz",
                                       upsample=False,
                                       add_revcomp=False,
                                       batch_size=1000,
                                       tasks=['SPI1'])
spi1_test_regression_predictions2=spi1_regression_model.predict_generator(spi1_test_regression_gen,
                                                                          max_queue_size=5000,
                                                                          workers=40,
                                                                          use_multiprocessing=True,
                                                                          verbose=1)
spi1_test_regression_truth=spi1_test_regression_gen.data
#Calculate spearman and pearson correlation between truth labels and predictions
from scipy.stats import pearsonr, spearmanr
corr_pearson=pearsonr(spi1_test_regression_truth,spi1_test_regression_predictions2)
corr_spearman=spearmanr(spi1_test_regression_truth,spi1_test_regression_predictions2)
print("Pearson correlation on test set:"+str(corr_pearson))
print("Spearman correlation on test set:"+str(corr_spearman))
# NOTE(review): reuses spi1_test_regression_truth_normed computed in the
# round-1 cell; this cell depends on that cell having run first.
plt.scatter(spi1_test_regression_truth_normed, spi1_test_regression_predictions2, alpha=0.01)
plt.xlabel("Truth")
plt.ylabel("Predicted")
plt.title("SPI1 regression model 2 predictions on test set vs truth")
plt.show()
# ## Conclusions<a name='8'>
# <a href=#outline>Home</a>
|
tutorials/SPI1 Genomewide regression Born Again Training .ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Softmax exercise
#
# *Complete and hand in this completed worksheet (including its outputs and any supporting code outside of the worksheet) with your assignment submission. For more details see the [assignments page](http://vision.stanford.edu/teaching/cs231n/assignments.html) on the course website.*
#
# This exercise is analogous to the SVM exercise. You will:
#
# - implement a fully-vectorized **loss function** for the Softmax classifier
# - implement the fully-vectorized expression for its **analytic gradient**
# - **check your implementation** with numerical gradient
# - use a validation set to **tune the learning rate and regularization** strength
# - **optimize** the loss function with **SGD**
# - **visualize** the final learned weights
#
# +
# fix: a __future__ import must precede every other statement in the cell,
# otherwise Python raises SyntaxError at compile time.
from __future__ import print_function
import random
import numpy as np
from cs231n.data_utils import load_CIFAR10
import matplotlib.pyplot as plt
# %matplotlib inline
plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots
plt.rcParams['image.interpolation'] = 'nearest'
plt.rcParams['image.cmap'] = 'gray'
# for auto-reloading extenrnal modules
# see http://stackoverflow.com/questions/1907993/autoreload-of-modules-in-ipython
# %load_ext autoreload
# %autoreload 2
# +
def get_CIFAR10_data(num_training=49000, num_validation=1000, num_test=1000, num_dev=500):
    """
    Load the raw CIFAR-10 dataset and preprocess it for a linear classifier.

    Returns train / validation / test / dev splits, each flattened to row
    vectors, zero-centered by the training-set mean image, and with a
    trailing bias column of ones appended.
    """
    # Load the raw CIFAR-10 data from disk.
    cifar10_dir = 'cs231n/datasets/cifar-10-batches-py'
    X_train, y_train, X_test, y_test = load_CIFAR10(cifar10_dir)

    # Carve the validation split out of the tail of the training data.
    val_idx = list(range(num_training, num_training + num_validation))
    X_val, y_val = X_train[val_idx], y_train[val_idx]

    # Keep only the requested number of training examples.
    train_idx = list(range(num_training))
    X_train, y_train = X_train[train_idx], y_train[train_idx]

    # Subsample the test set.
    test_idx = list(range(num_test))
    X_test, y_test = X_test[test_idx], y_test[test_idx]

    # Draw a small random development set from the training examples.
    dev_idx = np.random.choice(num_training, num_dev, replace=False)
    X_dev, y_dev = X_train[dev_idx], y_train[dev_idx]

    # Preprocessing: flatten each image into a single row vector.
    X_train = X_train.reshape(X_train.shape[0], -1)
    X_val = X_val.reshape(X_val.shape[0], -1)
    X_test = X_test.reshape(X_test.shape[0], -1)
    X_dev = X_dev.reshape(X_dev.shape[0], -1)

    # Normalize: subtract the mean image computed on the training split.
    mean_image = np.mean(X_train, axis=0)
    X_train = X_train - mean_image
    X_val = X_val - mean_image
    X_test = X_test - mean_image
    X_dev = X_dev - mean_image

    # Append a bias column of ones so the classifier needs no separate bias.
    X_train = np.hstack([X_train, np.ones((X_train.shape[0], 1))])
    X_val = np.hstack([X_val, np.ones((X_val.shape[0], 1))])
    X_test = np.hstack([X_test, np.ones((X_test.shape[0], 1))])
    X_dev = np.hstack([X_dev, np.ones((X_dev.shape[0], 1))])

    return X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev
# Invoke the above function to get our data.
X_train, y_train, X_val, y_val, X_test, y_test, X_dev, y_dev = get_CIFAR10_data()
print('Train data shape: ', X_train.shape)
print('Train labels shape: ', y_train.shape)
print('Validation data shape: ', X_val.shape)
print('Validation labels shape: ', y_val.shape)
print('Test data shape: ', X_test.shape)
print('Test labels shape: ', y_test.shape)
print('dev data shape: ', X_dev.shape)
print('dev labels shape: ', y_dev.shape)
# -
# ## Softmax Classifier
#
# Your code for this section will all be written inside **cs231n/classifiers/softmax.py**.
#
# +
# First implement the naive softmax loss function with nested loops.
# Open the file cs231n/classifiers/softmax.py and implement the
# softmax_loss_naive function.
from cs231n.classifiers.softmax import softmax_loss_naive
import time
# Generate a random softmax weight matrix and use it to compute the loss.
W = np.random.randn(3073, 10) * 0.0001
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As a rough sanity check, our loss should be something close to -log(0.1).
print('loss: %f' % loss)
print('sanity check: %f' % (-np.log(0.1)))
# -
# ## Inline Question 1:
# Why do we expect our loss to be close to -log(0.1)? Explain briefly.**
#
# **Your answer:** *Fill this in*
#
# +
# Complete the implementation of softmax_loss_naive and implement a (naive)
# version of the gradient that uses nested loops.
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 0.0)
# As we did for the SVM, use numeric gradient checking as a debugging tool.
# The numeric gradient should be close to the analytic gradient.
from cs231n.gradient_check import grad_check_sparse
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 0.0)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# similar to SVM case, do another gradient check with regularization
loss, grad = softmax_loss_naive(W, X_dev, y_dev, 5e1)
f = lambda w: softmax_loss_naive(w, X_dev, y_dev, 5e1)[0]
grad_numerical = grad_check_sparse(f, W, grad, 10)
# +
# Now that we have a naive implementation of the softmax loss function and its gradient,
# implement a vectorized version in softmax_loss_vectorized.
# The two versions should compute the same results, but the vectorized version should be
# much faster.
tic = time.time()
loss_naive, grad_naive = softmax_loss_naive(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('naive loss: %e computed in %fs' % (loss_naive, toc - tic))
from cs231n.classifiers.softmax import softmax_loss_vectorized
tic = time.time()
loss_vectorized, grad_vectorized = softmax_loss_vectorized(W, X_dev, y_dev, 0.000005)
toc = time.time()
print('vectorized loss: %e computed in %fs' % (loss_vectorized, toc - tic))
# As we did for the SVM, we use the Frobenius norm to compare the two versions
# of the gradient.
grad_difference = np.linalg.norm(grad_naive - grad_vectorized, ord='fro')
print('Loss difference: %f' % np.abs(loss_naive - loss_vectorized))
print('Gradient difference: %f' % grad_difference)
# +
# Use the validation set to tune hyperparameters (regularization strength and
# learning rate). You should experiment with different ranges for the learning
# rates and regularization strengths; if you are careful you should be able to
# get a classification accuracy of over 0.35 on the validation set.
from cs231n.classifiers import Softmax
results = {}
best_val = -1
best_softmax = None
learning_rates = [1e-7, 5e-7]
regularization_strengths = [2.5e4, 5e4]
################################################################################
# TODO: #
# Use the validation set to set the learning rate and regularization strength. #
# This should be identical to the validation that you did for the SVM; save #
# the best trained softmax classifer in best_softmax. #
################################################################################
pass
################################################################################
# END OF YOUR CODE #
################################################################################
# Print out results.
for lr, reg in sorted(results):
train_accuracy, val_accuracy = results[(lr, reg)]
print('lr %e reg %e train accuracy: %f val accuracy: %f' % (
lr, reg, train_accuracy, val_accuracy))
print('best validation accuracy achieved during cross-validation: %f' % best_val)
# -
# evaluate on test set
# Evaluate the best softmax on test set
y_test_pred = best_softmax.predict(X_test)
test_accuracy = np.mean(y_test == y_test_pred)
print('softmax on raw pixels final test set accuracy: %f' % (test_accuracy, ))
# +
# Visualize the learned weights for each class
w = best_softmax.W[:-1,:] # strip out the bias
w = w.reshape(32, 32, 3, 10)
w_min, w_max = np.min(w), np.max(w)
classes = ['plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
for i in range(10):
plt.subplot(2, 5, i + 1)
# Rescale the weights to be between 0 and 255
wimg = 255.0 * (w[:, :, :, i].squeeze() - w_min) / (w_max - w_min)
plt.imshow(wimg.astype('uint8'))
plt.axis('off')
plt.title(classes[i])
|
assignment1/softmax.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
df = pd.read_csv("data/Test.csv")
df = df[['Supplier', 'Supplier Site', 'Description', 'Number', 'Effective Date',
'Expires On', 'Amount Agreed', 'Amount', 'Buyer', 'Matched Amount',
'Approval Status', 'Freight Terms', 'Currency']]
non_emea_bpa = df[df['Supplier'].isin(['Matt', 'Sam']) ]
# +
# non_emea_bpa = df[df['Supplier'].isin(['<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>', '<NAME>']) ]
# -
df['Consumed'] = df['Amount']/df['Amount Agreed']
df = df[df.Supplier != '<NAME>']
df = df[df.Supplier != '<NAME>']
df = df[df.Supplier != '<NAME>']
df = df[df.Supplier != '<NAME>']
df = df[df.Supplier != '<NAME>']
df = df[df.Supplier != 'Bas Zwartele']
# https://pandas.pydata.org/pandas-docs/stable/user_guide/style.html
df.head(5)
color = (df.Supplier == 'Matt').map({True: 'background-color: yellow', False: ''})
df.style.apply(lambda s: color)
# +
# df['Amount Agreed'] = df['Amount Agreed'].replace(np.nan, -1)
# df['Buyer'] = df['Buyer'].replace(np.nan, -1)
# df['Effective Date'] = df['Effective Date'].replace(np.nan, -1)
# df['Expires On'] = df['Expires On'].replace(np.nan, -1)
# -
df.loc[:].style.highlight_null(null_color='yellow')
df
color = (df.Supplier == 'Matt').map({True: 'background-color: yellow', False: ''})
df.style.apply(lambda s: color)
df['Amount Agreed'].style.highlight_null('red')
# df['Expires On'] = pd.to_datetime(df['Expires On'])
# df['Effective Date'] = pd.to_datetime(df['Effective Date'])
df
df['Expires On'] = df['Expires On'].dt.strftime('%m/%d/%Y')
# +
start_date = '2000-01-01'
end_date = '2021-12-01'
mask = (df['Expires On'] > start_date) & (df['Expires On'] <= end_date)
expired = df.loc[mask]
expired
# +
df = pd.DataFrame([[2,3,1], [3,2,2], [2,4,4]], columns=list("ABC"))
df.style.apply(lambda x: ["background: red" if v > x.iloc[0] else "" for v in x], axis = 1)
# -
def highlight_max(s, props=''):
    """Element-wise styler: return *props* wherever an entry equals 2, else ''."""
    is_two = (s == 2)
    return np.where(is_two, props, '')
df.apply(highlight_max, props='color:white;background-color:darkblue', axis=0)
# +
start_date = '2021-12-01'
end_date = '2030-12-01'
mask = (df['Expires On'] > start_date) & (df['Expires On'] <= end_date)
active_bpas = df.loc[mask]
active_bpas.head()
# -
def highlight_80(y):
    """Row styler for DataFrame.style.apply(axis=1): paint the whole
    14-column row yellow when its ``Consumed`` value exceeds 0.8,
    otherwise white."""
    colour = 'yellow' if y.Consumed > .8 else 'white'
    return ['background-color: ' + colour] * 14
df.style.apply(highlight_80, axis=1)
def equals(y):
    """Row styler: paint the 14-column row orange when y.EffectiveDate
    equals 0.997050, otherwise white."""
    # NOTE(review): `y.EffectiveDate` does not match the DataFrame column name
    # 'Effective Date' (attribute access cannot contain the space), and that
    # column holds dates while 0.997050 is a float -- this comparison looks
    # broken; confirm the intended column and value before relying on it.
    if y.EffectiveDate == 0.997050:
        return ['background-color: orange']*14
    else:
        return ['background-color: white']*14
df.style.apply(equals, axis=1)
# +
# Create some Pandas dataframes from some data.
df1 = pd.DataFrame({'Data': [11, 12, 13, 14]})
df2 = pd.DataFrame({'Data': [21, 22, 23, 24]})
df3 = pd.DataFrame({'Data': [31, 32, 33, 34]})
# Create a Pandas Excel writer using XlsxWriter as the engine.
writer = pd.ExcelWriter('Test2.xlsx', engine='xlsxwriter')
# Write each dataframe to a different worksheet.
df.to_excel(writer, sheet_name='ACTIVE BPAS')
non_emea_bpa.to_excel(writer, sheet_name='NON EMEA BPAS')
expired.to_excel(writer, sheet_name='EXPIRED')
# Close the Pandas Excel writer and output the Excel file.
writer.save()
# -
|
Test Project.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# REPL-style tour of Python string basics; each bare expression simply
# echoes its value when run in a notebook cell.
'hello'
"hello"
# Either quote style may embed the other without escaping.
'"python course"'
"'python course'"
# Deliberate SyntaxError demo: the apostrophe terminates the single-quoted
# string early.
'I didn't work'
# Double quotes fix it.
"I didn't work"
'hello'
'hello'
'world'
print('hello')
print('world')
a = "hello world"
a.upper()  # returns a new uppercased string ...
a          # ... while `a` itself is unchanged (strings are immutable)
a = "KING"
a.lower()
a = "hello world in python"
a.split()  # whitespace split -> list of words
a.split()
a = 'hello world'
len(a)
# Indexing
a
a[0]
a[7]
a[9]
a[-1]  # negative indices count back from the end
# Slicing
a[1:6]
a[0:5]
a[2:]
a[:9]
a[:]
a[:-1]
a[::2]  # every second character
a[::1]
# Immutability
a
# Deliberate TypeError demo: str does not support item assignment.
a[0] = 'p'
# Concatenation
a = a + ' in python'
a
b = 'hello'
c = 'world'
d = b + c
d
a = 'b'
a * 5  # repetition: 'bbbbb'
|
Python Basics/Strings.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualize 2D synthetic density estimation result
# +
import numpy as np
import matplotlib.pyplot as plt
import sys
sys.path.append('../../')
from tqdm.notebook import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim import Adam
from models.modules import FCNet, IsotropicGaussian, FCResNet
from models.ae import AE, VAE
from models.nae import NAE
from models.bnaf import BNAF
from loader.synthetic import sample2d
# -
device = 'cpu:0'
dset = '8gaussians'
if dset == '8gaussians':
xmin, xmax, ymin, ymax = [-4, 4, -4, 4]
elif dset == '2spirals':
xmin, xmax, ymin, ymax = [-4, 4, -4, 4]
elif dset == 'checkerboard':
xmin, xmax, ymin, ymax = [-4, 4, -4, 4]
XX, YY = torch.meshgrid(torch.linspace(xmin, xmax, 100), torch.linspace(ymin,ymax, 100))
grid = torch.cat([XX.reshape(-1,1), YY.reshape(-1,1)], dim=1)
grid_gpu = grid
grid_gpu = grid.to(device)
# # AE
zdim = 2
encoder = FCResNet(2, zdim, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='spherical')
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
ae = AE(encoder, IsotropicGaussian(decoder, sigma=0.5, sigma_trainable=True, error_normalize=False))
ae.load_state_dict(torch.load('ae_2_8gaussians.pth'))
ae.to(device);
z_grid = ae.encoder(grid_gpu)
E_ae = - ae.decoder.log_likelihood(grid_gpu, z_grid).detach().cpu().reshape(100, 100)
Omega = ((8 / 100 * 8 / 100) * np.exp(-E_ae)).sum()
p_ae = np.exp(-E_ae.T)/Omega
zdim = 3
encoder = FCResNet(2, zdim, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='tanh')
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
ae = AE(encoder, IsotropicGaussian(decoder, sigma=0.5, sigma_trainable=True, error_normalize=False))
ae.load_state_dict(torch.load('ae_3_8gaussians.pth'))
ae.to(device);
z_grid = ae.encoder(grid_gpu)
E_ae = - ae.decoder.log_likelihood(grid_gpu, z_grid).detach().cpu().reshape(100, 100)
Omega = ((8 / 100 * 8 / 100) * np.exp(-E_ae)).sum()
p_ae_3 = np.exp(-E_ae.T)/Omega
# # VAE
zdim = 1
encoder = FCResNet(2, zdim * 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
vae = VAE(encoder, decoder, sigma_trainable=True, use_mean=False)
vae.load_state_dict(torch.load('vae_1_8gaussians.pth'))
vae.to(device);
vae.n_sample = 100
p_vae = np.exp(-vae.reconstruction_probability(grid_gpu).detach().cpu()).reshape((100,100))
gg = vae.marginal_likelihood(grid_gpu, n_sample=500).detach().cpu()
p_vae = torch.exp(gg.reshape(100, 100))
zdim = 3
encoder = FCResNet(2, zdim * 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear')
vae = VAE(encoder, decoder, sigma_trainable=True, use_mean=False)
vae.load_state_dict(torch.load('vae_3_8gaussians.pth'))
vae.to(device);
vae.n_sample = 100
# p_vae_3 = np.exp(-vae.reconstruction_probability(grid_gpu).detach().cpu()).reshape((100,100))
gg = vae.marginal_likelihood(grid_gpu, n_sample=500).detach().cpu() #- np.log(1000)
p_vae_3 = torch.exp(gg.reshape(100, 100))
p_vae.flatten().numpy()
plt.hist(np.log(p_vae.flatten().numpy()))
# # NAE
zdim = 2
encoder = FCResNet(2, zdim, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear', use_spectral_norm=False)
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear', use_spectral_norm=False)
nae = NAE(encoder, decoder, sampling='on_manifold',
x_step=30, x_stepsize=None, x_noise_std=0.1, x_bound=(-5, 5), x_clip_langevin_grad=None,
z_step=10, z_stepsize=None, z_noise_std=0.1, z_bound=None, z_clip_langevin_grad=None,
gamma=1, delta=0., spherical=True,
sigma=0.5, sigma_trainable=True,
temperature=0.5, temperature_trainable=True,
l2_norm_reg=None, l2_norm_reg_en=None, z_norm_reg=None,
initial_dist='gaussian', replay=True, replay_ratio=0.95, buffer_size=10000,
deterministic=True, mh=True, mh_z=False, reject_boundary=True, reject_boundary_z=True)
nae.load_state_dict(torch.load('nae_2_8gaussians.pth'))
# nae.to(device);
E = nae.energy_T(grid_gpu).detach().cpu().reshape(100, 100)
# E = nae.energy(grid_gpu).detach().cpu().reshape(100, 100)
Omega = ((8 / 100 * 8 / 100) * np.exp(-E)).sum()
p_nae = np.exp(-E.T)/Omega
zdim = 3
encoder = FCResNet(2, zdim, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear', use_spectral_norm=False)
decoder = FCResNet(zdim, 2, res_dim=256, n_res_hidden=1024, n_resblock=5, out_activation='linear', use_spectral_norm=False)
nae = NAE(encoder, decoder, sampling='on_manifold',
x_step=30, x_stepsize=None, x_noise_std=0.1, x_bound=(-5, 5), x_clip_langevin_grad=None,
z_step=10, z_stepsize=None, z_noise_std=0.1, z_bound=None, z_clip_langevin_grad=None,
gamma=1, delta=0., spherical=False,
sigma=1, sigma_trainable=False,
temperature=0.1, temperature_trainable=True,
l2_norm_reg=None, l2_norm_reg_en=None, z_norm_reg=0.01,
initial_dist='gaussian', replay=True, replay_ratio=0.95, buffer_size=10000,
deterministic=True, mh=True, mh_z=False, reject_boundary=True, reject_boundary_z=True)
nae.load_state_dict(torch.load('nae_3_8gaussians.pth'))
# nae.to(device);
E = nae.energy_T(grid_gpu).detach().cpu().reshape(100, 100)
# E = nae.energy(grid_gpu).detach().cpu().reshape(100, 100)
Omega = ((8 / 100 * 8 / 100) * np.exp(-E)).sum()
p_nae_3 = np.exp(-E.T)/Omega
# # BNAF
#
# * Load the previously saved model file
# +
# bnaf = BNAF(1,3, 2, 50)
# bnaf.load_state_dict(torch.load('bnaf_8gaussians.pth'))
# bnaf.to(device)
# +
# model = bnaf
# gg = model.log_likelihood(grid.to(device)).detach().cpu()
# prd = gg.reshape(100, 100)
# bnaf_pred = prd
# +
# plt.imshow(np.exp(bnaf_pred), origin='upper', extent=(-4, 4, -4, 4))
# plt.colorbar()
# # plt.xticks(np.linspace(-4,4,100));
# -
# # Gaussian
from torch.distributions import Normal
g1 = Normal(torch.tensor([4/np.sqrt(2),0]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g2 = Normal(torch.tensor([-4/np.sqrt(2),0]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g3 = Normal(torch.tensor([0,4/np.sqrt(2)]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g4 = Normal(torch.tensor([0,-4/np.sqrt(2)]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g5 = Normal(torch.tensor([2,2]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g6 = Normal(torch.tensor([-2,2]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g7 = Normal(torch.tensor([2,-2]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
g8 = Normal(torch.tensor([-2,-2]), torch.tensor([0.5/np.sqrt(2), 0.5/np.sqrt(2)]))
p1 = torch.exp(g1.log_prob(grid).sum(dim=1))
p2 = torch.exp(g2.log_prob(grid).sum(dim=1))
p3 = torch.exp(g3.log_prob(grid).sum(dim=1))
p4 = torch.exp(g4.log_prob(grid).sum(dim=1))
p5 = torch.exp(g5.log_prob(grid).sum(dim=1))
p6 = torch.exp(g6.log_prob(grid).sum(dim=1))
p7 = torch.exp(g7.log_prob(grid).sum(dim=1))
p8 = torch.exp(g8.log_prob(grid).sum(dim=1))
p_8gaussian = (p1 + p2 + p3 + p4 + p5+ p6+ p7 + p8) / 8
plt.imshow(p_8gaussian.reshape(100,100))
# # Figure drawing
from mpl_toolkits.axes_grid1 import ImageGrid
p_vae.max()
p_vae_3.max()
p_nae_3.max()
p_8gaussian.max()
p_ae.max()
cat_p = np.concatenate([p_ae.flatten(), p_vae.flatten(), p_nae.flatten(), p_ae_3.flatten(), p_nae_3.flatten()]) # p_vae_3.flatten(),
p_max = cat_p.max()
p_max = p_8gaussian.max()
# +
plt.rcParams.update({'font.size': 25})
fig = plt.figure(constrained_layout=True, figsize=(16,8))
spec = fig.add_gridspec(ncols=4, nrows=2,)
# Left column (spanning both rows): the ground-truth 8-Gaussian density.
ax = fig.add_subplot(spec[:,0])
# fix: capture the AxesImage here -- `im` was previously read by
# plt.colorbar below before it had ever been assigned (NameError on a
# fresh top-to-bottom run of the notebook).
im = ax.imshow(p_8gaussian.reshape(100,100), origin='lower', extent=(-4, 4, -4, 4), cmap='Reds',)
ax.set_xticks([]); ax.set_yticks([])
ax.set_title('Data Distribution')
plt.colorbar(im, ax=ax, shrink=0.47)
# Top row: models with a 1-dimensional latent space.
ax = fig.add_subplot(spec[0,1])
ax.imshow(p_ae, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('AE ($D_Z$=1)')
ax.set_xticks([]); ax.set_yticks([])
ax = fig.add_subplot(spec[0,2])
ax.imshow(p_vae, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('VAE ($D_Z$=1)')
ax.set_xticks([]); ax.set_yticks([])
ax = fig.add_subplot(spec[0,3])
im = ax.imshow(p_nae, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('NAE ($D_Z$=1)')
ax.set_xticks([]); ax.set_yticks([])
# Bottom row: the same models with a 3-dimensional latent space.
ax = fig.add_subplot(spec[1,1])
ax.imshow(p_ae_3, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('AE ($D_Z$=3)')
ax.set_xticks([]); ax.set_yticks([])
ax = fig.add_subplot(spec[1,2])
ax.imshow(p_vae_3, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('VAE ($D_Z$=3)')
ax.set_xticks([]); ax.set_yticks([])
ax = fig.add_subplot(spec[1,3])
ax.imshow(p_nae_3, origin='lower', extent=(-4, 4, -4, 4), cmap='Reds', vmin=0, vmax=p_max)
ax.set_title('NAE ($D_Z$=3)')
ax.set_xticks([]); ax.set_yticks([])
plt.savefig('fig_2d_density_estimation.pdf', bbox_inches='tight')
|
2d_density_estimation/draw_2d_density_estimation_figure.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] id="aNvfu_46jZtz"
# # Objects and Classes
#
# ---
#
# ## The basic idea is to capture the attributes of an object (plane, matrix, pet, ...) in an abstract description, along with the methods to interact with such objects.
#
# >> ## This abstract description is what we call a class
#
#
#
# ---
#
# ## Specific instances of a class are captured as objects.
#
# >> convention is that class names are specified with capital letters
# + id="-swJxl3CjXmd"
class Complex:
    """Minimal complex-number record exposing real part ``r`` and
    imaginary part ``i``."""

    def __init__(self, realpart, imagpart):
        # Store both components under the short attribute names used by callers.
        self.r, self.i = realpart, imagpart
x = Complex(3.0, -4.5)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="hzFcRc9Ml3-6" outputId="78acda33-265a-4c1d-c89d-3583c6b50940"
x.r, x.i
# + [markdown] id="6oJtkr_PmQ_L"
# Try to write a class that takes in a point as an object. three-space
# + id="HvNM7yDel6Vh"
class Point3D:
    def __init__(self, x, y, z):
        """Initialize a point in a three dimensional space of real values."""
        self.x = x
        self.y = y
        self.z = z
    def distance(self, point):
        """Return the Euclidean distance to another Point3D."""
        d = (
            (self.x - point.x) ** 2 + (self.y - point.y) ** 2 + (self.z - point.z) ** 2
        ) ** 0.5
        return d
    def shiftedPoint(self, shx, shy, shz):
        """Return a new Point3D shifted by the offsets (shx, shy, shz)."""
        newx = self.x + shx
        newy = self.y + shy
        # fix: was `self.x + shz`, which applied the z offset to the
        # x coordinate instead of the z coordinate.
        newz = self.z + shz
        return Point3D(newx, newy, newz)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="rHoPvH4CoSZF" outputId="267ee5a2-fd73-4476-997d-5ba0d829a7a3"
p = Point3D(0,0,1)
q = Point3D(0,0,2)
p.distance(q)
# + id="U8fKRZ33rT4c"
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Up8UBIiroW17" outputId="e836b512-69f2-4e52-e9ee-7e46cc854e5f"
q = p.shiftedPoint(42,0,5)
q.x
# + id="fUP2VW8ZszZH"
def Euclidean_GCD(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    while b != 0:
        a, b = b, a % b
    return a
class Rational:
    """A rational number kept in lowest terms, with arithmetic and comparison."""
    def __init__(self, n, d):
        """Construct the rational n/d reduced to lowest terms.

        Raises ZeroDivisionError when d is zero.
        """
        if d == 0:
            raise ZeroDivisionError("Denominator of rational may not be zero.")
        else:
            g = Euclidean_GCD(n, d)
            # fix: use integer (floor) division -- true division turned the
            # numerator/denominator into floats on the Python 3 kernel, so
            # str(Rational(2, 4)) printed "1.0/2.0" instead of "1/2".
            self.n = n // g
            self.d = d // g
    def __add__(self, other):
        """add two rational numbers"""
        return Rational(self.n * other.d + other.n * self.d, self.d * other.d)
    def __sub__(self, other):
        """subtract two rational numbers"""
        return Rational(self.n * other.d - other.n * self.d, self.d * other.d)
    def __mul__(self, other):
        """multiply two rational numbers"""
        return Rational(self.n * other.n, self.d * other.d)
    def __div__(self, other):
        """divide two rational numbers (legacy Python 2 hook)"""
        return Rational(self.n * other.d, self.d * other.n)
    # fix: Python 3 dispatches the / operator to __truediv__, not __div__;
    # without this alias, Rational / Rational raised TypeError.
    __truediv__ = __div__
    def __eq__(self, other):
        """check if two rational numbers are equivalent"""
        return self.n * other.d == other.n * self.d
    def __str__(self):
        """convert fraction to string"""
        return str(self.n) + "/" + str(self.d)
    def __repr__(self):
        """returns a valid python description of a fraction"""
        return "Rational(" + str(int(self.n)) + "," + str(int(self.d)) + ")"
    def __le__(self, other):
        """<= for fractions.

        fix: the original signature omitted ``other`` (any <= comparison
        raised TypeError) and computed unused float locals.
        """
        return self.n * other.d <= other.n * self.d
# + id="GADB13QifvPs"
peter=Rational(1,2)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="M06aTJXTh136" outputId="4dd7ee7f-ffdb-400f-a7bd-54975c7ff741"
print(peter)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="yCJnemb6iPPm" outputId="c51d6982-89c0-40be-f778-6a2b03ed3805"
petra = Rational(1,2)
peter = Rational(2,4)
alice = Rational(3,5)
petra == peter
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="4m7tf8_3lIZM" outputId="81a4fcc8-0e5c-40c1-fb6e-88533ac03ba8"
petra == alice
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="qNMj6KVslMLE" outputId="a1f8ec2c-b6ee-4f60-a5b2-95bf9a07c65d"
alice + petra == alice + peter
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Q7pYBKcDnPLM" outputId="11a48b1b-569c-4bae-a840-142c81fd73f5"
petra - alice == alice - peter
# + id="4PEt1PC9t1a8"
# + [markdown] id="oRyYuSJPx9am"
# # Iterators in Python
#
# ---
#
# ## To iterate over an object in Python with a for-loop, the following steps are performed:
#
#
# >>**1. Derive an associated iterator by applying iter() to the object**
#
# >> **2. The next function is applied to the iterator until a stop iteration exception occurs**
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="JDFH1xzhypWa" outputId="31c3385f-7482-465e-94bf-e73a7fc70f1f"
a = 'Hey there'
aa = iter(a)
aa
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="XM4e9n-VzAf9" outputId="cc55394b-c8ab-448d-933e-5d0df1a2d174"
type(a)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="C9piyVGwzB28" outputId="c4e9bcfd-b47f-4973-9c38-a3e6f1e19cda"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="ES8vtFxnzE0j" outputId="485fb9f9-e67b-4721-8cb7-9f5d3bb5557f"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="rXOcG6MLzGfT" outputId="72a00d61-7f7c-4481-c4cc-9f682b4ace28"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="KC5OlAFNzHBD" outputId="1a3cfeab-6305-4b28-d9d7-8b52454dc2b8"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Y-ChdUZVzHWb" outputId="8a45d7c7-0fa7-45c2-d530-873f256fe87d"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="_agmUOZ8zH1R" outputId="f6a096e1-e0aa-481b-d22b-1ecdfa16cde0"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="Pgde9yXczIOr" outputId="4a057ca2-40fd-406e-95f7-edee864f89bd"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="l34nmk3xzIjD" outputId="5e0ac5b0-015a-43d2-a8b4-2d81714791ab"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="c5Ts0DclzJ2T" outputId="3053efd1-b8dd-4686-ae87-caedd905951f"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 164} id="jH_kl35xzL1S" outputId="fa1bed32-f4aa-4540-d339-25ed70367a84"
next(aa)
# + colab={"base_uri": "https://localhost:8080/", "height": 164} id="FdU5afXIzlas" outputId="88a46231-1adc-4b84-e49c-830bfdd10363"
next(aa)
# + id="JU1bC5M_z0KV"
class SmallMatrix:
    """A 2x2 matrix stored as two row tuples, iterable in row-major order."""
    def __init__(self, m11, m12, m21, m22):
        self.row1 = (
            m11,
            m12,
        )
        self.row2 = (
            m21,
            m22,
        )
    def __str__(self):
        """Render the matrix as two space-separated rows, one per line."""
        row1_string = str(self.row1[0]) + " " + str(self.row1[1])
        # fix: second entry was self.row1[1], printing row1's value in row2.
        row2_string = str(self.row2[0]) + " " + str(self.row2[1])
        return row1_string + "\n" + row2_string
    def __iter__(self):
        self._counter = 0  # common convention in python code. A single underscore means for private use only
        return self
    def __next__(self):
        """Yield m11, m12, m21, m22 in order, then raise StopIteration."""
        # fix: every branch previously incremented the undefined local
        # `self_counter` instead of the attribute `self._counter`,
        # raising NameError on the first iteration.
        if self._counter == 0:
            self._counter += 1
            return self.row1[0]
        if self._counter == 1:
            self._counter += 1
            return self.row1[1]
        if self._counter == 2:
            self._counter += 1
            return self.row2[0]
        if self._counter == 3:
            self._counter += 1
            return self.row2[1]
        raise StopIteration
# + colab={"base_uri": "https://localhost:8080/", "height": 181} id="wFUhRbMN1lf1" outputId="e38a2a12-2ee1-4040-8b83-e472c85d19cb"
a = SmallMatrix(42, 0, 9, 18)
for i in a.row1:
print(i)
# + [markdown] id="R1xOPfjH8m0n"
# # Generators in Python
#
# ---
#
# ## Often, we can work with a generator which saves us from implementing __next__ and __iter__. Generators look just like functions, but instead of "return" they use yield. When a generator is called repeatedly, it continues after the yield statement, maintaining all values from the prior call.
#
#
# + id="OFdf8y7_75Jm"
def squares():
    """Generator yielding 0, 1, 4, 9, ... -- the square of every non-negative integer."""
    n = 0
    while True:
        yield n ** 2
        n += 1
# + id="uUeMYkYy-coM"
g = squares()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="w3N9xRAi-iSu" outputId="e1470a63-69de-4466-8c71-82f6902bb585"
next(g)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="f5t5YeV8-j5W" outputId="5064e38d-471c-4b10-b857-fa7a27188182"
next(g)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="e-Pa3AP7-lds" outputId="65a23cd7-f865-4a1a-ff9a-2bbd7cb1c34c"
next(g)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="pF4vyF5N-l9E" outputId="f84a7c48-0ea6-4d3a-da08-9e793100db87"
next(g)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} id="gdYaP9QH-mZE" outputId="06be9ec1-a846-4653-de8b-4bd145ce7c45"
next(g)
# + colab={"base_uri": "https://localhost:8080/", "height": 867} id="Oj195sti-m_V" outputId="72e0d024-4109-46cd-f6bd-09a0eb334734"
[next(g) for i in range(50)]
# + id="BrdI8YFL-tRv"
def is_prime(m):
    """Return True if and only if abs(m) is a prime number."""
    n = abs(m)
    # 0 and 1 are not prime; even numbers above 2 are composite.
    if n in (0, 1):
        return False
    if n > 2 and n % 2 == 0:
        return False
    # Trial division by odd candidates up to sqrt(n).
    limit = int(n ** 0.5 + 1)
    return all(n % i for i in range(3, limit, 2))
# + id="bjyL--1y_iZz"
def Endless_Primes():
    """Generate the prime numbers indefinitely: 2, 3, 5, 7, 11, ...

    Relies on the notebook's is_prime() for primality testing.
    """
    yield 2
    # fix: original did `n += 3` on an unbound local (UnboundLocalError),
    # called the undefined name `isprime`, and stepped by 12, skipping
    # most odd candidates.
    n = 3
    while True:
        if is_prime(n):
            yield n
        n += 2
# + id="GtexHZP3CSnh"
def twinprimes(b):
    """For a = 3, 5, 7, ... yield a + b whenever both a and b test prime.

    NOTE(review): `b` never changes inside the loop, so when b is composite
    the condition is never true and next() spins forever -- consistent with
    the "runs super long" remark at the call site. A twin-prime generator
    would presumably test is_prime(a) and is_prime(a + 2); confirm the
    intended behavior before changing it.
    """
    a = 3
    while True:
        if is_prime(a) == True and is_prime(b) == True:
            yield a + b
        a += 2
# + colab={"base_uri": "https://localhost:8080/", "height": 357} id="oUE2GJSrDEoQ" outputId="2c7e39b0-f833-48ed-cc83-a893105df365"
[next(g) for i in range (20)]
# + id="MA6zUE92DJB7"
k = twinprimes(3)
x=3
# + colab={"base_uri": "https://localhost:8080/", "height": 164} id="L0fgQgxlDMxd" outputId="f974bf3d-71da-4213-ab59-ca8bdd82604c"
[next(k) for i in range (x)] ### this runs super long for x>3
# + id="5BaDB8Nzb7_M"
|
Python/1. Python Basics/Notebooks/3. Classes and OOP/Objects_Classes_(some more primes fun).ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: python38
# language: python
# name: python38
# ---
# +
#default_exp sentryUtil
# -
# # sentryUtil
#hide
import sentry_sdk
sentry_sdk.init(
"https://0bab4064545e4a95b35d4e73f20f7632@o839457.ingest.sentry.io/5894971",
# Set traces_sample_rate to 1.0 to capture 100%
# of transactions for performance monitoring.
# We recommend adjusting this value in production.
traces_sample_rate=1.0
)
#export
from sentry_sdk import add_breadcrumb, capture_exception, capture_message
from nicHelper.exception import traceback
from typing import Any
from copy import deepcopy
#export
def logSentry(message:str, data:Any = None, level:str = 'info', section:str='main'):
    '''
    just add docs for ease of logging to sentry
    Input:
        message ::str:: required :: message to send to sentry
        data ::dict:: optional :: an object to send to sentry (default is an empty dict)
        level ::str::optional:: log level (default:info)
        section ::str::optional:: section name or function name (default: main)
    Response:
        Bool:: true means logged properly, false for error, print error message to console
    '''
    # fix: the original default `(lambda :{})()` created one shared mutable
    # dict at definition time; use the None-sentinel idiom and substitute a
    # fresh empty dict per call instead.
    if data is None:
        data = {}
    try:
        add_breadcrumb(
            category=section,
            # deepcopy so later caller mutations don't alter what was logged
            data={'data':deepcopy(data)},
            level=level,
            message=message
        )
        return True
    except Exception as e:
        # best-effort logging must never raise: report the failure locally
        print(message, data, level, section)
        print(f'error is {e}, {traceback()}')
        return False
# Smoke-test the helper and push a test message to Sentry.
logSentry('hello', {'hello':'this is a test'}, level='error', section = 'test')
capture_message('testing')
# ## full example
from nicHelper.sentryUtil import logSentry
logSentry('this is a test', {'testobject': 'testvalue'})
|
nbs/sentryUtil.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# %matplotlib inline
#
# # Decoding source space data
#
#
# Decoding, a.k.a MVPA or supervised machine learning applied to MEG
# data in source space on the left cortical surface. Here f-test feature
# selection is employed to confine the classification to the potentially
# relevant features. The classifier then is trained to selected features of
# epochs in source space.
#
#
# +
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import mne
import os
import numpy as np
from mne import io
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
print(__doc__)
# Locate the MNE sample dataset and derive the file paths used below.
data_path = sample.data_path()
# Fix: the forward-solution path was missing its leading '/' ('MEG/...'),
# unlike every other path built from data_path here, which yielded a broken
# path of the form '.../sampleMEG/...'.
fname_fwd = data_path + '/MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = os.environ['SUBJECT'] = subjects_dir + '/sample'
os.environ['SUBJECTS_DIR'] = subjects_dir
# -
# Set parameters
#
#
# +
# Paths to the filtered raw recording, its events, the noise covariance and
# the precomputed inverse operator of the MNE sample dataset.
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
label_names = 'Aud-rh', 'Vis-rh'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_r=2, vis_r=4)  # load contra-lateral conditions
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None)  # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443']  # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
                       exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
                    picks=picks, baseline=None, preload=True,
                    reject=dict(grad=4000e-13, eog=150e-6),
                    decim=5)  # decimate to save memory and increase speed
epochs.equalize_event_counts(list(event_id.keys()))
epochs_list = [epochs[k] for k in event_id]
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM"  # use dSPM method (could also be MNE or sLORETA)
n_times = len(epochs.times)
# NOTE(review): hard-coded left-hemisphere vertex count for this oct-6 source
# space — confirm it matches stc.lh_data.shape[0] if the dataset changes.
n_vertices = 3732
n_epochs = len(epochs.events)
# Load data and compute inverse solution and stcs for each epoch.
# NOTE(review): noise_cov is loaded but not used by the visible code below.
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
X = np.zeros([n_epochs, n_vertices, n_times])
# to save memory, we'll load and transform our epochs step by step.
# First half of X holds condition 0 epochs, second half condition 1.
for condition_count, ep in zip([0, n_epochs // 2], epochs_list):
    stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
                                method, pick_ori="normal",  # saves us memory
                                return_generator=True)
    for jj, stc in enumerate(stcs):
        X[condition_count + jj] = stc.lh_data
# -
# Decoding in sensor space using a linear SVM
#
#
# +
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
# we know the first half belongs to the first class, the second one
# NOTE(review): `len(X) / 2` relies on Python 2 integer division (this notebook
# declares a python2 kernel); under Python 3 this would pass a float to np.repeat.
y = np.repeat([0, 1], len(X) / 2)  # belongs to the second class
X = X.reshape(n_epochs, n_vertices * n_times)
# we have to normalize the data before supplying them to our classifier
X -= X.mean(axis=0)
X /= X.std(axis=0)
# prepare classifier
from sklearn.svm import SVC  # noqa
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern code imports ShuffleSplit from sklearn.model_selection.
from sklearn.cross_validation import ShuffleSplit  # noqa
# Define a monte-carlo cross-validation generator (reduce variance):
n_splits = 10
clf = SVC(C=1, kernel='linear')
cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
# setup feature selection and classification pipeline
from sklearn.feature_selection import SelectKBest, f_classif  # noqa
from sklearn.pipeline import Pipeline  # noqa
# we will use an ANOVA f-test to preselect relevant spatio-temporal units
feature_selection = SelectKBest(f_classif, k=500)  # take the best 500
# to make life easier we will create a pipeline object
anova_svc = Pipeline([('anova', feature_selection), ('svc', clf)])
# initialize score and feature weights result arrays
scores = np.zeros(n_splits)
feature_weights = np.zeros([n_vertices, n_times])
# hold on, this may take a moment
for ii, (train, test) in enumerate(cv):
    anova_svc.fit(X[train], y[train])
    y_pred = anova_svc.predict(X[test])
    y_test = y[test]
    scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
    # project the selected SVM weights back into (vertex, time) space
    feature_weights += feature_selection.inverse_transform(clf.coef_) \
        .reshape(n_vertices, n_times)
print('Average prediction accuracy: %0.3f | standard deviation:  %0.3f'
      % (scores.mean(), scores.std()))
# prepare feature weights for visualization
feature_weights /= (ii + 1)  # create average weights
# create mask to avoid division error
feature_weights = np.ma.masked_array(feature_weights, feature_weights == 0)
# normalize scores for visualization purposes
feature_weights /= feature_weights.std(axis=1)[:, None]
feature_weights -= feature_weights.mean(axis=1)[:, None]
# unmask, take absolute values, emulate f-value scale
feature_weights = np.abs(feature_weights.data) * 10
vertices = [stc.lh_vertno, np.array([], int)]  # empty array for right hemi
stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
                              tmin=stc.tmin, tstep=stc.tstep,
                              subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
                      initial_time=0.1, time_unit='s')
|
0.14/_downloads/plot_decoding_spatio_temporal_source.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # CH. 7 - TOPIC MODELS
# ## Exercises
# #### Exercise 7.01: Load Libraries
# +
# not necessary
# put this in to block deprecation warnings
# from pyLDAvis
import warnings
# Silence third-party deprecation noise (mainly from pyLDAvis) for the chapter.
warnings.filterwarnings('ignore')
# -
import langdetect # language detection
import matplotlib.pyplot # plotting
import nltk # natural language processing
import numpy # arrays and matrices
import pandas # dataframes
import pyLDAvis # plotting
import pyLDAvis.sklearn # plotting
import regex # regular expressions
import sklearn # machine learning
# +
# installing specific word dictionarys
# used for stopword removal and lemmatization
nltk.download('wordnet')
nltk.download('stopwords')
# -
# %matplotlib inline
# #### Exercise 7.02: Load and Examine Data
# +
# define path and load data
# NOTE(review): expects the UCI "News Popularity" CSV in the working directory.
path = "News_Final.csv"
df = pandas.read_csv(path, header=0)
# +
# define quick look function for data frame
def dataframe_quick_look(df, nrows):
    """Print a quick overview of *df*: its shape, column names, and first *nrows* rows."""
    for label, value in (
        ("SHAPE", df.shape),
        ("COLUMN NAMES", df.columns),
        ("HEAD", df.head(nrows)),
    ):
        print("{label}:\n{value}\n".format(label=label, value=value))
# -
dataframe_quick_look(df, nrows=2)
# +
# data set came with prespecified topics
# print the topics out with counts
print("TOPICS:\n{topics}\n".format(topics=df["Topic"].value_counts()))
# +
# lets look at the final data we are going to move forward with
# `raw` (the headline strings) is the corpus used for the rest of the chapter
raw = df["Headline"].tolist()
print("HEADLINES:\n{lines}\n".format(lines=raw[:5]))
print("LENGTH:\n{length}\n".format(length=len(raw)))
# -
# #### Exercise 7.03: Step-by-step Data Cleaning
# +
# select one headline to use as an example
example = raw[5]
print(example)
# +
# check language of headline
# filter to english only
def do_language_identifying(txt):
    """Return the language code langdetect infers for *txt*, or 'none' on failure.

    Fix: the original used a bare ``except:``, which also swallows
    SystemExit/KeyboardInterrupt; narrowed to ``except Exception``
    (langdetect raises an exception on empty/undetectable input).
    """
    try:
        the_language = langdetect.detect(txt)
    except Exception:
        the_language = 'none'
    return the_language
# -
print("DETECTED LANGUAGE:\n{lang}\n".format(lang=do_language_identifying(example)))
# +
# tokenize the data
example = example.split(" ")
print(example)
# +
# find and replace website addresses
example = [
    'URL' if bool(regex.search("http[s]?://", i))
    else i for i in example
]
print(example)
# +
# remove punctuation
example = [regex.sub("[^\\w\\s]|\n", "", i) for i in example]
print(example)
# +
# remove numbers
example = [regex.sub("^[0-9]*$", "", i) for i in example]
print(example)
# +
# make everything lowercase
example = [i.lower() if i not in ["URL"] else i for i in example]
print(example)
# +
# remove the url placeholder
example = [i for i in example if i not in ["URL",""]]
print(example)
# +
# remove stopwords
# uses stopwords dictionary previously loaded
# NOTE(review): the nltk stopwords fileid is normally lowercase 'english';
# confirm 'English' resolves on a case-sensitive filesystem.
list_stop_words = nltk.corpus.stopwords.words("English")
list_stop_words = [regex.sub("[^\\w\\s]", "", i) for i in list_stop_words]
print(list_stop_words)
# -
example = [i for i in example if i not in list_stop_words]
print(example)
# +
# perform lemmatization
# uses wordnet dictionary previously loaded
def do_lemmatizing(wrd):
    """Map *wrd* to its WordNet base form; fall back to the word itself."""
    base_form = nltk.corpus.wordnet.morphy(wrd)
    if base_form is None:
        return wrd
    return base_form
# -
# lemmatize each remaining token of the example headline
example = [do_lemmatizing(i) for i in example]
print(example)
# +
# remove words less than 5 characters long
example = [i for i in example if len(i) >= 5]
print(example)
# -
# #### Exercise 7.04: Complete Data Cleaning
# +
# define consolidated data cleaning function
def _punctuation_free_stop_words():
    """Return the punctuation-stripped NLTK English stopword list, computed once.

    Hoisted out of do_headline_cleaning: the original rebuilt (and regex-cleaned)
    the full stopword list on every call, and the cleaner is mapped over the
    whole corpus, so that work was repeated tens of thousands of times.
    """
    cache = getattr(_punctuation_free_stop_words, "_cache", None)
    if cache is None:
        words = nltk.corpus.stopwords.words("english")
        cache = [regex.sub("[^\\w\\s]", "", i) for i in words]
        _punctuation_free_stop_words._cache = cache
    return cache


def do_headline_cleaning(txt):
    """Clean one raw headline into a list of lemmatized keyword tokens.

    Returns None for non-English headlines; otherwise a list of lowercase,
    punctuation/number/stopword-free, lemmatized tokens of length >= 5.
    """
    # identify language of tweet
    # return null if language not english
    lg = do_language_identifying(txt)
    if lg != 'en':
        return None
    # split the string on whitespace
    out = txt.split(" ")
    # identify urls, replace with the placeholder URL
    out = ['URL' if bool(regex.search("http[s]?://", i)) else i for i in out]
    # remove all punctuation
    out = [regex.sub("[^\\w\\s]|\n", "", i) for i in out]
    # remove all numerics
    out = [regex.sub("^[0-9]*$", "", i) for i in out]
    # make all non-keywords lowercase
    out = [i.lower() if i not in ["URL"] else i for i in out]
    # remove URL placeholders and empty tokens
    out = [i for i in out if i not in ["URL",""]]
    # remove stopwords (list built once — see helper above)
    list_stop_words = _punctuation_free_stop_words()
    out = [i for i in out if i not in list_stop_words]
    # lemmatizing
    out = [do_lemmatizing(i) for i in out]
    # keep words 5 or more characters long
    out = [i for i in out if len(i) >= 5]
    return out
# NOTE(review): import placed mid-file by the notebook author; used for timing below.
from time import time
# -
# execute function
# takes several minutes
tick = time()
clean = list(map(do_headline_cleaning, raw))
print(time()-tick)
# +
# remove none types (non-English headlines were mapped to None)
clean = list(filter(None.__ne__, clean))
print("HEADLINES:\n{lines}\n".format(lines=clean[:5]))
print("LENGTH:\n{length}\n".format(length=len(clean)))
# +
# turn tokens back into string
# concatenate with white spaces
clean_sentences = [" ".join(i) for i in clean]
# -
print(clean_sentences[0:10])
# #### Exercise 7.05: Count Vectorizer
# +
# define some global variables
number_words = 10       # top words shown per topic
number_docs = 10        # top documents shown per topic
number_features = 1000  # vocabulary cap for the vectorizers
# +
# execute bag of words model
# use raw term counts for lda
# as it is a probabilistic graphical model
vectorizer1 = sklearn.feature_extraction.text.CountVectorizer(
    analyzer="word",
    max_df=0.5,
    min_df=20,
    max_features=number_features
)
clean_vec1 = vectorizer1.fit_transform(clean_sentences)
print(clean_vec1[0])
# NOTE(review): get_feature_names() was removed in scikit-learn 1.2 in favour
# of get_feature_names_out() — confirm the pinned sklearn version.
feature_names_vec1 = vectorizer1.get_feature_names()
# -
# #### Exercise 7.06: Select Number of Topics
# +
# define function to calculate perplexity for LDA model trained on ntopics
def perplexity_by_ntopic(data, ntopics):
    """Fit an online LDA per candidate topic count and score its perplexity.

    Returns a tuple of (results dataframe, topic count with the lowest
    perplexity on *data*).
    """
    rows = {
        "Number Of Topics": [],
        "Perplexity Score": []
    }
    for topic_count in ntopics:
        model = sklearn.decomposition.LatentDirichletAllocation(
            n_components=topic_count,
            learning_method="online",
            random_state=0
        )
        model.fit(data)
        rows["Number Of Topics"].append(topic_count)
        rows["Perplexity Score"].append(model.perplexity(data))
    result_df = pandas.DataFrame(rows)
    best_row = result_df["Perplexity Score"].idxmin()
    best_num_topics = result_df.loc[best_row, "Number Of Topics"]
    return (result_df, best_num_topics)
# +
# find perplexity scores for several different numbers of topics
# takes several minutes
# find perplexity scores for several different numbers of topics
# takes several minutes
df_perplexity, optimal_num_topics = perplexity_by_ntopic(
    clean_vec1,
    ntopics=[1, 2, 3, 4, 6, 8, 10]
)
# -
print(df_perplexity)
df_perplexity.plot.line("Number Of Topics", "Perplexity Score")
# #### Exercise 7.07: Latent Dirichlet Allocation
# +
# define and fit LDA model with the perplexity-optimal topic count
lda = sklearn.decomposition.LatentDirichletAllocation(
    n_components=optimal_num_topics,
    learning_method="online",
    random_state=0
)
lda.fit(clean_vec1)
# +
# output matrix
# h: docs to topics
lda_transform = lda.transform(clean_vec1)
print(lda_transform.shape)
print(lda_transform)
# +
# output matrix
# w: words to topics
lda_components = lda.components_
print(lda_components.shape)
print(lda_components)
# +
# define function to reformat output matrices into easily readable tables
def get_topics(mod, vec, names, docs, ndocs, nwords):
    """Summarise a fitted topic model as two readable tables.

    Returns (W_df, H_df): per topic, the top *nwords* (weight, word) pairs
    and the top *ndocs* (weight, document) pairs.
    """
    # word-to-topic matrix, rows normalised to sum to 1
    word_topic = mod.components_
    word_topic_norm = word_topic / word_topic.sum(axis=1)[:, numpy.newaxis]
    # document-to-topic matrix
    doc_topic = mod.transform(vec)
    top_words = {}
    top_docs = {}
    for topic_idx, topic_row in enumerate(word_topic_norm):
        topic_key = "Topic{}".format(topic_idx)
        # strongest words for this topic, heaviest first
        best_word_ids = topic_row.argsort()[::-1][:nwords]
        top_words[topic_key] = [
            (round(topic_row[j], 4), names[j])
            for j in best_word_ids
        ]
        # strongest documents for this topic, heaviest first
        topic_col = doc_topic[:, topic_idx]
        best_doc_ids = topic_col.argsort()[::-1][:ndocs]
        top_docs[topic_key] = [
            (round(topic_col[j], 4), docs[j])
            for j in best_doc_ids
        ]
    W_df = pandas.DataFrame(
        top_words,
        index=["Word" + str(i) for i in range(nwords)]
    )
    H_df = pandas.DataFrame(
        top_docs,
        index=["Doc" + str(i) for i in range(ndocs)]
    )
    return (W_df, H_df)
# +
# run function
# run function to tabulate the fitted LDA model
W_df, H_df = get_topics(
    mod=lda,
    vec=clean_vec1,
    names=feature_names_vec1,
    docs=raw,
    ndocs=number_docs,
    nwords=number_words
)
# +
# word-topic table
print(W_df)
# +
# document-topic table
print(H_df)
# -
# #### Exercise 7.08: Visualizing LDA
# iterative visualization
# featuring pca biplot and histogram
# NOTE(review): pyLDAvis.sklearn was renamed pyLDAvis.lda_model in pyLDAvis 3.4 —
# confirm the pinned version.
lda_plot = pyLDAvis.sklearn.prepare(lda, clean_vec1, vectorizer1, R=10)
pyLDAvis.display(lda_plot)
# +
# define functiont to fit and plot t-SNE model
def plot_tsne(data, threshold):
    """Project a document-topic matrix to 2-D with t-SNE and scatter-plot it.

    Parameters:
        data: 2-d array, one row per document, one column per topic.
        threshold: keep only documents whose max topic probability >= threshold.
    Side effects: prints the kept-document count and per-topic counts, and
    draws a matplotlib scatter plot coloured by most-probable topic.
    """
    # filter data according to threshold
    index_meet_threshold = numpy.amax(data, axis=1) >= threshold
    lda_transform_filt = data[index_meet_threshold]
    # fit tsne model
    # x-d -> 2-d, x = number of topics
    tsne = sklearn.manifold.TSNE(
        n_components=2,
        verbose=0,
        random_state=0,
        angle=0.5,
        init='pca'
    )
    tsne_fit = tsne.fit_transform(lda_transform_filt)
    # most probable topic for each headline
    most_prob_topic = []
    for i in range(tsne_fit.shape[0]):
        most_prob_topic.append(lda_transform_filt[i].argmax())
    print("LENGTH:\n{}\n".format(len(most_prob_topic)))
    unique, counts = numpy.unique(
        numpy.array(most_prob_topic),
        return_counts=True
    )
    print("COUNTS:\n{}\n".format(numpy.asarray((unique, counts)).T))
    # make plot
    color_list = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
    for i in list(set(most_prob_topic)):
        indices = [idx for idx, val in enumerate(most_prob_topic) if val == i]
        matplotlib.pyplot.scatter(
            x=tsne_fit[indices, 0],
            y=tsne_fit[indices, 1],
            s=0.5,
            # Fix: index colors modulo the palette size; the original
            # color_list[i] raised IndexError for more than 7 topics.
            c=color_list[i % len(color_list)],
            label='Topic' + str(i),
            alpha=0.25
        )
    matplotlib.pyplot.xlabel('x-tsne')
    matplotlib.pyplot.ylabel('y-tsne')
    matplotlib.pyplot.legend(markerscale=10)
# -
plot_tsne(data=lda_transform, threshold=0.75)
# #### Exercise 7.09: Trying 4 Topics
# +
# rerun LDA model using number of topics equal to 4
lda4 = sklearn.decomposition.LatentDirichletAllocation(
    n_components=4,  # number of topics data suggests
    learning_method="online",
    random_state=0
)
lda4.fit(clean_vec1)
# +
# run function to output raw matrices in nice table format
W_df4, H_df4 = get_topics(
    mod=lda4,
    vec=clean_vec1,
    names=feature_names_vec1,
    docs=raw,
    ndocs=number_docs,
    nwords=number_words
)
# +
# word-topic table
print(W_df4)
# +
# document-topic table
print(H_df4)
# +
# interactive visualization
lda4_plot = pyLDAvis.sklearn.prepare(lda4, clean_vec1, vectorizer1, R=10)
pyLDAvis.display(lda4_plot)
# -
# #### Exercise 7.10: TF-IDF Vectorizer
# +
# convert to bag of words model
# use tf-idf method this time
vectorizer2 = sklearn.feature_extraction.text.TfidfVectorizer(
    analyzer="word",
    max_df=0.5,
    min_df=20,
    max_features=number_features,
    smooth_idf=False
)
clean_vec2 = vectorizer2.fit_transform(clean_sentences)
print(clean_vec2[0])
# -
# NOTE(review): get_feature_names() removed in scikit-learn 1.2 — see earlier note.
feature_names_vec2 = vectorizer2.get_feature_names()
feature_names_vec2
# #### Exercise 7.11: Non-negative Matrix Factorization
# +
# define and fit nmf model
# NOTE(review): the `alpha` argument was split into alpha_W/alpha_H and removed
# in scikit-learn 1.2 — confirm the pinned version.
nmf = sklearn.decomposition.NMF(
    n_components=4,
    init="nndsvda",
    solver="mu",
    beta_loss="frobenius",
    random_state=0,
    alpha=0.1,
    l1_ratio=0.5
)
nmf.fit(clean_vec2)
# +
# run function to produce nice output tables
W_df, H_df = get_topics(
    mod=nmf,
    vec=clean_vec2,
    names=feature_names_vec2,
    docs=raw,
    ndocs=number_docs,
    nwords=number_words
)
# +
# word-topic table
print(W_df)
# +
# document-topic table
print(H_df)
# -
# #### Exercise 7.12: Visualizing NMF
# +
# output raw document-topic matrix for t-SNE plot
nmf_transform = nmf.transform(clean_vec2)
print(nmf_transform.shape)
print(nmf_transform)
# +
# run function to produce t-SNE plot
plot_tsne(data=nmf_transform, threshold=0)
|
Exercise01-Exercise12/Exercise01-Exercise12.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:cse6363] *
# language: python
# name: conda-env-cse6363-py
# ---
# +
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.tree import DecisionTreeClassifier
from dtreeviz.trees import dtreeviz
# %matplotlib inline
# -
# Load iris and hold out 10% as a test set (fixed seed for reproducibility).
iris = load_iris()
x_train, x_test, y_train, y_test = train_test_split(iris.data, iris.target, test_size=0.10, random_state=42)
# Shallow, regularised tree; the commented line is the unconstrained variant.
model = DecisionTreeClassifier(max_depth=3, min_samples_leaf=10)
# model = DecisionTreeClassifier()
model.fit(x_train, y_train)
# Train and test accuracy (displayed by the notebook cell).
model.score(x_train, y_train)
model.score(x_test, y_test)
# model.decision_path(x_test)
# Render the fitted tree with dtreeviz.
tree_vis = dtreeviz(model, x_train, y_train, target_name="target", feature_names=iris.feature_names, class_names=list(iris.target_names))
tree_vis
print(iris.feature_names, x_test[0])
|
decision_trees/iris_tree.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + id="zYyNboLVncDg" executionInfo={"status": "ok", "timestamp": 1604518193419, "user_tz": 180, "elapsed": 2599, "user": {"displayName": "<NAME>\u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
# Let TensorFlow grow GPU memory on demand instead of reserving it all up front.
config = ConfigProto()
config.gpu_options.allow_growth = True
session = InteractiveSession(config=config)
# + id="zh-guHjToTpx" executionInfo={"status": "ok", "timestamp": 1604518197072, "user_tz": 180, "elapsed": 6246, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="98abb70e-4ff2-43ee-a1af-fabc1c8bb2bb" colab={"base_uri": "https://localhost:8080/"}
# !pip install pandas_summary
# + id="XxTrvlz5ncDk" executionInfo={"status": "ok", "timestamp": 1604518197073, "user_tz": 180, "elapsed": 6241, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
import pandas as pd
import numpy as np
import datetime
from pandas_summary import DataFrameSummary
# + id="LqTV35YNncDm" executionInfo={"status": "ok", "timestamp": 1604518197073, "user_tz": 180, "elapsed": 6237, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
from tensorflow.keras.utils import to_categorical
# + id="Wcbcqdo_ncDo" executionInfo={"status": "ok", "timestamp": 1604518203065, "user_tz": 180, "elapsed": 12225, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Preprocessed Rossmann train/test sets in feather format, stored on Google Drive.
# NOTE(review): this cell reads from /content/drive but drive.mount() appears in a
# LATER cell — this only works if Drive was already mounted; confirm cell order.
PATH = '/content/drive/My Drive/Colab Notebooks/kaggle-rossmann-master/rossmann/rossmann/'
df = pd.read_feather(PATH+'train_normalized_data.fth')
df_test = pd.read_feather(PATH+'test_normalized_data.fth')
# + id="SeEyt7FdoFIz" executionInfo={"status": "ok", "timestamp": 1604518203069, "user_tz": 180, "elapsed": 12225, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="9a5f475a-81ec-42cc-f9dd-57ac77c1dc0a" colab={"base_uri": "https://localhost:8080/"}
# Mount Google Drive in the Colab runtime.
# NOTE(review): an earlier cell already reads feather files from /content/drive;
# this mount should probably run before that cell.
from google.colab import drive
drive.mount('/content/drive')
# + id="h3Ul_1MQncDq" executionInfo={"status": "ok", "timestamp": 1604518203070, "user_tz": 180, "elapsed": 12220, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Full list of categorical variables available in the preprocessed data.
cat_vars = ['Store', 'DayOfWeek', 'Year', 'Month', 'Day', 'StateHoliday', 'CompetitionMonthsOpen', 'Promo2Weeks',
            'StoreType', 'Assortment', 'PromoInterval', 'CompetitionOpenSinceYear', 'Promo2SinceYear', 'State',
            'Week', 'Events', 'Promo_fw', 'Promo_bw', 'StateHoliday_bool_fw', 'StateHoliday_bool_bw', 'SchoolHoliday_fw', 'SchoolHoliday_bw']
# NOTE(review): this assignment OVERWRITES the full list with a 2-variable debug
# subset — remove it to train on all categorical features.
cat_vars = ['Store', 'DayOfWeek']
# + id="ZWD8GWbencDs" executionInfo={"status": "ok", "timestamp": 1604518203071, "user_tz": 180, "elapsed": 12217, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Full list of continuous variables available in the preprocessed data.
contin_vars = ['CompetitionDistance',
   'Max_TemperatureC', 'Mean_TemperatureC', 'Min_TemperatureC', 'Precipitationmm',
   'Max_Humidity', 'Mean_Humidity', 'Min_Humidity', 'Max_Wind_SpeedKm_h',
   'Mean_Wind_SpeedKm_h', 'CloudCover', 'trend', 'trend_DE',
   'AfterStateHoliday_bool', 'BeforeStateHoliday_bool', 'Promo', 'SchoolHoliday', 'StateHoliday_bool']
# NOTE(review): overwritten with a 2-variable debug subset, like cat_vars above.
contin_vars = ['BeforeStateHoliday_bool', 'Max_TemperatureC']
# + id="E0ntGPGJncDu" executionInfo={"status": "ok", "timestamp": 1604518203071, "user_tz": 180, "elapsed": 12212, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Per-variable unique-value counts; used below to size each embedding vocabulary.
uniques = DataFrameSummary(df[cat_vars]).summary().loc[['uniques']]
# + id="uY8lk3m2ncDx" executionInfo={"status": "ok", "timestamp": 1604518203072, "user_tz": 180, "elapsed": 12209, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="ae4748f9-b333-4e6d-cd01-18ee498bcd25" colab={"base_uri": "https://localhost:8080/", "height": 106}
uniques.T
# + [markdown] id="vSGndOS8ncD0"
# # Asignación de dimensión de embeddings
# + id="vUTnG3HgncD0" executionInfo={"status": "ok", "timestamp": 1604518203072, "user_tz": 180, "elapsed": 12203, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Hand-tuned embedding width per categorical variable (roughly tracking cardinality).
cat_var_dict = {'Store': 50, 'DayOfWeek': 2, 'Year': 2, 'Month': 2,
                'Day': 10, 'StateHoliday': 2, 'CompetitionMonthsOpen': 2,
                'Promo2Weeks': 1, 'StoreType': 2, 'Assortment': 3, 'PromoInterval': 3,
                'CompetitionOpenSinceYear': 4, 'Promo2SinceYear': 4, 'State': 6,
                'Week': 25, 'Events': 4, 'Promo_fw': 1,
                'Promo_bw': 1, 'StateHoliday_bool_fw': 1,
                'StateHoliday_bool_bw': 1, 'SchoolHoliday_fw': 1,
                'SchoolHoliday_bw': 1}
# + id="6Nm7Y9OAncD2" executionInfo={"status": "ok", "timestamp": 1604518203073, "user_tz": 180, "elapsed": 12200, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="db9ca25d-27d1-4448-a848-8f8cc2aaf56d" colab={"base_uri": "https://localhost:8080/"}
# Sanity-check each categorical variable: embedding width, cardinality, values.
for v in cat_vars:
    uniques_ = df[v].unique()
    uniques_.sort()
    print(v, cat_var_dict[v], len(uniques_), uniques_)
    print()
# + [markdown] id="Sb8pVbiTncD7"
# # Definición de modelo
# + id="glH3W1B5ncD7" executionInfo={"status": "ok", "timestamp": 1604518203073, "user_tz": 180, "elapsed": 12194, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Model configuration flags: second 'Customers' output head, log-space target,
# and the activation of the output layer(s).
add_customers = True
log_output = False
output_activation = 'linear'
# + id="wZ8Hk04EncD-" executionInfo={"status": "ok", "timestamp": 1604518203074, "user_tz": 180, "elapsed": 12191, "user": {"displayName": "<NAME>\u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
from tensorflow.keras.models import Model
from tensorflow.keras.callbacks import ModelCheckpoint
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Embedding, Input, Flatten, Concatenate, Dense, BatchNormalization, Activation, LeakyReLU, Dropout
from tensorflow.keras.regularizers import l2
# + id="zJ7XAV4PncEA" executionInfo={"status": "ok", "timestamp": 1604518203074, "user_tz": 180, "elapsed": 12188, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
def get_cat_vars_model(cat_vars, uniques, cat_var_dict):
    """Build one Input -> Embedding -> Flatten branch per categorical variable.

    Returns (list of Input tensors, list of flattened embedding outputs),
    both ordered like *cat_vars*.
    """
    branch_inputs = []
    branch_outputs = []
    for var_name in cat_vars:
        var_input = Input(shape=(1,), name=f"{var_name}_input")
        # vocabulary size from the uniques summary row; embedding width from
        # the hand-tuned cat_var_dict mapping
        embedded = Embedding(uniques[var_name][0], cat_var_dict[var_name], name=f'{var_name}_Embed')(var_input)
        flattened = Flatten(name=f"{var_name}_flat")(embedded)
        branch_inputs.append(var_input)
        branch_outputs.append(flattened)
    return branch_inputs, branch_outputs
def get_cont_vars_input(contin_vars, dense_layer=False):
    """Build one Input per continuous variable, optionally through a Dense(1).

    Parameters:
        contin_vars: iterable of continuous-variable names.
        dense_layer: when True, pass each input through a linear Dense(1) layer.
    Returns (list of Input tensors, list of output tensors), ordered like
    *contin_vars*.
    """
    cont_vars_inputs = []
    cont_vars_outputs = []
    for cont_var in contin_vars:
        cont_var_in = Input(shape=(1,), name=f"{cont_var}_input")
        cont_vars_inputs.append(cont_var_in)
        if dense_layer:
            # Fix: the Dense layer reused the name f"{cont_var}_input", which
            # collides with the Input layer above — Keras rejects duplicate
            # layer names, so dense_layer=True could never build a model.
            cont_var_out = Dense(1, name=f"{cont_var}_dense", activation = 'linear')(cont_var_in)
            cont_vars_outputs.append(cont_var_out)
        else:
            cont_vars_outputs.append(cont_var_in)
    return cont_vars_inputs, cont_vars_outputs
# + id="U1gARZzsncED" executionInfo={"status": "ok", "timestamp": 1604518203361, "user_tz": 180, "elapsed": 12471, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Materialise the embedding branches and the continuous-variable inputs.
cat_var_inputs, cat_vars_embed_outs = get_cat_vars_model(cat_vars, uniques, cat_var_dict)
cont_vars_inputs, cont_vars_outs= get_cont_vars_input(contin_vars)
# + id="h36XRuwXncEF" executionInfo={"status": "ok", "timestamp": 1604518203361, "user_tz": 180, "elapsed": 12469, "user": {"displayName": "<NAME>\u<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Descomentar para ver resultados
# cat_vars_embed_outs
# cat_var_inputs
# cont_vars_inputs
# cont_vars_outs
# + id="9mM3-tizncEH" executionInfo={"status": "ok", "timestamp": 1604518203362, "user_tz": 180, "elapsed": 12467, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Two-layer dense head on top of the concatenated embeddings + continuous inputs.
first_hidden_units = 1000
second_hidden_units = 500
l2_lambda = 1e-3
# NOTE(review): concatenates cont_vars_inputs (the raw Input tensors) rather
# than cont_vars_outs — equivalent only while dense_layer=False; confirm intent.
merged = Concatenate(name='All_Concatenate')(cat_vars_embed_outs + cont_vars_inputs)
x = Dense(first_hidden_units, kernel_initializer="uniform", kernel_regularizer=l2(l2_lambda))(merged)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = LeakyReLU()(x)
x = Dense(second_hidden_units, kernel_initializer="uniform", kernel_regularizer=l2(l2_lambda))(x)
# x = BatchNormalization()(x)
x = Activation('relu')(x)
# x = LeakyReLU()(x)
# Two regression heads: sales always, customers optionally (see add_customers).
output_1 = Dense(1, name='Sales', activation=output_activation)(x)
output_2 = Dense(1, name='Customers', activation=output_activation)(x)
# + id="_gXyPjd7ncEI" executionInfo={"status": "ok", "timestamp": 1604518203362, "user_tz": 180, "elapsed": 12463, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Assemble the functional model with one or two output heads.
if add_customers:
    model = Model(cat_var_inputs + cont_vars_inputs, [output_1, output_2])
else:
    model = Model(cat_var_inputs + cont_vars_inputs, [output_1])
# + id="7ueeYKuBncEK" executionInfo={"status": "ok", "timestamp": 1604518203363, "user_tz": 180, "elapsed": 12460, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="0f270221-5446-47fa-838f-febe3de61bc0" colab={"base_uri": "https://localhost:8080/"}
# Print the layer-by-layer architecture summary.
model.summary()
# + id="A5mFBasyncEM" executionInfo={"status": "ok", "timestamp": 1604518203363, "user_tz": 180, "elapsed": 12454, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Snapshot the untrained weights so training runs can be restarted from scratch.
model.save_weights('initial_weights.hdf5')
# + id="rXtDKBJFncEO" executionInfo={"status": "ok", "timestamp": 1604518203693, "user_tz": 180, "elapsed": 12779, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="0c718b99-ea31-4a13-b79a-d4e34e319f91" colab={"base_uri": "https://localhost:8080/"}
# Time-based split: everything before 2015-07-01 trains, the rest validates.
df_train = df[df.Date < datetime.datetime(2015, 7, 1)]
df_val = df[df.Date >= datetime.datetime(2015, 7, 1)]
# NOTE(review): the 'porcentaje' printed here is the TRAIN fraction, not val.
print(f'Cantidad en val: {len(df_val)}, porcentaje: {len(df_train)/(len(df_train) + len(df_val))}')
# + id="zLuYaimFncEQ" executionInfo={"status": "ok", "timestamp": 1604518203693, "user_tz": 180, "elapsed": 12757, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
from matplotlib import pyplot as plt
# Exploratory per-store sales plots, left commented out by the author.
# plt.figure(figsize=(20,5))
# plt.plot((df_train[df_train['Store']==1]['Sales'].values - df_train[df_train['Store']==1]['Sales'].mean())/df_train[df_train['Store']==1]['Sales'].std())
# plt.show()
# plt.figure(figsize=(20,5))
# plt.plot(np.log(df_train[df_train['Store']==1]['Sales'].values)/np.max(np.log(df_train[df_train['Store']==1]['Sales'].values)))
# plt.show()
# + id="Wm6IVWg-ncET" executionInfo={"status": "ok", "timestamp": 1604518205870, "user_tz": 180, "elapsed": 14924, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Baseline: mean of strictly positive sales per store, computed on train only.
stores_mean = {}
for store, g_df in df_train.groupby('Store'):
    stores_mean[store] = g_df[g_df['Sales'] > 0]['Sales'].mean()
# + id="POw9p6CBncEU" executionInfo={"status": "ok", "timestamp": 1604518205871, "user_tz": 180, "elapsed": 14914, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="71bc1247-7e2f-426d-ba23-77c99e177337" colab={"base_uri": "https://localhost:8080/"}
# Attach the per-store mean and the mean-centred sales to both splits.
# NOTE(review): df_train/df_val are slices of df, so these .loc assignments may
# raise SettingWithCopyWarning; stores absent from train map to NaN via .get.
df_train.loc[:, 'mean_by_store'] = df_train['Store'].apply(stores_mean.get)
df_val.loc[:, 'mean_by_store'] = df_val['Store'].apply(stores_mean.get)
df_train.loc[:, 'Sales_store'] = df_train['Sales'] - df_train['mean_by_store']
df_val.loc[:, 'Sales_store'] = df_val['Sales'] - df_val['mean_by_store']
# + id="QhmgvSgUncEW" executionInfo={"status": "ok", "timestamp": 1604518206324, "user_tz": 180, "elapsed": 15351, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
from tensorflow.keras import backend as K
# + id="Etn_rtJ8ncEY" executionInfo={"status": "ok", "timestamp": 1604518206324, "user_tz": 180, "elapsed": 15343, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
def rmspe(y_true, y_pred):
    """Root Mean Squared Percentage Error as a Keras backend metric.

    NOTE(review): rows with y_true == 0 produce inf/NaN — presumably
    closed-store days (Sales == 0) are filtered upstream; verify.
    """
    return K.sqrt(K.mean(K.square((y_true - y_pred)/y_true)))
# + id="CeL7TP7QncEb" executionInfo={"status": "ok", "timestamp": 1604518206325, "user_tz": 180, "elapsed": 15337, "user": {"displayName": "<NAME>\u0<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
def get_metric(df, sales_):
    """Root Mean Squared Percentage Error between df['Sales'] and sales_."""
    relative_error = (df['Sales'] - sales_) / df['Sales']
    return np.sqrt(np.mean(np.square(relative_error)))
# + id="Z6tllNl6ncEd" executionInfo={"status": "ok", "timestamp": 1604518206325, "user_tz": 180, "elapsed": 15331, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}} outputId="b15efc67-5142-4348-f4ff-07ad63714062" colab={"base_uri": "https://localhost:8080/"}
# Baseline RMSPE on the validation split using the per-store mean as prediction.
get_metric(df_val, df_val['mean_by_store'])
# + id="pEWknTptncEf" executionInfo={"status": "ok", "timestamp": 1604518206325, "user_tz": 180, "elapsed": 15320, "user": {"displayName": "<NAME>\u00e1<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# One input array per feature column, as expected by the multi-input Keras model
# (one Input per categorical embedding plus the continuous features).
all_vars = cat_vars + contin_vars
X_train = np.hsplit(df_train[all_vars].values, len(all_vars))
X_val = np.hsplit(df_val[all_vars].values, len(all_vars))
X_test = np.hsplit(df_test[all_vars].values, len(all_vars))
# + id="uvNxhO58ncEh" executionInfo={"status": "ok", "timestamp": 1604518206326, "user_tz": 180, "elapsed": 15314, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Build the target arrays. With add_customers the model predicts raw Sales and
# Customers jointly; otherwise it predicts the store-centered 'Sales_store'.
if add_customers:
    y_out_columns = ['Sales', 'Customers']
else:
    y_out_columns = ['Sales_store']
if log_output:
    # Log scale, divided by the max log value so targets land in (0, 1].
    # NOTE(review): max is taken over `df` (full data) rather than df_train —
    # possible train/validation leakage; verify this is intentional.
    max_log_y = np.max(np.log(df[y_out_columns])).values
    y_train = np.log(df_train[y_out_columns].values)/max_log_y
    y_val = np.log(df_val[y_out_columns].values)/max_log_y
else:
    # Standardization (z-score) using training-set statistics.
    y_mean = df_train[y_out_columns].mean().values
    y_std = df_train[y_out_columns].std().values
    y_train = (df_train[y_out_columns].values - y_mean)/y_std
    y_val = (df_val[y_out_columns].values - y_mean)/y_std
    #y_max = df_train[y_out_columns].max().values
    #y_train = df_train[y_out_columns].values/y_max
    #y_val = df_val[y_out_columns].values/y_max
# One array per output head, matching the multi-output model.
y_train = np.hsplit(y_train, y_train.shape[1])
y_val = np.hsplit(y_val, y_val.shape[1])
# + id="hsYQnAGkncEj" executionInfo={"status": "ok", "timestamp": 1604518206326, "user_tz": 180, "elapsed": 15307, "user": {"displayName": "<NAME>\u00<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# MSE loss on the normalized targets; rmspe tracked as an extra metric.
# NOTE(review): Adam's `lr` kwarg is deprecated in newer Keras (use
# `learning_rate`) — confirm against the installed TF/Keras version.
lr = 0.001
model.compile(optimizer=Adam(lr=lr), metrics=['mse', rmspe], loss='mse')
# + id="hq3AxI-jncEm" executionInfo={"status": "ok", "timestamp": 1604518206327, "user_tz": 180, "elapsed": 15301, "user": {"displayName": "<NAME>", "photoUrl": "https://lh3.googleusercontent.com/a-/AOh14GjfL0Egrvj7vCUWsAEEiusTURo2hzbS9ee-olk=s64", "userId": "17023645381607303930"}}
# Keep only the best epoch's weights. With two output heads the Sales head's
# validation MSE is monitored; otherwise the overall validation loss.
if add_customers:
    checkpoint = ModelCheckpoint('bestmodel.hdf5', monitor='val_Sales_mse', verbose=1, save_best_only=True)
else:
    checkpoint = ModelCheckpoint('bestmodel.hdf5', monitor='val_loss', verbose=1, save_best_only=True)
# + id="jbMfQ4PXncEo" outputId="76f526b8-3dd0-4749-9c80-fea4e92db676" colab={"base_uri": "https://localhost:8080/"}
# Train with checkpointing; best weights land in bestmodel.hdf5.
epochs = 20
batch_size = 256
history = model.fit(X_train, y_train, validation_data=(X_val, y_val), epochs=epochs, batch_size=batch_size, callbacks=[checkpoint], verbose=2)
# + id="B1pOFORincEp"
# Learning curves: validation loss first, then training loss.
plt.plot(history.history['val_loss'])
# plt.plot(history.history['val_Customers_mse'])
# plt.plot(history.history['val_Sales_mse'])
plt.show()
plt.plot(history.history['loss'])
# plt.plot(history.history['Customers_mse'])
# plt.plot(history.history['Sales_mse'])
# + [markdown] id="1TReLppJncEr"
# # Métrica
# + [markdown] id="1wRSbv6ancEr"
# $$
# \textrm{RMSPE} = \sqrt{\frac{1}{n} \sum_{i=1}^{n} \left(\frac{\hat{y}_i - y_i}{y_i}\right)^2}
# $$
# + id="FWgjq3BgncEs"
# Evaluate the last-epoch weights, then reload the checkpointed best weights
# and evaluate again for comparison.
model.evaluate(X_val, y_val)
# + id="9_lWj--wncEu"
model.load_weights('bestmodel.hdf5')
model.evaluate(X_val, y_val)
# + id="Gj5mkn3incEv"
# model.load_weights('bestmodel.hdf5')
# model.evaluate(X_val, y_val)
# 30188/30188 [==============================] - 5s 172us/step
# [0.12197033089921382,
# 0.07211007360268763,
# 0.037183713050426136,
# 0.07211007360268763,
# 0.037183713050426136]
# [0.12932546436786652,
# 0.07751645147800446,
# 0.039259567856788635,
# 0.07751645147800446,
# 0.039259567856788635]
# + id="4pXDmEfVncEx"
# Map model outputs back to the original Sales scale, inverting whichever
# target transform was used above. With add_customers, predict() returns a
# list per output head, hence the extra [0].
if log_output:
    if add_customers:
        y_pred = np.exp(model.predict(X_val, verbose=1)[0][:, 0]*max_log_y[0])
        y_pred_test = np.exp(model.predict(X_test, verbose=1)[0][:, 0]*max_log_y[0])
    else:
        y_pred = np.exp(model.predict(X_val, verbose=1)*max_log_y)[:,0]
        y_pred_test = np.exp(model.predict(X_test, verbose=1)*max_log_y)[:,0]
else:
    if add_customers:
        # NOTE(review): inverting (x - mean)/std should be pred*std + mean,
        # but this adds y_std[0]*y_mean[0] (mean scaled by std) — verify.
        y_pred = (model.predict(X_val, verbose=1)[0]*y_std[0] + y_std[0]*y_mean[0])[:,0]
        y_pred_test = (model.predict(X_test, verbose=1)[0]*y_std[0] + y_std[0]*y_mean[0])[:,0]
        #y_pred = (model.predict(X_val, verbose=1)[0]*y_max)[:,0]
        #y_pred_test = (model.predict(X_test, verbose=1)[0]*y_max)[:,0]
    else:
        # NOTE(review): same suspected denormalization issue as above (y_std[0]*y_mean).
        y_pred = model.predict(X_val, verbose=1)[:,0]*y_std + y_std[0]*y_mean
        y_pred_test = model.predict(X_test, verbose=1)[:,0]*y_std + y_std[0]*y_mean
        #y_pred = model.predict(X_val, verbose=1)[:,0]*y_max
        #y_pred_test = model.predict(X_test, verbose=1)[:,0]*y_max
# Closed stores have zero sales by definition.
y_pred_test[df_test['Open'] == 0] = 0
# + id="LAlsm4BQncEy"
# RMSPE of the denormalized predictions against true validation sales.
np.sqrt((((df_val['Sales'].values - y_pred)/df_val['Sales'].values)**2).sum()/len(y_pred))
# + id="fSi42hmVncE0"
y_pred_test[:100]
# + [markdown] id="OsJjY-xEncE2"
# # Baseline
# + id="3uLnrbTgncE2"
import pandas as pd
sample_csv = pd.read_csv(PATH+'sample_submission.csv')
# + id="ds8HXhr0ncE4"
# Recompute per-store mean sales, here over the FULL dataframe `df`
# (unlike the training-only version above), for a simple baseline.
stores_mean = {}
for store, g_df in df.groupby('Store'):
    stores_mean[store] = g_df[g_df['Sales'] > 0]['Sales'].mean()
# + id="h1imARxXncE6"
# Predict each test row's sales as its store mean; closed stores sell 0.
df_test['Sales'] = df_test['Store'].apply(stores_mean.get)
df_test.loc[df_test['Open'] == 0, 'Sales'] = 0
# + id="KCORsUJHncE8"
df_test[['Store', 'Sales']].head(10)
# + id="6l4RokExncE-"
df_test[df_test['Open'] == 0][['Store', 'Sales']].head()
# + id="n8ujWmXwncFA"
sample_csv['Sales'] = df_test['Sales']
# + id="MmQcC1mVncFC"
# NOTE(review): f-string has no placeholders, and "submision" is misspelled
# (kept as-is since the filename is consumed downstream).
sample_csv.to_csv(f'submision_baseline.csv', index=False)
# + id="E3XVKjEMncFF"
sample_csv.head()
# + [markdown] id="LEXeZLqCncFG"
# # Submit a la competición
# + id="WlPVFFvyncFH"
# Write the NN predictions; the filename encodes the hyperparameters used.
sample_csv = pd.read_csv(PATH+'sample_submission.csv')
sample_csv['Sales'] = y_pred_test
sample_csv.head()
sample_csv.to_csv(f'submision_{add_customers}-{log_output}-{output_activation}-{l2_lambda}-{first_hidden_units}-{epochs}-{batch_size}-{lr}.csv', index=False)
# + [markdown] id="SLaG3gjPncFI"
# # Análisis de embeddings
# + id="97va0aZkncFI"
def plot_embed(layer_name, cat_names):
    """Scatter-plot the first two (negated) dimensions of an embedding layer,
    labelling each point with the corresponding category name."""
    weights = model.get_layer(layer_name).get_weights()[0]
    print(weights.shape)
    plt.figure(figsize=(8,8))
    xs = -weights[:, 0]
    ys = -weights[:, 1]
    plt.scatter(xs, ys)
    for idx, label in enumerate(cat_names):
        plt.annotate(label, (xs[idx], ys[idx]), xytext = (-5, 8), textcoords = 'offset points')
# + id="U1kiXSXNncFK"
# Visualize learned categorical embeddings from the best checkpoint.
model.load_weights('bestmodel.hdf5')
plot_embed('DayOfWeek_Embed', ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat','Sun'])
# + id="XcdkZQL-ncFL"
plot_embed('Month_Embed', list(range(12)))
# + id="rHGsqgkfncFP"
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
# + id="uYTv2ZrxncFQ"
# NOTE(review): day_of_week_embedding is defined elsewhere (not in this view).
day_of_week_embedding.shape
# + id="1w5aT8xHncFR"
# Project the 7 day-of-week embedding vectors to 2D with t-SNE and label them.
tsne = TSNE(n_components=2, random_state=1, learning_rate=10, n_iter=10000)
Y = tsne.fit_transform(day_of_week_embedding)
names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat','Sun']
plt.figure(figsize=(8,8))
plt.scatter(-Y[:, 0], -Y[:, 1])
for i, txt in enumerate(names):
    plt.annotate(txt, (-Y[i, 0],-Y[i, 1]), xytext = (-5, 8), textcoords = 'offset points')
# + id="Bkr_GbSxncFT"
cat_vars
# + [markdown] id="BhUpbDm0ncFX"
# ## Store embeddings
# + id="kkIx78SdncFY"
# Truncated model ending at the merged embedding layer, for feature extraction.
submodel = Model(cat_var_inputs + cont_vars_inputs, merged)
# + id="3i7o9lkHncFZ"
submodel.summary()
# + id="cREjugvdncFa"
submodel.save('embeddings_model.hdf5')
# + id="FtshFFTencFc"
|
04-rossman/kaggle-rossmann-master/06-full-model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6
# language: python
# name: python3
# ---
# # In this Jupyter notebook we demonstrate how to build a python Predictive Model with Scikit-learn.
#
# The Dataset for personal loan classification is taken from: https://www.kaggle.com/itsmesunil/bank-loan-modelling
# +
import pandas as pd
import numpy as np
import matplotlib.pyplot as plot
import seaborn as sns
# %matplotlib inline
sns.set(style="ticks")
from scipy.stats import zscore
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import BaggingClassifier, RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import model_selection
# +
# Insert Cloud Object Storage Credentials and Load Dataset here.
# +
# Replace the credentials that you got from Watson Machine Learning service
# Watson Machine Learning credentials. SECURITY: never commit real values;
# load them from the environment or a secrets store instead.
wml_credentials = {
  "apikey": "<api key>",
  "instance_id": "<instance id>",
  "url": "<URL>"
}
# -
# Assign clean, space-free column names to the loaded loan dataset.
data.columns = ["ID","Age","Experience","Income","ZIPCode","Family","CCAvg","Education","Mortgage","PersonalLoan","SecuritiesAccount","CDAccount","Online","CreditCard"]
data.columns
# ### Exploring the dataset
# #### The dataset has 5000 rows of data and 14 attributes
data.shape
data.info()
# #### No columns have null data in the file
data.apply(lambda x : sum(x.isnull()))
# #### Eye balling the data
data.describe().transpose()
# #### Finding unique data
data.apply(lambda x: len(x.unique()))
# #### There are 52 records with negative experience. Before proceeding any further we need to clean the same
data[data['Experience'] < 0]['Experience'].count()
# #### Clean the negative variable
# Rows with strictly positive experience serve as donors for imputation.
dfExp = data.loc[data['Experience'] >0]
negExp = data.Experience < 0
column_name = 'Experience'
# #### Getting the customer ID who has negative experience
mylist = data.loc[negExp]['ID'].tolist()
# #### There are 52 records with negative experience
negExp.value_counts()
# #### So we Remove the negative experience records
# Impute each bad record with the median experience of donors sharing the
# same Age and Education. NOTE(review): `id` shadows the builtin of the same name.
for id in mylist:
    age = data.loc[np.where(data['ID']==id)]["Age"].tolist()[0]
    education = data.loc[np.where(data['ID']==id)]["Education"].tolist()[0]
    df_filtered = dfExp[(dfExp.Age == age) & (dfExp.Education == education)]
    exp = df_filtered['Experience'].median()
    data.loc[data.loc[np.where(data['ID']==id)].index, 'Experience'] = exp
# #### Verify records with negative experience are there or not
data[data['Experience'] < 0]['Experience'].count()
data.describe().transpose()
sns.boxplot(x='Education',y='Income',hue='PersonalLoan',data=data)
# **Observation** : It seems the customers whose education level is 1 is having more income. However customers who has taken the personal loan have the same income levels
sns.boxplot(x="Education", y='Mortgage', hue="PersonalLoan", data=data,color='yellow')
# **Inference** : From the above chart it seems that customer who do not have personal loan and customer who has personal loan have high mortgage
sns.countplot(x="SecuritiesAccount", data=data,hue="PersonalLoan")
# **Observation** : Majority of customers who does not have loan have securities account
sns.countplot(x='Family',data=data,hue='PersonalLoan',palette='Set1')
# **Observation** : Family size does not have any impact in personal loan. But it seems families with size of 3 are more likely to take loan. When considering future campaign this might be good association.
sns.countplot(x='CDAccount',data=data,hue='PersonalLoan')
# **Observation** : Customers who does not have CD account , does not have loan as well. This seems to be majority. But almost all customers who has CD account has loan as well
sns.boxplot(x=data.Family,y=data.Income,hue=data.PersonalLoan)
# **Observation** : Looking at the above plot, families with income less than 100K are less likely to take loan, than families with high income
print('Credit card spending of Non-Loan customers: ',data[data.PersonalLoan == 0]['CCAvg'].median()*1000)
print('Credit card spending of Loan customers : ', data[data.PersonalLoan == 1]['CCAvg'].median()*1000)
# ### Develop a Naive Bayes Model
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
# Split the data in to Training(70%) and Testing(30%)
# ID is an identifier and Experience is collinear with Age, so both are dropped.
train_set, test_set = train_test_split(data.drop(['ID','Experience'], axis=1), test_size=0.3 , random_state=100)
train_labels = train_set.pop('PersonalLoan')
test_labels = test_set.pop('PersonalLoan')
# #### Train the Model and get Predictions
# +
naive_model = GaussianNB()
naive_model.fit(train_set, train_labels)
prediction = naive_model.predict(test_set)
# Accuracy on the held-out test split.
naive_model.score(test_set,test_labels)
# -
# #### The model scores an accuracy of 88.67%
print(prediction)
# # Deploy the model to Watson Machine Learning
# !pip install watson-machine-learning-client
# Publish the trained model to Watson Machine Learning, deploy it, and score
# a sample payload against the online endpoint.
from watson_machine_learning_client import WatsonMachineLearningAPIClient
client = WatsonMachineLearningAPIClient(wml_credentials)
instance_details = client.service_instance.get_details()
published_model = client.repository.store_model(model=naive_model, meta_props={'name':'Personal Loan Prediction Model'}, \
training_data=train_set, training_target=train_labels)
# +
import json
published_model_uid = client.repository.get_model_uid(published_model)
model_details = client.repository.get_details(published_model_uid)
print(json.dumps(model_details, indent=2))
# -
models_details = client.repository.list_models()
# Round-trip check: reload the stored model and predict locally.
loaded_model = client.repository.load(published_model_uid)
test_predictions = loaded_model.predict(test_set[:10])
# The predictions made by the model.
print(test_predictions)
created_deployment = client.deployments.create(published_model_uid, 'Deployment of Personal Loan Prediction model')
deployments = client.deployments.get_details()
scoring_endpoint = client.deployments.get_scoring_url(created_deployment)
print(scoring_endpoint)
# +
#Age Income ZIPCode Family CCAvg Education Mortgage SecuritiesAccount CDAccount Online CreditCard
#39 139 95616 3 3.4 1 483 0 0 1 0
#29 31 92126 4 0.3 2 0 0 0 1 0
scoring_payload = { "fields":["Age","Income","ZIPCode","Family","CCAvg","Education","Mortgage","SecuritiesAccount","CDAccount","Online", "CreditCard"],"values":[[39,139,95616,3,3.4,1,483,0,0,1,0]]}
# scoring_payload = { "fields":["Age","Income","ZIPCode","Family","CCAvg","Education","Mortgage","SecuritiesAccount","CDAccount","Online", "CreditCard"],"values":[[29,31,92126,4,0.3,2,0,0,0,1,0]]}
# -
# Score the payload against the deployed endpoint; values[0][0] is the class.
predictions = client.deployments.score(scoring_endpoint, scoring_payload)
print(json.dumps(predictions, indent=2))
print(predictions['values'][0][0])
|
notebook/Python_Predictive_Model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <img src="images/logodwengo.png" alt="Banner" width="150"/>
# <div>
# <font color=#690027 markdown="1">
# <h1>PYTHAGORAS</h1>
# </font>
# </div>
# <div class="alert alert-block alert-success">
# De stelling van Pythagoras leerde je reeds in de wiskundeles. Met deze notebook zal je het rekenwerk van de oefeningen automatiseren. Bij het maken van de oefeningen in de wiskundeles kan je je dan volop concentreren op het denkwerk.
# </div>
# + [markdown] tags=[]
# <div class="alert alert-block alert-info">
# Er zijn eigenlijk maar twee types van oefeningen bij de stelling van Pythagoras:<br>
# - de lengte van de schuine zijde berekenen als de rechthoekszijden gekend zijn;<br>
# - de lengte van een rechthoekszijde berekenen als de schuine zijde en de andere rechthoekszijde gekend zijn.<br>
# Had je dit al door? Dan deed je aan <b>patroonherkenning</b>, een concept van computationeel denken.
# </div>
# -
# Dat betekent dat je voor de automatisatie slechts 2 functies moet definiëren. Een voor elk type oefening.
# ### Nodige modules importeren
# + tags=[]
import math # om vierkantswortels en pi te kunnen gebruiken
# + [markdown] tags=[]
# <div class="alert alert-block alert-warning">
# Meer voorbeelden van het gebruik van de module math vind je in de notebook 'Rekenen'.<br>
# In plaats van de module math kan je ook de module NumPy gebruiken. Numpy komt ook verder in deze notebook aan bod.
# </div>
# -
# <div>
# <font color=#690027 markdown="1">
# <h2>1. Functies</h2>
# </font>
# </div>
# <div>
# <font color=#690027 markdown="1">
# <h3>1.1 De lengte van de schuine zijde berekenen als de rechthoekszijden gekend zijn</h3>
# </font>
# </div>
# ### Opdracht
# Schrijf een script waarin je een functie `pythagoras` definieert. <br>
# Het script vraagt de gebruiker om de lengtes van de twee rechthoekszijden van een rechthoekige driehoek in te geven. Via de functie `pythagoras` zal het script de lengte van de schuine zijde berekenen. Het programma eindigt met het tonen van de lengte van de schuine zijde aan de gebruiker.<br>
# <br>
# Test het script uit en verbeter indien nodig.
# <div class="alert alert-block alert-info">
# Het bovenstaande script kunnen we opsplitsen in 4 onderdelen:
# <ol>
# <li>definitie functie</li>
# <li>input</li>
# <li>verwerking</li>
# <li>output</li>
# </ol>
# Voor het definiëren van een functie heb je enkel een definitie nodig. Eens je de functie wilt gaan testen en/of gebruiken, zal je ook invoer nodig hebben en zal er hopelijk een uitvoer verschijnen.
# <img src="images/invoerverwerkinguitvoerdwengo.png" alt="Banner" width="150"/>
# <img src="images/decompositiedwengo.png" alt="Banner" width="150"/>
# <img src="images/abstractiedwengo.png" alt="Banner" width="150"/>
# </div>
# <div class="alert alert-block alert-warning">
# Uitleg over input en output vind je terug in de notebook "InputEnOutput".
# </div>
# <div>
# <font color=#690027 markdown="1">
# <h3>1.2 De lengte van een rechthoekszijde berekenen als de schuine zijde en de andere rechthoekszijde gekend zijn</h3>
# </font>
# </div>
# ### Opdracht
# Schrijf een script waarin je een functie `pythagoras2` definieert. <br>
# Het script vraagt de gebruiker om de lengtes van de schuine zijde en een rechthoekszijde van een rechthoekige driehoek in te geven. Via de functie `pythagoras` zal het script de lengte van de andere rechthoekszijde berekenen. Het programma eindigt met het tonen van de lengte van de andere rechthoekszijde aan de gebruiker.<br>
# <br>
# Test het script uit en verbeter indien nodig.
# <div>
# <font color=#690027 markdown="1">
# <h2>2. Voorbeeld</h2>
# </font>
# </div>
# ### Voorbeeld 2.1
# Bereken de oppervlakte van een rechthoekige driehoek met een rechthoekszijde van 8 cm en een schuine zijde van 14 cm.
# +
import math
# oppervlakte kan berekend worden met functie uit vorige notebook
def oppdriehoek(b, h):
    """Return the area of a triangle with base b and height h."""
    return b * h / 2
# oppervlakte driehoek kan je berekenen als de basis en de hoogte gekend zijn, dus hier de twee rechthoekszijden
# andere rechthoekszijde berekenen met Pythagoras2
def pythagoras2(a, c):
    """Return the remaining leg of a right triangle with hypotenuse c and leg a."""
    leg_squared = c**2 - a**2
    return math.sqrt(leg_squared)
rhz1 = 8 # given leg (input)
sz = 14 # given hypotenuse (input)
rhz2 = pythagoras2(rhz1, sz) # other leg via Pythagoras (processing)
opp = oppdriehoek(rhz1, rhz2) # requested area (processing)
print("De oppervlakte van de driehoek is", opp, "cm².") # output
# -
# <div>
# <font color=#690027 markdown="1">
# <h2>3. Oefeningen</h2>
# </font>
# </div>
# ### Oefening 3.1
# Het stratenplan van New York bestaat vooral uit straten die loodrecht op elkaar staan.<br>
# Bekijk de figuren en bereken m.b.v. Python-code hoe ver het is in vogelvlucht van het Empire State Building naar Times Square.
# <img src="images/kaartnewyork.png" alt="Banner" width="400"/> <br>
# <img src="images/gpsnewyork.png" alt="Banner" width="400"/>
# Antwoord:
# ### Oefening 3.2
# Als je een ladder veilig tegen de muur wilt plaatsen, dan gebruik je best de '4 t.o.v. 1'- regel. Dat betekent dat voor elke 4 eenheden hoogte, de ladder 1 eenheid van de muur moet staan. <br><br>
# Stel dat een ladder veilig tegen de muur staat volgens de '4 t.o.v. 1'-regel en de muur raakt op 5 meter boven de grond.
# Hoe lang is de ladder? Bereken met Python-code.
# Antwoord:
# ### Oefening 3.3
# Een ladder van 10 meter lang staat tegen een muur op 1,5 meter van de muur. Staat de ladder veilig?
# Voer je rekenwerk uit met Python-code.
# Antwoord:
# <img src="images/cclic.png" alt="Banner" align="left" width="100"/><br><br>
# Notebook Python in wiskunde, zie Computationeel denken - Programmeren in Python van <a href="http://www.aiopschool.be">AI Op School</a>, van <NAME>, B. Van de Velde & N. Gesquière is in licentie gegeven volgens een <a href="http://creativecommons.org/licenses/by-nc-sa/4.0/">Creative Commons Naamsvermelding-NietCommercieel-GelijkDelen 4.0 Internationaal-licentie</a>.
|
Wiskunde/FunctiesPythagoras/0300_Pythagoras.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.5 64-bit
# name: python3
# ---
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
game = pd.read_csv("game.csv")
game
game.isna().sum()
# ## No Missing Files
# +
# Creating a Tfidf Vectorizer to remove all stop words
tfidf = TfidfVectorizer(stop_words = "english") # taking stop words from tfid vectorizer
tfidf
# taking top english top words
# +
# Preparing the Tfidf matrix by fitting and transforming
tfidf_matrix = tfidf.fit_transform(game.game)
# Transform a count matrix to a normalized tf or tf-idf representation
tfidf_matrix.shape
# -
tfidf_matrix
# with the above matrix we need to find the similarity score¶
#
# There are several metrics for this such as the euclidean,
#
# the Pearson and the cosine similarity scores
#
# For now we will be using cosine similarity matrix
#
# A numeric quantity to represent the similarity between 2 Games
#
# Cosine similarity - metric is independent of magnitude and easy to calculate
from sklearn.metrics.pairwise import linear_kernel
# Computing the cosine similarity on Tfidf matrix
cosine_sim_matrix = linear_kernel(tfidf_matrix, tfidf_matrix)
cosine_sim_matrix
# +
# creating a mapping of entertainment name to index number
gamee_index = pd.Series(game.index, index = game['game']).drop_duplicates()
gamee_index
# +
def get_recommendations(game, topN):
    """Print the topN games most similar to *game* by TF-IDF cosine similarity.

    game : str -- title of a game present in ``gamee_index``
    topN : int -- number of similar games to report (slot 0 is the queried
                  game itself, since its self-similarity is 1.0)

    Relies on the module-level ``gamee_index`` (title -> row index) and
    ``cosine_sim_matrix`` built in the cells above.
    """
    # Getting the game index using its title
    game_id = gamee_index[game]
    # Pairwise similarity score of every game against the queried one
    cosine_scores = list(enumerate(cosine_sim_matrix[game_id]))
    # Sorting the cosine_similarity scores based on scores
    cosine_scores = sorted(cosine_scores, key=lambda x:x[1], reverse = True)
    # Get the scores of top N most similar games (plus the game itself)
    cosine_scores_N = cosine_scores[0: topN+1]
    # Getting the game indices and their scores
    game_idx = [i[0] for i in cosine_scores_N]
    game_scores = [i[1] for i in cosine_scores_N]
    # Similar games and scores
    game_similar_show = pd.DataFrame(columns=["Score"])
    # BUG FIX: the parameter `game` shadows the global DataFrame of the same
    # name, so the original `game.loc[game_idx, "game"]` called .loc on a str
    # and raised AttributeError. Recover the titles from gamee_index instead,
    # whose index holds the titles in DataFrame row order.
    game_similar_show["game"] = pd.Series(list(gamee_index.index[game_idx]), index=game_idx)
    game_similar_show["Score"] = game_scores
    game_similar_show.reset_index(inplace = True)
    print (game_similar_show)
# -
# Query the recommender and show the raw index lookup for the same title.
get_recommendations("The Legend of Zelda: Ocarina of Time", topN = 10)
gamee_index["The Legend of Zelda: Ocarina of Time"]
|
game.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Decision Trees
#
# Imagine you have a set of data you would like to classify. One potential solution to this problem would be to attempt to define rules that would allow you to do quickly evaluate where the best locations to split the data are. Then, imagine you could add rules to either side of the decision. This idea of branching based on features is the fundamental concept behind a decision tree.
#
# Let's visualize.
# +
import pandas as pd
import numpy as np
import graphviz
from matplotlib import pyplot as plt
import seaborn as sns
sns.set()
from sklearn import datasets
from sklearn import tree
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# +
#Data Generation
# Two Gaussian blobs in 2D; R is the class label for each (A, B) point.
AB, response = datasets.make_blobs(n_samples=100, centers=2, n_features=2, center_box=(-2,2))
data = {'A':[],'B':[],'R':[]}
for i in AB:
    data['A'].append(i[0])
    data['B'].append(i[1])
    data['R'].append(response[len(data['A'])-1])
data = pd.DataFrame(data)
sns.relplot(data=data, x='A',y='B', hue='R')
# -
#One Deep
# A single-split (depth-1) tree: one decision boundary.
t = DecisionTreeClassifier(max_depth=1,criterion='entropy')
t.fit(data[['A','B']],data['R'])
graphviz.Source(tree.export_graphviz(t, out_file=None))
# Entropy can be defined as a measure of homogeneity of a group where at $entropy=1$ you have an even mixture of observations and at $entropy=0$ you have a completely pure class. We won't dive to deep into the mathematics, but it can be mathematically expressed as $$entropy=\sum_{i=1}^n(-p_ilog_2(p_i))$$
#One Deep Decision Surface
# Predict over a dense grid to draw the classifier's decision regions.
x_min, x_max = data['A'].min()-1, data['A'].max()+1
y_min, y_max = data['B'].min()-1, data['B'].max()+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .01),np.arange(y_min, y_max, .01))
Z = t.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap='RdBu')
plt.scatter(x=data['A'],y=data['B'],c=data['R'],cmap='RdBu',vmin=-.2, vmax=1.2,edgecolors='white')
plt.xlabel('A')
plt.ylabel('B')
# Now, to get more precision, we can increase the level of depth.
#Two Deep
t = DecisionTreeClassifier(max_depth=2,criterion='entropy')
t.fit(data[['A','B']],data['R'])
graphviz.Source(tree.export_graphviz(t, out_file=None))
#One Deep Decision Surface
x_min, x_max = data['A'].min()-1, data['A'].max()+1
y_min, y_max = data['B'].min()-1, data['B'].max()+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .01),np.arange(y_min, y_max, .01))
Z = t.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap='RdBu')
plt.scatter(x=data['A'],y=data['B'],c=data['R'],cmap='RdBu',vmin=-.2, vmax=1.2,edgecolors='white')
plt.xlabel('A')
plt.ylabel('B')
#Very Deep
# Depth 15 overfits: the surface traces individual points, not the trend.
t = DecisionTreeClassifier(max_depth=15,criterion='entropy')
t.fit(data[['A','B']],data['R'])
x_min, x_max = data['A'].min()-1, data['A'].max()+1
y_min, y_max = data['B'].min()-1, data['B'].max()+1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .01),np.arange(y_min, y_max, .01))
Z = t.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap='RdBu')
plt.scatter(x=data['A'],y=data['B'],c=data['R'],cmap='RdBu',vmin=-.2, vmax=1.2,edgecolors='white')
plt.xlabel('A')
plt.ylabel('B')
# Though we may increase the level of accuracy we can achieve (on our train data), what happens is that this is no longer fitting the general trends of the data, rather it is modeling the idiosyncrancries of our dataset. If we are to run a split and then test, we can find what the optimal level of depth is.
# +
# For three noise levels of the two-moons dataset: sweep tree depth 1..30,
# plot train/test ROC-AUC vs depth, and draw the decision surface at the
# best-scoring depth.
fig, axs = plt.subplots(ncols=3,nrows=3,figsize=(15,15))
for j in range(3):
    #Data Generation
    AB, response = datasets.make_moons(n_samples=500,noise=0.2*(j+1))
    data = {'A':[],'B':[],'R':[]}
    for i in AB:
        data['A'].append(i[0])
        data['B'].append(i[1])
        data['R'].append(response[len(data['A'])-1])
    data = pd.DataFrame(data)
    xx, yy = np.meshgrid(np.arange(data['A'].min()-1, data['A'].max()+1, .01),
                         np.arange(data['B'].min()-1, data['B'].max()+1, .01))
    #Train/Test Split
    X_train, X_test, y_train, y_test = train_test_split(data[['A','B']], data['R'], test_size=0.2)
    #Predictions
    test_accuracy = []
    train_accuracy = []
    for i in range(30):
        t = DecisionTreeClassifier(max_depth=i+1,criterion='entropy')
        t.fit(X_train,y_train)
        # NOTE(review): cross_validate is run on the held-out TEST split, not
        # on the training split — verify this is intentional.
        train = cross_validate(t,X=X_test,y=y_test,scoring='roc_auc',
                              cv=3,return_train_score=True)
        test_accuracy.append(sum(train['test_score'])/len(train['test_score']))
        train_accuracy.append(sum(train['train_score'])/len(train['train_score']))
    # NOTE(review): linspace(1,31,30) yields non-integer steps; depths are
    # actually 1..30 — an index of range(1,31) would label the axis exactly.
    accs = pd.DataFrame({'Test':test_accuracy, 'Train':train_accuracy}).set_index(np.linspace(1,31,30))
    #Create Plots
    sns.scatterplot(data=data, x='A',y='B', hue='R',ax=axs[j,0]).set(title='Data Set')
    sns.lineplot(data=accs,ax=axs[j,1]).set(title='Accuracy over Depth')
    #Plot Contour
    bestDepth = test_accuracy.index(max(test_accuracy))
    t = DecisionTreeClassifier(max_depth=bestDepth+1,criterion='entropy')
    t.fit(X_train,y_train)
    Z = t.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)
    axs[j,2].contourf(xx, yy, Z, cmap='RdBu')
    axs[j,2].scatter(x=data['A'],y=data['B'],c=data['R'],cmap='RdBu',vmin=-.2, vmax=1.2,edgecolors='white')
    axs[j,2].set(xlabel='A',ylabel='B',title='Depth'+str(bestDepth+1))
# -
# You're not required to shoot in the dark as it come to customization of your tree. There exists something called a hyperparameter. Recall from our session on logistic regression that there are certain inputs you give your model that are not driven by the data - rather selected explicitly. In the case of the hyperparameter $C=\frac{1}{\lambda}$ we tune the level of punishment we give for complexity. This type of change results in a different model for each value we give. In the case of a decision tree, we have the following hyperparameters:
#
# 1) **max_depth**: This is what we see as a the maximum number of layers to go down in the tree, i.e. max splits
#
# 2) **min_samples_split**: The minimum number or ratio of samples required to have a split, default = 2
#
# 3) **min_samples_leaf**: The minimum number of samples in a leaf (i.e. end point), default = 1
#
# 4) **max_features**: The maximum number of features to include, default = unlimited
#
# 5) **max_leaf_nodes**: The maximum number of leafs allowed, default = unlimited
#
# 6) **criterion**: 'gini' or 'entropy' (Impurity or Information Gain)
#
# There are no hard rules as to what works best - much of this is heavily dependent on the size and quality of your data. For example with millions of data points, you likely don't have a need for leafs with only one observation. Likewise, for small datasets, you will likely find some gain from having that be allowed. This part of model creation is more art than science and requires experimentation.
#
# ## Regression
# You can also use this sort of method for regression - to predict a number rather than a category.
#Generate Data
X = np.random.randn(200)*2
Y = list(map(lambda x: np.sin(x) + np.random.randn(1)[0]/10,X))
sns.relplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y}))
# +
#Generate Decision Trees
# Fit six regression trees of increasing depth on the noisy sine data and
# plot each fit (red line) over the raw points in a 2x3 grid of subplots.
fig, axs = plt.subplots(ncols=3,nrows=2,figsize=(15,12))
c = 0
r = 0
for i in range(6):
    t = DecisionTreeRegressor(max_depth=i+2,min_samples_leaf=4)
    t.fit(np.array(X).reshape(-1,1),Y)
    score = t.score(np.array(X).reshape(-1,1),Y)
    # Fix: the title previously reported i+1 while the model was built with
    # max_depth=i+2; report the depth actually used.
    sns.scatterplot(x='X',y='Y',data=pd.DataFrame({'X':X,'Y':Y}),
                    ax=axs[r,c]).set(title='Depth: '+str(i+2)+', Score: '+str(round(score,2)))
    p = t.predict(np.array(X).reshape(-1,1))
    sns.lineplot(x='X',y='P',data=pd.DataFrame({'X':X,'P':p}),ax=axs[r,c],color='r')
    # Advance to the next subplot, wrapping to a new row after column 2.
    if c == 2 :
        c=0
        r+=1
    else:
        c+=1
# -
# ## Tuning the Hyperparameters
#
# How we saw above where there is such a large number of tunable variables, we would like to develop a way to analyze the results of our changes to these, and eventually develop a programmatic method for evaluating our success. We will find that the process can be automated to an extent, but as it comes to the complexity accuracy tradeoff, it will become a game of experience and feeling.
#
# Let's start by remembering our methods of tuning that we developed during the Logistic Regression session, but for a decision tree. For now, we consider only max_depth.
# +
#Generate Data
# Score a decision tree on a held-out 20% split for a range of max_depth
# values, to see how accuracy behaves as the tree grows deeper.
ABCD, response = datasets.make_blobs(n_samples=500, centers=2,
                                     n_features=4, center_box=(-2,2))
X_train, X_test, y_train, y_test = train_test_split(ABCD, response, test_size=0.2)
depths = [1,2,3,4,5,10,15,20,25,30]
accs = []
for i in depths:
    dtc = DecisionTreeClassifier(max_depth=i)
    dtc.fit(X_train,y_train)
    accs.append(dtc.score(X_test,y_test))
print('Scores:\n',pd.DataFrame({'Accuracy':accs},index=depths))
# -
# From this, we can quickly see how our data is behaving as we increase the depth of the trees. One thing we might also be interested in doing is evaluating this as a cross-validation to make sure that we are getting what is as close to a real number as possible. This may look something like the following.
# +
#Generate Data
# Same depth scan as above, but using 5-fold cross-validation instead of a
# single train/test split for a more stable accuracy estimate per depth.
ABCD, response = datasets.make_blobs(n_samples=500, centers=2,
                                     n_features=4, center_box=(-2,2))
depths = [1,2,3,4,5,10,15,20,25,30]
accs = []
for i in depths:
    dtc = DecisionTreeClassifier(max_depth=i)
    accs.append(cross_val_score(dtc,ABCD,response,cv=5).mean())
print('Scores:\n',pd.DataFrame({'Accuracy':accs},index=depths))
# -
# In the situation that we would like to evaluate more than just the depth of the tree, for example, also the min_samples_leaf, we can nest some for loops to create this behavior. Let's see.
# +
#Generate Data
# Manual two-parameter grid search: cross-validate every (max_depth,
# min_samples_leaf) combination and rank the results by accuracy.
ABCD, response = datasets.make_blobs(n_samples=500, centers=2,
                                     n_features=4, center_box=(-2,2))
X_train, X_test, y_train, y_test = train_test_split(ABCD, response, test_size=0.2)
depths = [1,2,3,4,5,10,15,20,25,30]
leafs = [10,20,30,40,50,60]
# Collect one result row per combination in a plain list and build the
# DataFrame once at the end: DataFrame.append was deprecated and removed in
# pandas 2.0, and appending row-by-row is quadratic anyway.
rows = []
for i in depths:
    for j in leafs:
        dtc = DecisionTreeClassifier(max_depth=i,min_samples_leaf=j)
        rows.append({'Depth':i,'Leaf Size':j,
                     'Accuracy':cross_val_score(dtc,ABCD,response,cv=5).mean()})
accs = pd.DataFrame(rows, columns=['Depth','Leaf Size','Accuracy'])
print('Scores:\n',accs.sort_values('Accuracy',ascending=False))
# -
# We can continue to do this sort of methodology with $n$ nested for-loops for $n$ hyperparameters - but who has time for that. SKLearn has an in built feature to do exactly this sort of analysis. This method, called GridSearchCV does more or less exactly what we have been exploring, but with even more features. The way you pass the sets of values you would like to evaluate over is very similar to what we have done so far. Just like in doing our decision trees, we can see that the relationship between the number of hyperparameters and the computation time is exponential.
#
# The time to perform these computations has the following form where $\mid P_k\mid$ is the length of the list of potential values for each parameter:
#
# $$Computation\ Time=\left(\prod_{k=1}^{n}\mid P_k\mid\right)\times CV,\ n\ parameters$$
#
# Let's do the same model as above, but with this new technique.
# +
# GridSearchCV automates the nested-loop search above: 10 depths x 6 leaf
# sizes, each scored with 5-fold cross-validation.
ABCD, response = datasets.make_blobs(n_samples=500, centers=2,
                                     n_features=4, center_box=(-2,2))
dtc = DecisionTreeClassifier(criterion='entropy') #Note, you can declare a constant hyperparameter in your model
grid = {'max_depth':[1,2,3,4,5,10,15,20,25,30],
        'min_samples_leaf':[10,20,30,40,50,60]}
gs = GridSearchCV(dtc,grid,cv=5,verbose=True,return_train_score=False)
gs.fit(ABCD,response)
# Keep only the hyperparameter columns ('param_*') and the CV score,
# sorted best-first.
scores = pd.DataFrame(gs.cv_results_).filter(regex='param_+|mean_test_score'
                                            ).sort_values('mean_test_score',
                                            ascending=False).reset_index().drop(['index'],axis=1)
scores.head(20)
# -
# It's important to note that this final method to declare scores uses a powerful tool called a regular expression where you can match text based on certain conditions. In our case, we are saying to match anything that starts with 'param_' and has at least one character after that, or something that is exactly 'mean_test_score'. What this gives us is each of our hyperparameters (which are now param_name).
#
# Now, as an exercise, you will modify the code that we used to analyze two hyperparameters, and now also include the 'criterion' hyperparameter. Please consult the documentation for details on its possible values. Then print out exactly the same type of table as above with this new column
# +
# Exercise solution: grid search over min_samples_leaf, criterion, and
# max_features, with max_depth held fixed in the estimator itself.
ABCD, response = datasets.make_blobs(n_samples=500, centers=2,
                                     n_features=4, center_box=(-2,2))
dtc = DecisionTreeClassifier(max_depth=4)
grid = {'min_samples_leaf':[10,20,30,40,50,60],
        'criterion':['gini','entropy'],
        'max_features':[1,2,3,4]}
gs = GridSearchCV(dtc,grid,cv=5,verbose=True,return_train_score=False)
gs.fit(ABCD,response)
# Same tidy-results pattern as before: parameters + mean CV score, best first.
scores = pd.DataFrame(gs.cv_results_).filter(regex='param_+|mean_test_score'
                                            ).sort_values('mean_test_score',
                                            ascending=False).reset_index().drop(['index'],axis=1)
scores.head(10)
# -
# It might be tempting to simply say, we want to maximize our accuracy so we choose the best performing model. Now, in some cases this may be what you end up doing, but more often, you will select a model that strikes a balance between complexity and accuracy.
#
# Let's discuss a theoretical example. If you were to have a model that is evaluating max_depth and min_samples_leaf, what you would like to do is predict accurately and simply to avoid overfitting. One method for this is to do a cost-benefit analysis on the $\Delta complexity\ $to$\ \Delta accuracy$ relationship.
#
# If our output from the grid search were to give us something like: (max_depth,min_samples_leaf,accuracy)=(10,50,.8),(15,50,.802) we must make a decision of which one is better. In thinking of this in terms of the above relationship, we have a percentage change of 50% in max_depth, with a resulting percentage change of 0.25% in accuracy. It should be a relatively safe bet that this change in accuracy would likely fall within a CI of accuracy based on our cross validation, or at least is so insignificant (not in the statistical way, but philosophical way) that including 50% more complexity is not worth it.
#
# Based on this, let's go ahead and try to select what we think the best model might be out of scores. Note, there is no right answer to this question (though I would argue there are wrong answers).
scores.head(20)
# Now that we can understand decision trees and build a functional, well-tuned model, let's move on to something that is a little bit more complex.
# # Random Forest
# Just like how having one decision tree is a good way to model data, if you have many simultaneous trees, you can find even better predictive power, especially in situations where you have many features. Similarly to all of the models that we have seen, this can overfit as well. Let's go over the techniques for preparing, analyzing, and predicting.
#Data Generation
# Two-class, five-feature blob dataset with an 80/20 train/test split,
# reused by the random-forest examples below.
AB, response = datasets.make_blobs(n_samples=1000, centers=2, n_features=5, center_box=(-2,2))
X_train, X_test, y_train, y_test = train_test_split(AB, response, test_size=0.2)
# In this case, we are dealing with 2 classes, but the process can be generalized to n classes. Let's start with a single tree. This is declared using the n_estimators parameter meaning the number of trees we are using to do our estimation.
# A small forest (5 trees, depth 10) fitted and scored on the held-out split.
rfc = RandomForestClassifier(max_depth=10,n_estimators=5)
rfc.fit(X_train,y_train)
print(rfc.score(X_test,y_test))
# As we can see, this behaves exactly like any other model in SKLearn. In fact, there are some methods shared by nearly all models: fit and score. Using these, you can quickly change and evaluate different types of models, something that may come in useful over the following days while you fit your own models.
#
# It should be noted that as we increase the complexity of our machine learning models, what we are going to find is that computation will take longer and longer. Oftentimes, in preparing my own models I will be tuning hyperparameters on a time scale measured in hours rather than in miliseconds as we have had so far. If your projects end up requiring a significant amount of processing power, we can consult on techniques to offload this CPU demand to the cloud - but for now we will simply suffer the waiting.
#
# Below you will see some skeleton code for how to implement the grid search over our newfound parameter for the number of trees. It will be your task to flesh out this code.
# +
# Grid search for the random forest: number of trees, depth, and leaf size,
# each combination scored with 5-fold cross-validation.
Xs, y = datasets.make_classification(n_samples=500,n_features=5,n_redundant=1,n_informative=4)
rfc = RandomForestClassifier()
grid = {'n_estimators':[1,2,3,4], #n_estimators - 4 or 5 values
        'max_depth':[2,4,6,8,10,12,14,16], #max_depth
        'min_samples_leaf':[10,20,30,40,50]} #min_samples_leaf
gs = GridSearchCV(rfc,grid,cv=5,verbose=True,return_train_score=False)
gs.fit(Xs,y)
# Tidy results table: hyperparameters + mean CV score, best first.
scores = pd.DataFrame(gs.cv_results_).filter(regex='param_+|mean_test_score'
                                            ).sort_values('mean_test_score',
                                            ascending=False).reset_index().drop(['index'],axis=1)
scores.head(15)
# -
# Just like how we saw the numerical prediction equivalent to DecisionTreeClassifier is DecisionTreeRegressor, the numerical predictor for RandomForestClassifier is RandomForestRegressor. There is really no difference between how you evaluate the accuracy of a classifier versus a regression model for RandomForest, and you gain a little more freedom than in pure regressions as the assumptions are relaxed. In general, so long as the model performs well for regression, all is okay.
#
# Let's see a quick example of how we can do this.
# +
# Same grid-search pattern, but for RandomForestRegressor on a synthetic
# regression problem (scores here are R^2, not accuracy).
Xs, y = datasets.make_regression(n_samples=500,n_features=5,n_informative=4,noise=0.5)
rfr = RandomForestRegressor()
grid = {'n_estimators':[1,2,3,4,5,10,15,20],
        'max_depth':[1,2,3,4,5,6,7,8,9,10],
        'min_samples_leaf':[10,20,30,40,50]}
gs = GridSearchCV(rfr,grid,cv=5,verbose=True,return_train_score=False)
gs.fit(Xs,y)
scores = pd.DataFrame(gs.cv_results_).filter(regex='param_+|mean_test_score'
                                            ).sort_values('mean_test_score',
                                            ascending=False).reset_index().drop(['index'],axis=1)
scores.head(10)
# -
# As individuals, please now select which model you think performs the best based on what you can see in the scores and relative complexity - note that the number of estimators is a huge complexity factor.
#
# Please then make a plot of the residuals for this graph by plotting the predicted values against the real values of y.
# +
# Fit the chosen regressor on a fresh split and plot the test-set residuals
# (true y minus prediction); a good fit scatters them around zero.
Xs, y = datasets.make_regression(n_samples=500,n_features=5,n_informative=4,noise=0.5)
X_train, X_test, y_train, y_test = train_test_split(Xs, y, test_size=0.2)
rfr = RandomForestRegressor(n_estimators=20,max_depth=8,min_samples_leaf=10)
rfr.fit(X_train,y_train)
predictions = rfr.predict(X_test) #=y-hat, y=y
resid = y_test-predictions
sns.relplot(data=pd.DataFrame({'Residuals':resid}))
# -
# Now that we have built out well tuned models for both regression and classification, you are ready to go out and build for real!
#
# Before that though, let's speak briefly on how we should go about evaluating our successes.
# ## Evaluating Accuracy
# The below will illustrate three very similar measures of accuracy that you will select based on what sort of performance you care about in your model. Generally, the behavior of one will be highly correlated to the behavior of another with them often having exactly the same model all the way through to the thousands place.
# +
#Train/Test Split
# Compare three evaluation metrics (accuracy, macro precision, ROC AUC) as a
# function of tree depth on a held-out split.
X_train, X_test, y_train, y_test = train_test_split(AB, response, test_size=0.2)
#Predictions
accuracy = []
precision = []
roc = []
for i in range(30):
    t = DecisionTreeClassifier(max_depth=i+1,criterion='entropy')
    t.fit(X_train,y_train)
    accuracy.append(accuracy_score(y_test,t.predict(X_test)))
    precision.append(precision_score(y_test,t.predict(X_test),average='macro'))
    roc.append(roc_auc_score(y_test,t.predict(X_test),average='macro'))
#Create Plots
# Fix: the x-axis previously used np.linspace(1,31,30), which produces
# non-integer ticks (1, 2.03, ...) that do not match the actual depths
# 1..30 used above; np.arange(1,31) labels each point with its true depth.
fig, axs = plt.subplots(ncols=3,figsize=(15,5))
sns.lineplot(x='Depth',y='Accuracy',data=pd.DataFrame({'Depth':np.arange(1,31),'Accuracy':accuracy}),
             ax=axs[0]).set(title='Prediction Accuracy over Depth')
sns.lineplot(x='Depth',y='Precision',data=pd.DataFrame({'Depth':np.arange(1,31),'Precision':precision}),
             ax=axs[1]).set(title='Prediction Precision over Depth')
sns.lineplot(x='Depth',y='AUC',data=pd.DataFrame({'Depth':np.arange(1,31),'AUC':roc}),
             ax=axs[2]).set(title='Prediction AUC over Depth')
# -
# What we see are three different calculations for how to evaluate the success of a model.
#
# $$tp=True\ Positive,\ tn=True\ Negative,\ fp=False\ Positive,\ fn=False\ Negative$$
#
# $$Accuracy = \frac{tp+tn}{tp+tn+fp+fn}$$
#
# $$Precision = \frac{tp}{tp+fp}$$
#
# $$AUC=Area\ Under\ ROC\ Curve$$
#
# Let us quickly also review what the ROC AUC means.
# +
# Fit a small forest on a noisy two-moons problem and draw its ROC curve
# against the chance diagonal.
Xs, y = datasets.make_moons(n_samples=500,noise=1.1)
#Train/Test Split
X_train, X_test, y_train, y_test = train_test_split(Xs, y, test_size=0.2)
#Predictions
rfc = RandomForestClassifier(max_depth=10,n_estimators=5).fit(X_train,y_train)
# Fix: sklearn's metric signatures are (y_true, y_score) — ground truth comes
# first. The original calls passed the predictions first, i.e. treated the
# model output as the truth and the labels as the score.
aucscore = metrics.roc_auc_score(y_test, rfc.predict(X_test))
##Computing false and true positive rates
fpr, tpr,_= metrics.roc_curve(y_test, rfc.predict(X_test), drop_intermediate=False)
##Adding the ROC
plt.plot(fpr, tpr, color='red',lw=2, label='ROC curve')
plt.plot([0, 1], [0, 1], color='blue', lw=2, linestyle='--')
plt.xlabel('FPR')
plt.ylabel('TPR')
plt.title('ROC Curve AUC = '+str(aucscore.round(4)))
plt.show()
# -
# ## Visualizing Random Forest Model Selection
# The below heatmaps show each of the measures of accuracy based on a set of heatmaps that have color representing the predictive capability of the model. This is an easy way of being able to quickly spot which families of models are likely going to be the best performing. As you move away from the origin, you would like to maximize the green while minimizing the distance you have to travel.
# +
#Train/Test Split
# Sweep n_estimators (1..30) x max_depth (1..20) and record accuracy,
# micro precision, and ROC AUC for each combination; the nested lists feed
# the heatmaps in the next cell (rows = number of trees, cols = depth).
AB, response = datasets.make_moons(n_samples=500,noise=0.5)
X_train, X_test, y_train, y_test = train_test_split(AB, response, test_size=0.40)
scores = {'Accuracy':[],'Precision':[],'ROC':[]}
for i in range(30):
    scores_acc = []
    scores_pre = []
    scores_roc = []
    for j in range(20):
        rf = RandomForestClassifier(n_estimators=i+1,max_depth=j+1)
        rf.fit(X_train,y_train)
        # (removed an unused local `splits=3` that was never read)
        scores_acc.append(accuracy_score(y_test,rf.predict(X_test)))
        scores_pre.append(precision_score(y_test,rf.predict(X_test),average='micro'))
        scores_roc.append(roc_auc_score(y_test,rf.predict(X_test),average='micro'))
    scores['Accuracy'].append(scores_acc)
    scores['Precision'].append(scores_pre)
    scores['ROC'].append(scores_roc)
# +
# Render one heatmap per metric, sharing a single colorbar (attached only to
# the first plot via cbar_ax); y-axis inverted so tree count grows upward.
fig, axs = plt.subplots(ncols=3,figsize=(16,10))
cbar_ax = fig.add_axes([.91,.3,.03,.4])
for i in range(3):
    sns.heatmap(scores[list(scores.keys())[i]],xticklabels=np.arange(20)+1,yticklabels=np.arange(30)+1,
                cmap='RdYlGn',ax=axs[i],cbar = i==0,cbar_ax=None if i else cbar_ax)
    axs[i].set(title=list(scores.keys())[i],xlabel='Tree Depth',ylabel='Number of Trees')
    axs[i].set_xticklabels(np.arange(20)+1,rotation=0)
    axs[i].invert_yaxis()
# Leave room on the right for the shared colorbar.
fig.tight_layout(rect=[0, 0, .9, 1])
# -
# You can now select and predict based on whichever tree you feel may give you the best outcome. In our case, accuracy is likely going to be the deciding factor.
# ## A Final Test
#
# We would like to now be able to check our understanding. Please take 15-30 minutes to build out each of these regression and classification models using the automated tuning system and select what you think the best model is and why. Then please discuss in your group why you chose that model.
# Datasets for the closing exercise.
# NOTE(review): the second assignment immediately overwrites the first, so
# only the regression data survives — keep whichever task you intend to run.
Xs, y = datasets.make_classification(n_samples=500,n_features=5,n_redundant=0)
Xs, y = datasets.make_regression(n_samples=500,n_features=5,noise=0.5)
|
Instructor Documents/6DecisionTrees_and_RandomForest_Instructor.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class EvenList:
    """Wrap a list and provide divisibility-based filtering."""

    def __init__(self, arr):
        # Keep a reference to the caller's list (no copy is made).
        self.my_arr = arr

    def filter_mod(self, mod=2):
        """Return the elements of ``my_arr`` divisible by ``mod`` (default 2)."""
        # A comprehension replaces the original manual append loop.
        return [elem for elem in self.my_arr if elem % mod == 0]


class MyList(EvenList):
    """EvenList subclass that adds summation helpers."""

    def print_sum(self):
        """Return the sum of all stored elements."""
        return sum(self.my_arr)

    def print_mod_sum(self, mod=2):
        """Print the elements divisible by ``mod`` and return their sum."""
        # The filtered list is also cached on the instance as ``mod_list``.
        self.mod_list = self.filter_mod(mod)
        print("Returned list is {}".format(self.mod_list))
        return sum(self.mod_list)
# Demo: exercise MyList with the integers 0..9.
my_arr = list(range(10))
print(my_arr)
my_obj = MyList(my_arr)
# Attribute inherited from EvenList.__init__.
my_obj.my_arr
my_obj.print_sum()
# Sums of the elements divisible by 2, 3, and 5 respectively.
my_obj.print_mod_sum(2)
my_obj.print_mod_sum(3)
my_obj.print_mod_sum(5)
|
Section_2/Object Oriented Programming.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] toc=true
# <h1>Text to Speech and Speech to Text<span class="tocSkip"></span></h1>
# <div class="toc"><ul class="toc-item"></ul></div>
# -
import speech_recognition as sr
import pyttsx3 as tts
import pyaudio as aud
# Shared recognizer and default-microphone handles used by the functions below.
r=sr.Recognizer()
mic=sr.Microphone()
# +
# Text-to-speech: read the given text out loud through the system voice.
def TextToSpeech(text):
    engine =tts.init()
    engine.say(text)
    # runAndWait blocks until the queued voice commands have been spoken.
    engine.runAndWait()
# Speech-to-text: capture speech from the microphone, convert it to text with
# the Google recognizer, echo it back via text-to-speech, and return it.
def SpeechToText():
    with mic as source:
        # Calibrate for background noise before listening.
        r.adjust_for_ambient_noise(source)
        audio = r.listen(source)
    text=r.recognize_google(audio)
    TextToSpeech(text)
    return text
print('I am gonna speech \'my name is <NAME>\' and it is gonna convert the speech to text and print \n')
SpeechToText()
# -
# These are the configurable properties of the pyttsx3 engine; tune them as needed.
engine =tts.init()
o1=engine.getProperty('voices')   # list of available voice objects
o2=engine.getProperty('rate')     # current speaking rate (words per minute)
o2=engine.getProperty('volume')   # NOTE(review): overwrites o2 (rate) — likely meant a separate variable
engine.setProperty('voice', o1[1].id) #o1[1] is female voice, o1[0] is male voice
engine.setProperty('rate', 100) #default rate is 200Words per minute
engine.setProperty('volume', 1) #volume from 0-1, default is 1
engine.say('hello world')
engine.runAndWait()
|
TextToSpeech & SpeechToText Python Module.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Import Model
from keras.models import load_model
from theano import ifelse
model = load_model('Models/best.h5') #specify path to model
# +
import os
import matplotlib.image as mplimg
import numpy as np
from pathlib import Path
import matplotlib.pyplot as plt
window_size = 50  # size of the context window fed to the CNN (pixels)
patch_size = 16   # size of the labeled patch at the window's center (pixels)
# Symmetric border added around each patch to fill the window: (50-16)/2 = 17.
padding = int((window_size - patch_size)/2)
# -
# # Test
# + code_folding=[0]
def Test(model,ws,ps,padding):
    """Test the model with testing data.

    model   : trained Keras classifier with a 2-way softmax output
    ws      : window size of each extracted patch (pixels)
    ps      : stride / labeled-patch size (pixels)
    padding : border added around each image so edge patches get full windows

    Returns (Z, img_id, Test): binary per-patch predictions, the matching
    patch id strings, and the raw test images.
    """
    def load_test_images():
        # Each test image lives in its own subfolder test_<i>/ of test_dir.
        test_dir = "test_set_images/"
        files = os.listdir(test_dir)
        # NOTE(review): assumes exactly one non-image entry in test_dir;
        # confirm this matches the dataset layout.
        n = len(files)-1
        print("Loading " + str(n) + " images")
        test_img=[]
        for i in range(n):
            cd=test_dir+"test_"+str(i+1)+"/"
            im = mplimg.imread(cd + os.listdir(cd)[0])
            test_img.append(im)
        test_img = np.asarray(test_img)
        return test_img
    Test = load_test_images()
    print(Test.shape)
    def extract_patches(test_img):
        # 50 images x 38x38 = 1444 patches each -> 72200 windows in total.
        Test_patches = np.empty((72200,ws,ws,3))
        imgs_id=[]
        def img_crop(im, w, h, l):
            # Slide a (w x h) grid over padded image `im` (index `l`),
            # extracting one padded window per grid cell, column-major.
            list_patches = np.empty((1444,ws,ws,3))
            img_id=[]
            imgwidth = im.shape[0]
            imgheight = im.shape[1]
            for i in range(0,imgheight-2*padding,h):
                for j in range(0,imgwidth-2*padding,w):
                    im_patch = im[j:(j+w+2*padding), i:i+h+2*padding, :]
                    list_patches[int((i/h)*38+(j/w)),...]=im_patch
                    # Patch ids are zero-padded to three digits: "001_0_0" etc.
                    if (l<9):
                        img_id.append("00"+str(l+1)+"_"+str(i)+"_"+str(j))
                    else: img_id.append("0"+str(l+1)+"_"+str(i)+"_"+str(j))
            return np.asarray(list_patches), img_id
        def pad_img(img,p):
            # Replicate-pad the image borders so edge windows stay in bounds.
            image=np.pad(img,((p,p),(p,p),(0,0)),'edge')
            return image
        for k in range(test_img.shape[0]):
            image = test_img[k]
            img = pad_img(image,int(padding))
            img_patches, img_id=img_crop(img,ps,ps,k)
            Test_patches[int(k*1444):int((k+1)*1444),...] = img_patches
            imgs_id = np.append(imgs_id,img_id)
        Test_patches = np.asarray(Test_patches)
        print("Test_patches size")
        print(Test_patches.shape)
        print("imgs_id length")
        print(len(imgs_id))
        return Test_patches,imgs_id
    Test_patches,img_id = extract_patches(Test)
    Z = model.predict(Test_patches, verbose=1)
    # Collapse the 2-class softmax into a 0/1 label per patch.
    Z=(Z[:,0]>Z[:,1])*1
    return Z,img_id,Test
# -
pred,img_id, Test_imgs=Test(model,window_size,patch_size,padding)
# # Visualize prediction
# + code_folding=[0]
def visualize_prediction(predictions, Test, path):
    """Overlay per-patch predictions on each test image and save the figures.

    predictions : flat array of 0/1 patch labels, 1444 (38x38) per image
    Test        : array of test images
    path        : output directory (created if missing); one PNG per image
    """
    my_file=Path(path)
    if not my_file.is_dir(): #check if directory already exists
        print(my_file.is_dir())
        os.makedirs(path) #creates directory if it does not exist
    for k in range(Test.shape[0]):
        # Slice out this image's 1444 patch labels and reshape them into the
        # 38x38 grid (stored column-major: column i holds patches i*38..i*38+37).
        gt_values = predictions[k*1444:(k+1)*1444]
        gt_test = np.empty((38,38))
        for i in range(38):
            for j in range(38):
                gt_test[j,i] = gt_values[i*38+j]
        gt_test=np.asarray(gt_test)
        fig = plt.figure(figsize=(10,10))
        # Draw the photo first, then the semi-transparent prediction mask.
        plt.imshow(Test[k],extent=(0,608,0,608))
        plt.imshow(gt_test,cmap='gray_r',alpha=0.4,extent=(0,608,0,608))
        plt.show()
        #Save image in folder
        image_name = path + "prediction_image_" + str(k)
        fig.savefig(image_name)
# -
visualize_prediction(pred,Test_imgs,"prediction_loaded/")
# # Morphological post-processing
# + code_folding=[3]
import cv2 as cv2
from PIL import Image
def postProcessingMorphological(predictions, window_size_pp, doOpenning, doClosing):
    """Apply morphological opening and/or closing to each image's 38x38
    prediction grid to remove isolated patches and fill small gaps.

    predictions    : flat array of patch labels, 1444 per image, 50 images
    window_size_pp : side length of the structuring element
    doOpenning     : apply opening (erosion then dilation) when True
    doClosing      : apply closing (dilation then erosion) when True
    Returns a new flat array of post-processed labels.
    """
    postprocpred = np.empty(predictions.shape)
    for k in range(50): #there are 50 windows in each picture
        # Reshape this image's flat labels into the column-major 38x38 grid.
        gt_values = predictions[k*1444:(k+1)*1444]
        gt_test = np.empty((38,38))
        for i in range(38):
            for j in range(38):
                gt_test[j,i] = gt_values[i*38+j]
        arr = gt_test
        arr = np.asarray(arr, dtype=np.uint8)
        # Round-trip through a temporary PNG so cv2 can read the grid;
        # the file is deleted immediately after loading.
        im = Image.fromarray(arr, mode='L')
        im.save('image'+str(k)+'.png')
        img = cv2.imread('image'+str(k)+'.png',0)
        os.remove('image'+str(k)+'.png')
        output = []
        #USER CAN CHOSE BETWEEN CROSS AND SQUARE STRUCTURING ELEMENT
        #kernel = np.ones((window_size_pp, window_size_pp),np.uint8) #structuring element: square
        kernel = cv2.getStructuringElement(cv2.MORPH_CROSS,(window_size_pp,window_size_pp)) #structuring element: cross
        if(doOpenning and doClosing):
            opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
            output = cv2.morphologyEx(opening, cv2.MORPH_CLOSE, kernel)
        elif (doOpenning):
            output = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)
        elif (doClosing):
            output = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)
        # Flatten the processed grid back into the column-major layout.
        gt_values = np.empty(gt_values.shape)
        for i in range(38):
            for j in range(38):
                gt_values[i*38+j] = output[j,i]
        postprocpred[k*1444:(k+1)*1444]=gt_values
    return postprocpred
# -
window_size_pp = 3
pred_pp_5 = postProcessingMorphological(pred, window_size_pp, True, False)
visualize_prediction(pred_pp_5,Test_imgs,"prediction_images_with_pp_square" + str(window_size_pp) + "/") #Enables to see the image with image processing
# # Neighborhood based post-processing
# Work on an independent copy so the in-place neighborhood post-processing
# below cannot mutate the original predictions; the previous plain assignment
# (pred1 = pred) only aliased the same array, contradicting its own comment.
pred1 = pred.copy()
pred1[pred1==0].shape
# + code_folding=[0]
def neighbors_processing(prediction):
    """Smooth each image's 38x38 prediction grid in place using 4-neighbor
    majority rules, iterating until no cell changes.

    Convention in this grid: 0 = road, 1 = non-road (note the final
    create_submission call writes 1-pred1). `prediction` is modified
    in place; nothing useful is returned.
    """
    ngbpred = prediction
    for k in range(50):
        print(k)
        # Column-major reshape of this image's flat labels into 38x38.
        gt_values = prediction[k*1444:(k+1)*1444]
        img1 = np.empty((38,38))
        for i in range(38):
            img1[:,i]=gt_values[i*38:(i+1)*38]
        img1=np.asarray(img1)
        # Show the grid before smoothing.
        fig = plt.figure(figsize=(3,3))
        plt.imshow(img1,cmap='gray_r',alpha=0.4,extent=(0,608,0,608))
        plt.show()
        changed1 = 1
        while(changed1!=0):
            changed1=0
            for n in range(1,37): #test pixel inside (1:36)
                for m in range(1,37):
                    # Sum of the four orthogonal neighbors (0 = road).
                    count=img1[n-1,m]+img1[n+1,m]+img1[n,m-1]+img1[n,m+1]
                    if ((count<=1 or (img1[n-1,m]==0 and img1[n+1,m]==0) or (img1[n,m-1]==0 and img1[n,m+1]==0)) and img1[n,m]==1): #if 3 or more road neighbors or two opposite neighbors, make road
                        img1[n,m] = 0
                        prediction[k*1444+38*m+n] = 0
                        changed1 += 1
                    elif(count==4 and img1[n,m]==0): #if 4 neighbors non-road, make non-road
                        img1[n,m] = 1
                        prediction[k*1444+38*m+n] = 1
                        changed1 += 1
                for i in range(1,37): #test on border (0&37)
                    # NOTE(review): loop variable `i` is never used in this
                    # body — every index below reuses `n` from the outer loop,
                    # so each border cell test is repeated 36 times per sweep.
                    # Presumably `i` was intended in place of `n`; confirm.
                    count1=img1[n-1,0]+img1[n+1,0] + img1[n,1]
                    count2=img1[n-1,37]+img1[n+1,37] + img1[n,36]
                    count3=img1[0,n-1]+img1[0,n+1] + img1[1,n]
                    count4=img1[37,n-1]+img1[37,n+1] + img1[36,n]
                    if((count1<=1 or (img1[n-1,0]==0 and img1[n+1,0]==0)) and img1[n,0]==1): # if 2 or more road neighbors, make road
                        img1[n,0]=0
                        prediction[k*1444+n] = 0
                        changed1 += 1
                    if(count1==3 and img1[n,0]==0): # if 3 neighbors non-road, make non-road
                        img1[n,0]=1
                        prediction[k*1444+n] = 1
                        changed1 +=1
                    if((count2<=1 or (img1[n-1,37]==0 and img1[n+1,37]==0)) and img1[n,37]==1):
                        img1[n,37]=0
                        prediction[k*1444+38*37+n] = 0
                        changed1 +=1
                    if(count2==3 and img1[n,37]==0):
                        img1[n,37]=1
                        prediction[k*1444+38*37+n] = 1
                        changed1 +=1
                    if((count3<=1 or (img1[0,n-1]==0 and img1[0,n+1]==0)) and img1[0,n]==1):
                        img1[0,n]=0
                        prediction[k*1444+38*n] = 0
                        changed1 +=1
                    if(count3==3 and img1[0,n]==0):
                        img1[0,n]=1
                        prediction[k*1444+38*n] = 1
                        changed1 +=1
                    if((count4<=1 or (img1[37,n-1]==0 and img1[37,n+1]==0)) and img1[37,n]==1):
                        img1[37,n]=0
                        prediction[k*1444+38*n+37] = 0
                        changed1 +=1
                    if(count4==3 and img1[37,n]==0):
                        img1[37,n]=1
                        prediction[k*1444+38*n+37] = 1
                        changed1 +=1
        # Show the grid after convergence.
        fig = plt.figure(figsize=(2,2))
        plt.imshow(img1,cmap='gray_r',alpha=0.4,extent=(0,38,0,38))
        plt.show()
# -
neighbors_processing(pred1)#pred_postproc = neighbors_processing(pred1)
# # Submit predictions
# + code_folding=[0]
def create_submission(predictions, ids, submission_filename):
    """Write a Kaggle-style CSV with one `id,prediction` row per patch.

    predictions         : sequence of patch labels (each coercible to int)
    ids                 : sequence of patch id strings, parallel to predictions
    submission_filename : output path; any existing file is overwritten
    """
    # Mode 'w' truncates an existing file, so the previous explicit
    # is_file()/os.remove dance is unnecessary; the context manager
    # guarantees the handle is closed even if writing fails.
    with open(submission_filename, 'w') as f:
        f.write('id,prediction\n')
        # zip keeps ids and predictions in lockstep instead of indexing by k.
        for patch_id, label in zip(ids, predictions):
            f.write(patch_id + ',' + str(int(label)) + '\n')
# -
# Labels are flipped (1-pred1) because the grid uses 0 = road internally
# while the submission expects 1 = road.
create_submission(1-pred1,img_id,"ngb_predv2.csv")
# NOTE(review): pred_postproc is never assigned (the neighbors_processing
# call above mutates pred1 in place and the assignment is commented out),
# so the next line raises NameError as written.
pred_postproc[pred_postproc==0].shape
visualize_prediction(pred1,Test_imgs,"to_delete") #Enables to see the image with image processing
|
Project_2/TEST_SAVED_MODEL.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How to download and read the Solvency 2 legislation
#
# In our first NLP project we will download, clean and read the Delegated Acts of the Solvency 2 legislation in all European languages.
import os
import re
import requests
import fitz
# The languages of the European Union are
# Bulgarian (BG),
# Spanish (ES),
# Czech (CS),
# Danish (DA),
# German (DE),
# Estonian (ET),
# Greek (EL),
# English (EN),
# French (FR),
# Croatian (HR),
# Italian (IT),
# Latvian (LV),
# Lithuanian (LT),
# Hungarian (HU),
# Maltese (MT),
# Dutch (NL),
# Polish (PL),
# Portuguese (PT),
# Romanian (RO),
# Slovak (SK),
# Slovenian (SL),
# Finnish (FI),
# Swedish (SV).
# Two-letter codes for the 23 official EU languages listed above.
languages = ['BG','ES','CS','DA','DE','ET','EL',
             'EN','FR','HR','IT','LV','LT','HU',
             'MT','NL','PL','PT','RO','SK','SL',
             'FI','SV']
# The urls of the Delegated Acts of Solvency 2 are constructed for these languages.
urls = ['https://eur-lex.europa.eu/legal-content/' + lang +
        '/TXT/PDF/?uri=OJ:L:2015:012:FULL&from=EN'
        for lang in languages]
# The following for loop retrieves the pdfs of the Delegated Acts from the website of the European Union and stores them in da_path.
# +
da_path = '../../../../../10_central_data/legislation/'

# Download each language's Delegated Acts PDF unless it is already cached
# locally under da_path.
for index in range(len(urls)):
    print("Retrieving " + languages[index] + ' from ' + urls[index])
    filename = 'Solvency II Delegated Acts - ' + languages[index] + '.pdf'
    if not os.path.isfile(da_path + filename):
        r = requests.get(urls[index])
        # Context manager guarantees the file handle is closed even on error.
        with open(da_path + filename, 'wb') as f:
            f.write(r.content)
    # Fix: removed the old PyPDF2.PdfFileReader sanity check — PyPDF2 is
    # never imported in this notebook (the PDFs are read with fitz/PyMuPDF
    # in a later cell), so that line raised NameError on every run.
# -
# # Data cleaning
# If you look at the pdfs then you see that each page has a header with page number and information about the legislation and the language. These headers must be deleted to access the articles in the text.
# Per-language page-header phrase ("Official Journal of the European Union")
# that appears on every page of the Delegated Acts and is stripped out below.
# A plain dict literal replaces the redundant dict({...}) wrapper.
DA_dict = {
    'BG': 'Официален вестник на Европейския съюз',
    'CS': 'Úřední věstník Evropské unie',
    'DA': 'Den Europæiske Unions Tidende',
    'DE': 'Amtsblatt der Europäischen Union',
    'EL': 'Επίσημη Εφημερίδα της Ευρωπαϊκής Ένωσης',
    'EN': 'Official Journal of the European Union',
    'ES': 'Diario Oficial de la Unión Europea',
    'ET': 'Euroopa Liidu Teataja',
    'FI': 'Euroopan unionin virallinen lehti',
    'FR': "Journal officiel de l'Union européenne",
    'HR': 'Službeni list Europske unije',
    'HU': 'Az Európai Unió Hivatalos Lapja',
    'IT': "Gazzetta ufficiale dell'Unione europea",
    'LT': 'Europos Sąjungos oficialusis leidinys',
    'LV': 'Eiropas Savienības Oficiālais Vēstnesis',
    'MT': 'Il-Ġurnal Uffiċjali tal-Unjoni Ewropea',
    'NL': 'Publicatieblad van de Europese Unie',
    'PL': 'Dziennik Urzędowy Unii Europejskiej',
    'PT': 'Jornal Oficial da União Europeia',
    'RO': 'Jurnalul Oficial al Uniunii Europene',
    'SK': 'Úradný vestník Európskej únie',
    'SL': 'Uradni list Evropske unije',
    'SV': 'Europeiska unionens officiella tidning',
}
# The following code reads the pdfs, deletes the headers from all pages and saves the clean text to a .txt file.
# +
# Build DA[language] = full cleaned text of the Delegated Acts. On first run
# the PDF is parsed and the cleaned text cached as a .txt file; later runs
# load the cached text directly.
DA = dict()
files = [f for f in os.listdir(da_path) if os.path.isfile(os.path.join(da_path, f))]
print("Reading language ", end='')
for language in languages:
    print(language + " ", end='')
    if not("Delegated_Acts_" + language + ".txt" in files):
        # reading pages from pdf file
        da_pdf = fitz.open(da_path + 'Solvency II Delegated Acts - ' + language + '.pdf', 'rb')
        da_pages = [page.getText(output = "text") for page in da_pdf]
        da_pdf.close()
        # deleting page headers: date, page number, the localized journal
        # title from DA_dict, and the language code, separated by whitespace
        header = "17.1.2015\\s+L\\s+\\d+/\\d+\\s+" + DA_dict[language].replace(' ','\\s+') + "\\s+" + language + "\\s+"
        da_pages = [re.sub(header, '', page) for page in da_pages]
        DA[language] = ''.join(da_pages)
        # some preliminary cleaning (soft hyphens) -> should be more
        DA[language] = DA[language].replace('\xad ', '')
        # saving txt file
        da_txt = open(da_path + "Delegated_Acts_" + language + ".txt", "wb")
        da_txt.write(DA[language].encode('utf-8'))
        da_txt.close()
    else:
        # loading txt file
        da_txt = open(da_path + "Delegated_Acts_" + language + ".txt", "rb")
        DA[language] = da_txt.read().decode('utf-8')
        da_txt.close()
# # Retrieve the text within articles
# Retrieving the text within articles is not straightforward. In English we have 'Article 1 some text', i.e. the word Article is put before the number. But some European languages put the word after the number and there are two languages, HU and LV, that put a dot between the number and the article. To be able to read the text within the articles we need to know this ordering (and we need of course the word for article in every language).
# For every language: the word for "Article" and where it sits relative to
# the article number — 'pre' (word first), 'post' (number first), or
# 'postdot' (number, a dot, then the word). A plain dict literal replaces
# the redundant dict({...}) wrapper.
art_dict = {
    'BG': ['Член', 'pre'],
    'CS': ['Článek', 'pre'],
    'DA': ['Artikel', 'pre'],
    'DE': ['Artikel', 'pre'],
    'EL': ['Άρθρο', 'pre'],
    'EN': ['Article', 'pre'],
    'ES': ['Artículo', 'pre'],
    'ET': ['Artikkel', 'pre'],
    'FI': ['artikla', 'post'],
    'FR': ['Article', 'pre'],
    'HR': ['Članak', 'pre'],
    'HU': ['cikk', 'postdot'],
    'IT': ['Articolo', 'pre'],
    'LT': ['straipsnis', 'post'],
    'LV': ['pants', 'postdot'],
    'MT': ['Artikolu', 'pre'],
    'NL': ['Artikel', 'pre'],
    'PL': ['Artykuł', 'pre'],
    'PT': ['Artigo', 'pre'],
    'RO': ['Articolul', 'pre'],
    'SK': ['Článok', 'pre'],
    'SL': ['Člen', 'pre'],
    'SV': ['Artikel', 'pre'],
}
# Next we can define a regex to select the text within an article.
def retrieve_article(language, article_num):
    """Return the text of article `article_num` of the Delegated Acts in
    `language`, with all whitespace collapsed to single spaces.

    Builds a regex spanning from this article's marker up to the marker of the
    next article; `art_dict` supplies the per-language marker word and whether
    it precedes or follows the number.  Searches the full document text held
    in DA[language].

    Raises KeyError for an unknown language, and ValueError when the ordering
    method is unrecognized or the article cannot be located (the original
    raised an opaque NameError / TypeError in those two cases).
    """
    word, method = art_dict[language]
    num, nxt = str(article_num), str(article_num + 1)
    if method == 'pre':          # "Article 292 ... Article 293"
        pattern = word + ' ' + num + '(.*?)' + word + ' ' + nxt
    elif method == 'post':       # "292 artikla ... 293 artikla"
        pattern = num + ' ' + word + '(.*?)' + nxt + ' ' + word
    elif method == 'postdot':    # "292. cikk ... 293. cikk"
        # NOTE(review): the '.' is an unescaped regex metacharacter here and so
        # matches any character; kept as-is to preserve existing matches.
        pattern = num + '. ' + word + '(.*?)' + nxt + '. ' + word
    else:
        raise ValueError("unknown article ordering %r for language %s" % (method, language))
    match = re.compile(pattern, re.DOTALL).search(DA[language])
    if match is None:
        raise ValueError("article %d not found for language %s" % (article_num, language))
    return ' '.join(match[1].split())
# Okay, where are we now? We have a function that can retrieve the text of all the articles in the Delegated Acts for each European language.
# Spot-check: pull the same article in several languages (and a later one in NL).
retrieve_article('EN', 292)
retrieve_article('DE', 292)
retrieve_article('FR', 292)
retrieve_article('EL', 292)
retrieve_article('NL', 295)
|
notebooks/How to download and read the Solvency 2 legislation.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/Tessellate-Imaging/monk_v1/blob/master/study_roadmaps/1_getting_started_roadmap/8_expert_mode/1)%20Create%20experiment%20from%20scratch%20-%20Mxnet%20backend%20-%20train%2C%20validate%2C%20infer.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# -
# # Goals
#
#
# ### Learn how to use full potential of monk in it's expert mode
# # Table of Contents
#
#
# ## [0. Install](#0)
#
#
# ## [1. Load data, setup model, select params, and Train](#1)
#
#
# ## [2. Run validation on trained classifier](#2)
#
#
# ## [3. Run inferencing on trained classifier](#3)
# <a id='0'></a>
# # Install Monk
#
# - git clone https://github.com/Tessellate-Imaging/monk_v1.git
#
# - cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
# - (Select the requirements file as per OS and CUDA version)
# !git clone https://github.com/Tessellate-Imaging/monk_v1.git
# +
# If using Colab install using the commands below
# !cd monk_v1/installation/Misc && pip install -r requirements_colab.txt
# If using Kaggle uncomment the following command
# #!cd monk_v1/installation/Misc && pip install -r requirements_kaggle.txt
# Select the requirements file as per OS and CUDA version when using a local system or cloud
# #!cd monk_v1/installation/Linux && pip install -r requirements_cu9.txt
# -
# ## Dataset - Natural Images Classification
# - https://www.kaggle.com/prasunroy/natural-images
# ! wget --load-cookies /tmp/cookies.txt "https://docs.google.com/uc?export=download&confirm=$(wget --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1sbQ_KaEDd7kRrTvna-4odLqxM2G0QT0Z' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\1\n/p')&id=1sbQ_KaEDd7kRrTvna-4odLqxM2G0QT0Z" -O natural-images.zip && rm -rf /tmp/cookies.txt
# ! unzip -qq natural-images.zip
# # Imports
# Monk
import os
import sys
sys.path.append("monk_v1/monk/");
#Using mxnet-gluon backend
from gluon_prototype import prototype
# <a id='1'></a>
# # Load data, setup model, select params, and Train
# Create a Monk "prototype" experiment backed by the mxnet-gluon backend.
gtf = prototype(verbose=1);
gtf.Prototype("project", "expert_mode");
# ## Set Data params
# split=0.9 -> 90% train / 10% validation; images resized to 224x224.
gtf.Dataset_Params(dataset_path="natural-images/train",
                   split=0.9,
                   input_size=224,
                   batch_size=16,
                   shuffle_data=True,
                   num_processors=3);
# ## Apply Transforms
gtf.apply_random_horizontal_flip(train=True, val=True);
# ImageNet mean/std normalization -- matches the pretrained resnet18_v1 weights.
gtf.apply_normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], train=True, val=True, test=True);
# ## Load Dataset
gtf.Dataset();
# ## Set Model Params
# Transfer learning: pretrained resnet18_v1 with the base network frozen.
gtf.Model_Params(model_name="resnet18_v1",
                 freeze_base_network=True,
                 use_gpu=True,
                 use_pretrained=True);
# ## Append Custom layers to transfer learning base model
gtf.append_dropout(probability=0.1);
gtf.append_linear(final_layer=True);
# ## Load Model
gtf.Model();
# ## Freeze first few layers
gtf.Freeze_Layers(num=10);
# ## Set Training params
gtf.Training_Params(num_epochs=10,
                    display_progress=True,
                    display_progress_realtime=True,
                    save_intermediate_models=True,
                    intermediate_model_prefix="intermediate_model_",
                    save_training_logs=True);
# +
## Set Optimizer, losses and learning rate schedulers
# -
gtf.optimizer_sgd(0.001);
gtf.lr_fixed();
gtf.loss_softmax_crossentropy()
# +
#Start Training
gtf.Train();
#Read the training summary generated once you run the cell and training is completed
# -
# <a id='2'></a>
# # Validating the trained classifier
# +
# Reload the experiment in evaluation mode (eval_infer=True loads trained weights).
gtf = prototype(verbose=1);
gtf.Prototype("project", "expert_mode", eval_infer=True);
# Just for example purposes, validating on the training set itself
gtf.Dataset_Params(dataset_path="natural-images/train");
gtf.Dataset();
accuracy, class_based_accuracy = gtf.Evaluate();
# -
# <a id='3'></a>
# # Running inference on test images
# Reload the trained classifier for single-image inference.
gtf = prototype(verbose=1);
gtf.Prototype("project", "expert_mode", eval_infer=True);
# +
img_name = "natural-images/test/test1.jpg";
predictions = gtf.Infer(img_name=img_name);
#Display
from IPython.display import Image
Image(filename=img_name)
# -
# +
img_name = "natural-images/test/test2.jpg";
predictions = gtf.Infer(img_name=img_name);
#Display
from IPython.display import Image
Image(filename=img_name)
# -
# +
img_name = "natural-images/test/test3.jpg";
predictions = gtf.Infer(img_name=img_name);
#Display
from IPython.display import Image
Image(filename=img_name)
# -
|
study_roadmaps/1_getting_started_roadmap/8_expert_mode/1) Create experiment from scratch - Mxnet backend - train, validate, infer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: dnn
# language: python
# name: dnn
# ---
# # Visualize FFT Datasets
#
# Visualize the real TNG FFT datasets and the TVB sim FFT datasets.
# +
import sys
# sys.path.append('../../dnn/')
sys.path.append('../dnn/')
import time
import numpy as np
# np.random.seed(1234)
import math as m
import os
import processing
import processing.preprocessfft
from processing.util import DataHandler
import peakdetect
# import DNN frameworks
import tensorflow as tf
import keras
from sklearn.decomposition import PCA
import ntpath
sys.path.append('/Users/adam2392/Documents/fragility_analysis/')
from datainterface.loadpatient import LoadPat
from fragility.visualize.plotwins import VisualWins
import seaborn as sns
import matplotlib.pyplot as plt
# %load_ext autoreload
# %autoreload 2
# +
def convert(data):
    """Recursively turn bytes into str.

    Lists are assumed to hold bytes and are decoded as UTF-8; a bare bytes
    value is decoded as ASCII; dicts are converted key and value; tuples
    yield a lazy map (as in the original); anything else passes through.
    """
    if isinstance(data, list):
        return [item.decode("utf-8") for item in data]
    if isinstance(data, bytes):
        return data.decode('ascii')
    if isinstance(data, dict):
        return {convert(key): convert(value) for key, value in data.items()}
    if isinstance(data, tuple):
        return map(convert, data)
    return data
def decodebytes(metadata):
    '''
    Decode a metadata dictionary from bytes -> unicode (str).

    This conversion is needed when Python 2 saved the npz file and Python 3
    reads it back: keys and values arrive as bytes.

    Fast path: decode every bytes key/value directly.  If any key is already
    a str (AttributeError on .decode), fall back to recursively converting
    each value in place.

    Bug fix: the original fallback loop called convert() but DISCARDED its
    return value, so the dict was returned unconverted; the result is now
    assigned back into the dict.
    '''
    def convert(data):
        # Decode bytes; recurse into containers; leave everything else alone.
        if isinstance(data, bytes):
            return data.decode('ascii')
        if isinstance(data, dict):
            return {convert(k): convert(v) for k, v in data.items()}
        if isinstance(data, (list, tuple)):
            return type(data)(convert(item) for item in data)
        return data
    try:
        metadata = {k.decode("utf-8"): (v.decode("utf-8") if isinstance(v, bytes) else v)
                    for k, v in metadata.items()}
    except AttributeError as e:
        print(e)
        for key in list(metadata.keys()):
            metadata[key] = convert(metadata[key])
    return metadata
# -
# # Visualizing The FFT Computed Data
# +
# Patient / seizure identifiers and the directories holding the converted TNG
# recordings plus the precomputed FFT models (real data and TVB simulation).
patid = 'id008_gc'
patient = patid + '_sz2'
simid = patid + '_dist'
rawdatadir = '/Volumes/<NAME>/pydata/convertedtng/'
fftdir = '/Volumes/<NAME>/pydata/output_fft/tng/win500_step250/'
fftsimdir = '/Volumes/<NAME>/pydata/output_fft/tvbsim/full/win500_step250/'
# load in the data
realfftfile = os.path.join(fftdir, patient, patient + '_fftmodel.npz')
simfftfile = os.path.join(fftsimdir, simid + '-1.0_fftmodel.npz')
print(realfftfile)
print(simfftfile)
# +
# load real data: spectral power plus its frequency/time axes and channel labels
fftdata = np.load(realfftfile)
power = fftdata['power']
freqs = fftdata['freqs']
timepoints = fftdata['timepoints']
chanlabels = fftdata['chanlabels']
# load metadata for the actual patient (seizure onset/offset markers)
dataloader = LoadPat(patient, rawdatadir)
onsettime = dataloader.onset_time
offsettime = dataloader.offset_time
print(onsettime, offsettime)
print(freqs.shape)
print(timepoints.shape)
print(chanlabels.shape)
print(power.shape)
print(fftdata.keys())
# +
# load in the image type data (Python-2 era npz, hence encoding='bytes')
fftsimdata = np.load(simfftfile, encoding='bytes')
power = fftsimdata['power']
freqs = fftsimdata['freqs']
timepoints = fftsimdata['timepoints']
# chanlabels = fftsimdata['chanlabels']
metadata = fftsimdata['metadata'].item()
# decode the bytes-keyed metadata dict into str keys/values
metadata = decodebytes(metadata)
gainmat = metadata['gainmat']
onsettimes = metadata['onsettimes']
offsettimes = metadata['offsettimes']
chanlabels = metadata['chanlabels']
print(fftsimdata.keys())
print(metadata.keys())
print(gainmat.shape)
print(power.shape)
print(freqs.shape)
print(timepoints.shape)
print(chanlabels.shape)
# -
chanlabels = list(chanlabels)
try:
    # Python-2 npz files store labels as bytes; decode if needed.
    chanlabels = [label.decode("utf-8") for label in chanlabels]
except AttributeError as e:
    # Labels were already str -- nothing to decode.
    print(e)
print(chanlabels)
print(chanlabels.index('R1'))
# +
# NOTE(review): this plotting cell is duplicated verbatim below -- likely a
# copy/paste leftover; consider deleting one copy.
powwin = lambda ind: np.abs(power[ind,...].squeeze())
# plot and visualize the fft data for a single channel
chanind = 0
titlestr = 'FFT For ' + chanlabels[chanind]
sns.set(font_scale=1.5)
vizwin = VisualWins(figsize=(7,5))
vizwin.loadtimewins(timepoints)
vizwin.loaddata(powwin(chanind), freqs, sort=True)
vizwin.heatwinmodel(titlestr=titlestr)
powwin = lambda ind: np.abs(power[ind,...].squeeze())
# plot and visualize the fft data
chanind = 0
titlestr = 'FFT For ' + chanlabels[chanind]
sns.set(font_scale=1.5)
vizwin = VisualWins(figsize=(7,5))
vizwin.loadtimewins(timepoints)
vizwin.loaddata(powwin(chanind), freqs, sort=True)
vizwin.heatwinmodel(titlestr=titlestr)
# -
# # Visualize Image Type Data
#
# This can just be PCA projected data, or it can be image data projected onto the region parcellations of freesurfer via an inverted gain matrix.
#
# S = G*R
#
# +
# plot and visualize the image data compared real vs sim
# -
|
notebooks/Visualize FFT Real and Sim.ipynb
|
;; -*- coding: utf-8 -*-
;; ---
;; jupyter:
;; jupytext:
;; text_representation:
;; extension: .scm
;; format_name: light
;; format_version: '1.5'
;; jupytext_version: 1.14.4
;; kernelspec:
;; display_name: Calysto Scheme 3
;; language: scheme
;; name: calysto_scheme
;; ---
;; ### 練習問題2.7
;; 区間の抽象化の実装を規定していないため、
;; Alyssaのプログラムは未完成である。区間のコンストラクタの定義は以下のようになる。
;;
;; (define (make-interval a b) (cons a b))
;;
;; セレクタ upper-bound と lower-bound を定義し、実装を完成させよ。
;; Interval constructor: an interval is represented as a pair (lower . upper).
(define (make-interval a b) (cons a b))
; lower-bound / upper-bound selector procedures
(define (lower-bound x)(car x))
(define (upper-bound x)(cdr x))
; If the arguments a, b carried no constraint (a < b), the selectors could
; instead be written as below.  Arguably this kind of normalization really
; belongs in the constructor, though.
;(define (lower-bound x)
;  (let ((a (car x))
;        (b (cdr x)))
;    (min a b))
;  )
;(define (upper-bound x)
;  (let ((a (car x))
;        (b (cdr x)))
;    (max a b))
;  )
;; +
; Sanity check: build the interval 6.8 +/- 10% and print both bounds.
(define (lower r e)(- r (* r e)))
(define (upper r e)(+ r (* r e)))
(define R 6.8)
(define E 0.1)
(define x (make-interval (lower R E) (upper R E)))
(display (lower-bound x))
(newline)
(display (upper-bound x))
(newline)
|
exercises/2.07.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # WeatherPy
# ----
#
# #### Note
# * Instructions have been included for each segment. You do not have to follow them exactly, but they are included to help you think through the steps.
# +
# Dependencies and Setup
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import requests
import time
from scipy.stats import linregress
from pprint import pprint
# Import API key
from api_keys import weather_api_key
# Incorporated citipy to determine city based on latitude and longitude
from citipy import citipy
# Range of latitudes and longitudes
# (degrees; used below to draw uniform random coordinates over the globe)
lat_range = (-90, 90)
lng_range = (-180, 180)
# -
# ## Generate Cities List
# +
# List for holding lat_lngs and cities
lat_lngs = []
cities = []
# Create a set of random lat and lng combinations
# NOTE: not seeded, so the sampled cities differ on every run.
lats = np.random.uniform(lat_range[0], lat_range[1], size=1500)
lngs = np.random.uniform(lng_range[0], lng_range[1], size=1500)
lat_lngs = zip(lats, lngs)
# Identify nearest city for each lat, lng combination
for lat_lng in lat_lngs:
    city = citipy.nearest_city(lat_lng[0], lat_lng[1]).city_name
    # If the city is unique, then add it to our cities list
    if city not in cities:
        cities.append(city)
# -
# Print the city count to confirm sufficient count
len(cities)
# +
# replace spaces in city names (so they embed cleanly in a URL query string)
cities = [i.replace(' ', '+') for i in cities]
for i in cities:
    print(i)
# -
# ### Perform API Calls
# * Perform a weather check on each city using a series of successive API calls.
# * Include a print log of each city as it's being processed (with the city number and city name).
#
# +
# Query OpenWeatherMap once per city and collect the raw JSON responses.
weather_data = []
base_url = f'https://api.openweathermap.org/data/2.5/weather?'
counter = 0
for city in cities:
    counter = counter + 1
    print(f'Making request number {counter} for current weather data in {city}.')
    try:
        url = f'{base_url}q={city}&appid={weather_api_key}'
        response = requests.get(url).json()
        weather_data.append(response)
    except:
        # NOTE(review): an unknown city does NOT raise -- the API returns a JSON
        # error payload (cod != 200), which is appended above and filtered out
        # later; this handler only catches network/JSON-decoding failures.
        print(f'City not found. Skipping...')
# -
pprint(weather_data)
# ### Convert Raw Data to DataFrame
# * Export the city data into a .csv.
# * Display the DataFrame
# +
# Collect the fields of each successful API response into parallel lists.
# empty lists to store values
name_list = []
country_list = []
lat_list = []
lng_list = []
temp_list = []
wind_list = []
cloud_list = []
humidity_list = []

def kelvin_to_f(k):
    """Convert a temperature from kelvin to degrees Fahrenheit."""
    # Bug fix: the kelvin offset is 273.15, not 273 (the old value skewed
    # every temperature by ~0.27 F).
    return (9.0 / 5.0) * (k - 273.15) + 32

# Walk through the dataset; a successful OpenWeatherMap response has cod == 200,
# anything else (e.g. "city not found") is skipped.
# (The original checked this same condition eight times per entry and indexed
# via a separate line_tracker counter instead of using the loop variable.)
for entry in weather_data:
    if entry['cod'] != 200:
        continue
    name_list.append(entry['name'])
    country_list.append(entry['sys']['country'])
    lat_list.append(entry['coord']['lat'])
    lng_list.append(entry['coord']['lon'])
    wind_list.append(entry['wind']['speed'])
    cloud_list.append(entry['clouds']['all'])
    humidity_list.append(entry['main']['humidity'])
    temp_list.append(kelvin_to_f(entry['main']['temp_max']))
# +
# Assemble the per-city records into one DataFrame.
weather_df = pd.DataFrame(list(zip(name_list, country_list, lat_list, lng_list, temp_list, wind_list, cloud_list, humidity_list)),
                          columns = ['Name', 'Country', 'Latitude', 'Longitude','Temperature(F)', 'Wind Speed(mph)', 'Cloud Coverage', 'Humidity'])
weather_df
# -
# save weather_df to csv file
weather_df.to_csv('weather_data.csv', index = False)
# ## Inspect the data and remove the cities where the humidity > 100%.
# ----
# Skip this step if there are no cities that have humidity > 100%.
# Get the indices of cities that have humidity over 100%.
# (Humidity is a percentage; >100 would indicate a bad API record.)
weather_df.loc[weather_df['Humidity'] > 100]
# +
# Make a new DataFrame equal to the city data to drop all humidity outliers by index.
# Passing "inplace=False" will make a copy of the city_data DataFrame, which we call "clean_city_data".
# +
# ^^^no cities are above 100% humidity, so we skip the rest of this section
# -
# ## Plotting the Data
# * Use proper labeling of the plots using plot titles (including date of analysis) and axes labels.
# * Save the plotted figures as .pngs.
# ## Latitude vs. Temperature Plot
# +
# Scatter each weather variable against latitude and save the figure to disk.
temp_plt = weather_df.plot.scatter(x='Temperature(F)',
                                   y='Latitude',
                                   c='DarkBlue')
plt.savefig('lat_v_temp.png')
# +
# In the above cell, each city is plotted according to it's latitude and temperature in Fahrenheit.
# -
# ## Latitude vs. Humidity Plot
humidity_plt = weather_df.plot.scatter(x='Humidity',
                                       y='Latitude',
                                       c='DarkRed')
plt.savefig('lat_v_humidity.png')
# +
# In the above cell, each city is plotted according to it's latitude and humidity percentage.
# -
# ## Latitude vs. Cloudiness Plot
cloud_plt = weather_df.plot.scatter(x='Cloud Coverage',
                                    y='Latitude',
                                    c='LightBlue')
plt.savefig('lat_v_cloud.png')
# +
# In the above cell, each city is plotted according to it's latitude and cloud coverage percentage.
# -
# ## Latitude vs. Wind Speed Plot
wind_plt = weather_df.plot.scatter(x='Wind Speed(mph)',
                                   y='Latitude',
                                   c='Green')
plt.savefig('lat_v_wind.png')
# +
# In the above cell, each city is plotted according to it's latitude and wind speed(mph).
# -
# ## Linear Regression
# create new dataframes from weather_df for northern and southern hemispheres
# (note: cities at exactly latitude 0 land in BOTH frames)
southern_hemi_df = weather_df.loc[(weather_df['Latitude']) <= 0]
northern_hemi_df = weather_df.loc[(weather_df['Latitude']) >= 0]
# #### Northern Hemisphere - Max Temp vs. Latitude Linear Regression
# +
def plot_lat_regression(df, column, annot_xy, outfile):
    """Scatter `column` against latitude for `df`, overlay the least-squares
    regression line, annotate the line equation at `annot_xy` (data coords),
    and save the plot to `outfile`.

    Replaces eight near-identical copy/pasted cells with one helper.
    """
    x_values = df[column]
    y_values = df['Latitude']
    (slope, intercept, rvalue, pvalue, stderr) = linregress(x_values, y_values)
    regress_values = x_values * slope + intercept
    line_eq = "y = " + str(round(slope, 2)) + "x + " + str(round(intercept, 2))
    plt.scatter(x_values, y_values)
    plt.plot(x_values, regress_values, "r-")
    plt.annotate(line_eq, annot_xy, fontsize=15, color="red")
    plt.xlabel(column)
    plt.ylabel('Latitude')
    # Bug fix: savefig must run BEFORE show() -- show() closes the figure in
    # non-interactive runs, so the original saved blank images.
    plt.savefig(outfile)
    plt.show()

plot_lat_regression(northern_hemi_df, 'Temperature(F)', (6, 10), 'n_temp_regression.png')
# In the northern hemisphere, there is a strong correlation between temperature and latitude. As we get closer to the equator, the temperature rises.
# -
# #### Southern Hemisphere - Max Temp vs. Latitude Linear Regression
plot_lat_regression(southern_hemi_df, 'Temperature(F)', (60, -50), 's_temp_regression.png')
# In the southern hemisphere, the data is less streamlined. However, there is still a slight correlation between latitude and temperature.
# #### Northern Hemisphere - Humidity (%) vs. Latitude Linear Regression
plot_lat_regression(northern_hemi_df, 'Humidity', (20, 10), 'n_humidity_regression.png')
# #### Southern Hemisphere - Humidity (%) vs. Latitude Linear Regression
plot_lat_regression(southern_hemi_df, 'Humidity', (20, -50), 's_humidity_regression.png')
# In both the northern and southern hemispheres, we can see that humidity has little to do with latitude placement.
# #### Northern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
plot_lat_regression(northern_hemi_df, 'Cloud Coverage', (6, 10), 'n_cloud_regression.png')
# #### Southern Hemisphere - Cloudiness (%) vs. Latitude Linear Regression
plot_lat_regression(southern_hemi_df, 'Cloud Coverage', (20, -50), 's_cloud_regression.png')
# Again, in both the northern and southern hemispheres, we can see that cloudiness has little to do with latitude placement.
# #### Northern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
plot_lat_regression(northern_hemi_df, 'Wind Speed(mph)', (6, 10), 'n_wind_regression.png')
# #### Southern Hemisphere - Wind Speed (mph) vs. Latitude Linear Regression
plot_lat_regression(southern_hemi_df, 'Wind Speed(mph)', (4, -50), 's_wind_regression.png')
# Again, in both the northern and southern hemispheres, we can see that wind speed has little to do with latitude placement.
|
main_script/WeatherPy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# default_exp models.XCMPlus
# -
# # XCM: An Explainable Convolutional Neural Network for Multivariate Time Series Classification
#
# > This is an unofficial PyTorch implementation of XCM created by <NAME>.
#
# **References:**
#
# * <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020). XCM: An Explainable Convolutional Neural Network ([paper](https://hal.inria.fr/hal-03469487/document))
# * Official tensorflow implementation available at: https://github.com/XAIseries/XCM
# * No official XCM PyTorch implementation.
#export
from tsai.imports import *
from tsai.utils import *
from tsai.models.layers import *
from tsai.models.utils import *
from tsai.models.explainability import *
# +
#export
# This is an unofficial PyTorch implementation of XVM created by <NAME> - <EMAIL> based on:
# <NAME>., <NAME>., <NAME>., <NAME>., & <NAME>. (2020). XCM: An Explainable Convolutional Neural Network
# https://hal.inria.fr/hal-03469487/document
# Official tensorflow implementation available at: https://github.com/XAIseries/XCM
# No official XCM PyTorch implementation available as of Dec 11, 2021
class XCMPlus(nn.Sequential):
    """XCM: An Explainable Convolutional Neural Network for multivariate time
    series classification.

    Unofficial PyTorch implementation based on Fauvel et al. (2020); the
    official TF implementation lives at https://github.com/XAIseries/XCM.
    Assembled as a Sequential of ('backbone', 'head').
    """
    def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1., flatten:bool=False, custom_head:callable=None,
                 concat_pool:bool=False, fc_dropout:float=0., bn:bool=False, y_range:tuple=None, **kwargs):
        # (Removed a `window_size` local that was computed but never used here;
        # the backbone derives its own window from seq_len * window_perc.)
        backbone = _XCMPlus_Backbone(c_in, c_out, seq_len=seq_len, nf=nf, window_perc=window_perc)
        self.head_nf = nf
        self.c_out = c_out
        self.seq_len = seq_len
        # Either a user-supplied head factory or the default pooling+linear head.
        if custom_head: head = custom_head(self.head_nf, c_out, seq_len, **kwargs)
        else: head = self.create_head(self.head_nf, c_out, seq_len, flatten=flatten, concat_pool=concat_pool,
                                      fc_dropout=fc_dropout, bn=bn, y_range=y_range)
        super().__init__(OrderedDict([('backbone', backbone), ('head', head)]))

    def create_head(self, nf, c_out, seq_len=None, flatten=False, concat_pool=False, fc_dropout=0., bn=False, y_range=None):
        """Build the classification head: Flatten (nf * seq_len features) or
        (concat-)global pooling, then LinBnDrop, optionally squashed into
        `y_range` by a SigmoidRange."""
        if flatten:
            nf *= seq_len
            layers = [Flatten()]
        else:
            if concat_pool: nf *= 2
            layers = [GACP1d(1) if concat_pool else GAP1d(1)]
        layers += [LinBnDrop(nf, c_out, bn=bn, p=fc_dropout)]
        if y_range: layers += [SigmoidRange(*y_range)]
        return nn.Sequential(*layers)

    def show_gradcam(self, x, y=None, detach=True, cpu=True, apply_relu=True, cmap='inferno', figsize=None, **kwargs):
        """Plot min-max-normalized attribution maps for the 2D
        (observed-variables) and 1D (time) branches of the backbone."""
        att_maps = get_attribution_map(self, [self.backbone.conv2dblock, self.backbone.conv1dblock], x, y=y, detach=detach, cpu=cpu, apply_relu=apply_relu)
        # Normalize each map to [0, 1] for display.
        att_maps[0] = (att_maps[0] - att_maps[0].min()) / (att_maps[0].max() - att_maps[0].min())
        att_maps[1] = (att_maps[1] - att_maps[1].min()) / (att_maps[1].max() - att_maps[1].min())
        figsize = ifnone(figsize, (10, 10))
        # The two plots below were duplicated code in the original; folded
        # into one loop (identical rendering order and output).
        for title, att_map in zip(('Observed variables', 'Time'), att_maps):
            fig = plt.figure(figsize=figsize, **kwargs)
            ax = plt.axes()
            plt.title(title)
            im = ax.imshow(att_map, cmap=cmap)
            cax = fig.add_axes([ax.get_position().x1+0.01, ax.get_position().y0, 0.02, ax.get_position().height])
            plt.colorbar(im, cax=cax)
            plt.show()
class _XCMPlus_Backbone(Module):
    # Two parallel branches -- a 2D conv over (variable, time) and a 1D conv
    # over time -- each squeezed back to a single channel, concatenated along
    # the channel axis, then mixed by a final 1D conv block.
    def __init__(self, c_in:int, c_out:int, seq_len:Optional[int]=None, nf:int=128, window_perc:float=1.):
        # NOTE(review): c_out is accepted but unused; kept for signature
        # compatibility with the XCMPlus constructor.
        window_size = int(round(seq_len * window_perc, 0))
        self.conv2dblock = nn.Sequential(*[Unsqueeze(1), Conv2d(1, nf, kernel_size=(1, window_size), padding='same'), BatchNorm(nf), nn.ReLU()])
        self.conv2d1x1block = nn.Sequential(*[nn.Conv2d(nf, 1, kernel_size=1), nn.ReLU(), Squeeze(1)])
        self.conv1dblock = nn.Sequential(*[Conv1d(c_in, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU()])
        self.conv1d1x1block = nn.Sequential(*[nn.Conv1d(nf, 1, kernel_size=1), nn.ReLU()])
        self.concat = Concat()
        self.conv1d = nn.Sequential(*[Conv1d(c_in + 1, nf, kernel_size=window_size, padding='same'), BatchNorm(nf, ndim=1), nn.ReLU()])
    def forward(self, x):
        # 2D branch: add a channel dim, convolve, collapse back to (bs, c_in, seq_len).
        x1 = self.conv2dblock(x)
        x1 = self.conv2d1x1block(x1)
        # 1D branch over time, collapsed to a single channel.
        x2 = self.conv1dblock(x)
        x2 = self.conv1d1x1block(x2)
        # Concatenate (time branch first, as in the original) and fuse.
        out = self.concat((x2, x1))
        out = self.conv1d(out)
        return out
# +
from tsai.data.basics import *
from tsai.learner import *
# Smoke test on the NATOPS UCR dataset: build dataloaders and check the model
# produces (batch, classes) outputs under several head configurations.
dsid = 'NATOPS'
X, y, splits = get_UCR_data(dsid, split_data=False)
tfms = [None, TSCategorize()]
dls = get_ts_dls(X, y, splits=splits, tfms=tfms)
model = XCMPlus(dls.vars, dls.c, dls.len)
learn = ts_learner(dls, model, metrics=accuracy)
xb, yb = dls.one_batch()
bs, c_in, seq_len = xb.shape
c_out = len(np.unique(yb.cpu().numpy()))
model = XCMPlus(c_in, c_out, seq_len, fc_dropout=.5)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len, concat_pool=True)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model = XCMPlus(c_in, c_out, seq_len)
test_eq(model.to(xb.device)(xb).shape, (bs, c_out))
model
# -
model.show_gradcam(xb[0], yb[0])
# Multi-dimensional output head (d=(5, 2)) on random data.
bs = 16
n_vars = 3
seq_len = 12
c_out = 1
xb = torch.rand(bs, n_vars, seq_len)
new_head = partial(conv_lin_nd_head, d=(5, 2))
net = XCMPlus(n_vars, c_out, seq_len, custom_head=new_head)
print(net.to(xb.device)(xb).shape)
net.head
# Swap in a pooling head after construction.
bs = 16
n_vars = 3
seq_len = 12
c_out = 2
xb = torch.rand(bs, n_vars, seq_len)
net = XCMPlus(n_vars, c_out, seq_len)
change_model_head(net, create_pool_plus_head, concat_pool=False)
print(net.to(xb.device)(xb).shape)
net.head
#hide
# Export this notebook's `#export` cells into the tsai package source tree.
from tsai.imports import *
from tsai.export import *
nb_name = get_nb_name()
# nb_name = "114b_models.XCMPlus.ipynb"
create_scripts(nb_name);
|
nbs/114b_models.XCMPlus.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # # Modelling
# +
#importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import matplotlib.ticker as ticker
import matplotlib.ticker as plticker
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
# +
#load data
world_cup = pd.read_csv('Datasets/T20TeamStats.csv')
results = pd.read_csv('Datasets/T20Records.csv')
ranks = pd.read_csv('Datasets/T20Ranking.csv')
# -
world_cup.head()
# Previous records
results
#Removing the record with ties
data=results[results.Winner!='tie']
# Dropping Venue column ,Since Only Australia will host the World Cup so home team advantage not possible for other team
# NOTE(review): dropping inplace on a filtered slice of `results` triggers
# pandas' SettingWithCopyWarning; `data = results[...].copy()` would be safer.
data.drop(columns=['Venue'],inplace=True)
#Dropping the date column
data.drop(columns=['Date'],inplace=True)
data.head()
#Filtering the records for only teams that will play the world cup
# (team names in the CSV carry leading/trailing spaces -- kept intentionally,
# the loops below rely on strip()/rstrip() to compare them)
worldcup_teams = [' England ', ' South Africa ', ' Scotland ', ' West Indies ',
                  ' Pakistan ', ' New Zealand ', ' Sri Lanka ', ' Afghanistan ',
                  ' Australia ', ' Bangladesh ', ' India ',' Namibia ']
df_teams_1 = data[data['Team1'].isin(worldcup_teams)]
df_teams_2 = data[data['Team2'].isin(worldcup_teams)]
df_teams = pd.concat((df_teams_1, df_teams_2))
# NOTE(review): drop_duplicates() returns a new frame that is discarded here;
# assign it back if deduplication is actually intended.
df_teams.drop_duplicates()
df_teams.count()
#Adding column 'Count': It will record the team which won the most matches from its previous 6 matches (i.e. if India have won 4 matches from its previous 6 matches and Pakistan have won 3 matches than count which select India)
# and 'Rank': The team which is having higher rank will be named in this column
df_teams['Count']=0
df_teams['Rank']=0
df_team= df_teams.reset_index(drop=True)
df_team
# +
#filling the count and rank column
# For each of the first 700 matches: scan each side's appearances in the next
# 49 rows (up to 6 of them) and record which side won more of those matches
# ('Count'), plus which side holds the better ICC ranking ('Rank').
for i in range(700):
    dt1 = df_team['Team1'].iloc[i]
    dt2 = df_team['Team2'].iloc[i]
    c1 = c2 = w1 = w2 = 0
    # NOTE(review): the j window assumes rows i+1 .. i+49 all exist -- confirm
    # df_team is long enough for the largest i used here.
    for j in range(i + 1, i + 50):
        if ((df_team['Team1'].iloc[j] == dt1) or (df_team['Team2'].iloc[j] == dt1.rstrip())) and c1 < 6:
            if df_team['Winner'].iloc[j] == dt1.strip():
                w1 += 1
            c1 += 1
    for j in range(i + 1, i + 50):
        if ((df_team['Team1'].iloc[j] == (dt2 + ' ')) or (df_team['Team2'].iloc[j] == dt2)) and c2 < 6:
            if df_team['Winner'].iloc[j] == dt2.strip():
                w2 += 1
            c2 += 1
    # Bug fix: assign via .loc instead of chained `df_team['Count'].iloc[i] = ...`;
    # chained assignment can silently write to a temporary (SettingWithCopy).
    # df_team has a fresh RangeIndex (reset_index above), so label i == position i.
    df_team.loc[i, 'Count'] = dt2 if w2 > w1 else dt1
    r1 = r2 = 0
    # Look up both teams' rankings (merged from two identical loops).
    for k in range(12):
        if dt1.strip() == ranks['Team'].iloc[k]:
            r1 = ranks['Rank'].iloc[k]
        if dt2.strip() == ranks['Team'].iloc[k]:
            r2 = ranks['Rank'].iloc[k]
    # A larger rank number is worse, so dt1 is the better-ranked side when r2 > r1.
    df_team.loc[i, 'Rank'] = dt1 if r2 > r1 else dt2
# -
# Keep the 700 rows whose Count/Rank columns were just filled.
data=df_team.iloc[0:700]
data
# +
# Encode teams as binary: Team1 -> 0, Team2 -> 1, then recode Winner/Count/
# Rank to whichever code matches the team they currently name.
# NOTE(review): Winner values appear to be stripped team names while
# Count/Rank hold the padded originals, hence .strip() only on the Winner
# comparison — confirm against the data.
for i in range(700):
    dt1=data['Team1'].iloc[i]
    dt2=data['Team2'].iloc[i]
    data['Team1'].iloc[i]=0
    data['Team2'].iloc[i]=1
    if data['Winner'].iloc[i]==dt1.strip():
        data['Winner'].iloc[i]=data['Team1'].iloc[i]
    else:
        data['Winner'].iloc[i]=data['Team2'].iloc[i]
    if data['Count'].iloc[i]==dt1:
        data['Count'].iloc[i]=data['Team1'].iloc[i]
    else:
        data['Count'].iloc[i]=data['Team2'].iloc[i]
    if data['Rank'].iloc[i]==dt1:
        data['Rank'].iloc[i]=data['Team1'].iloc[i]
    else:
        data['Rank'].iloc[i]=data['Team2'].iloc[i]
# -
data
#importing the fixture file of upcoming world cup
fixtures = pd.read_csv('Datasets/T20Fixture.csv')
fixtures
#selecting the record till League matches
fixtures=fixtures.iloc[0:30]
fixtures
#Dropping the Date,Column1 and Venue columns
fixtures.drop(columns=[' Date','Column1','Venue'],inplace=True)
#Seperating the output i.e. Winner column for testing model
y=data["Winner"]
y = y.astype(float, errors = 'raise')
#dropping the target column and creating features file
X=data.drop('Winner',axis=1)
X['Team1'] = X.Team1.astype(float)
X['Team2'] = X.Team2.astype(float)
X['Count'] = X.Count.astype(float)
X['Rank'] = X.Rank.astype(float)
#importing libraries for SVM(Support Vector Machine) model
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2)
model = SVC(kernel = 'linear', C = 1)
model.fit(X_train, y_train)
svm_pred = model.predict(X_test)
#Accuracy score
accuracy = model.score(X_test, y_test)
accuracy
U=fixtures.drop('Result',axis=1)
U['Count']=0
U['Rank']=0
U
# +
# We have assumed from recent performance of teams in qualifying matches that Group A winner : Sri Lanka
# Group A Runner Up : Namibia , Group B winner : West Indies and Group B runner Up : Scotland
# Replace the qualifier placeholders in the fixture table with the assumed
# teams. The replacement strings carry a trailing space to match the padded
# team-name convention used elsewhere.
for i in range(30):
    if (U['Team_1'].iloc[i].strip()=="Group A Winner"):
        U['Team_1'].iloc[i]="Sri Lanka "
    elif (U['Team_1'].iloc[i].strip()=="Group B Winner"):
        U['Team_1'].iloc[i]="West Indies "
    elif (U['Team_1'].iloc[i].strip()=="Group A Runner Up"):
        U['Team_1'].iloc[i]="Namibia "
    elif (U['Team_1'].iloc[i].strip()=="Group B Runner Up"):
        U['Team_1'].iloc[i]="Scotland "
    else:
        continue
# Same substitution for the Team_2 column.
for i in range(30):
    if (U['Team_2'].iloc[i].strip()=="Group A Winner"):
        U['Team_2'].iloc[i]="Sri Lanka "
    elif (U['Team_2'].iloc[i].strip()=="Group B Winner"):
        U['Team_2'].iloc[i]="West Indies "
    elif (U['Team_2'].iloc[i].strip()=="Group A Runner Up"):
        U['Team_2'].iloc[i]="Namibia "
    elif (U['Team_2'].iloc[i].strip()=="Group B Runner Up"):
        U['Team_2'].iloc[i]="Scotland "
    else:
        continue
# For each fixture, assign both 'Rank' and 'Count' to the better-ranked team
# (presumably rank stands in for recent form on future fixtures — TODO confirm).
for i in range(30):
    dt1=U['Team_1'].iloc[i]
    dt2=U['Team_2'].iloc[i]
    r1=0
    r2=0
    for k in range(12):
        if (dt1.strip()==ranks['Team'].iloc[k]):
            r1=ranks['Rank'].iloc[k]
    for k in range(12):
        if (dt2.strip()==ranks['Team'].iloc[k]):
            r2=ranks['Rank'].iloc[k]
    if(r2>r1):
        U['Rank'].iloc[i]=dt1
        U['Count'].iloc[i]=dt1
    else:
        U['Rank'].iloc[i]=dt2
        U['Count'].iloc[i]=dt2
# -
# Binary-encode the fixture table exactly like the training data:
# Team_1 -> 0, Team_2 -> 1, and recode Count/Rank to the matching code.
for i in range(30):
    dt1=U['Team_1'].iloc[i]
    dt2=U['Team_2'].iloc[i]
    U['Team_1'].iloc[i]=0
    U['Team_2'].iloc[i]=1
    if U['Count'].iloc[i]==dt1:
        U['Count'].iloc[i]=U['Team_1'].iloc[i]
    else:
        U['Count'].iloc[i]=U['Team_2'].iloc[i]
    if U['Rank'].iloc[i]==dt1:
        U['Rank'].iloc[i]=U['Team_1'].iloc[i]
    else:
        U['Rank'].iloc[i]=U['Team_2'].iloc[i]
U
# Cast all feature columns to float so dtypes match the training matrix X.
U['Team_1'] = U.Team_1.astype(float)
U['Team_2'] = U.Team_2.astype(float)
U['Count'] = U.Count.astype(float)
U['Rank'] = U.Rank.astype(float)
#Appling SVM model on World Cup schedule
svm_pred = model.predict(U)
#Predicted vales 0: Team1 and 1:Team2
svm_pred
#Linking the predicted values with records
print("Results till League matches\n")
for i in range(30):
if (svm_pred[i]==1.0):
print(str(i+1)+"."+fixtures['Team_1'].iloc[i] + " Vs " + fixtures['Team_2'].iloc[i] + " : " + fixtures['Team_2'].iloc[i] )
print()
else:
print(str(i+1)+"."+fixtures['Team_1'].iloc[i] + " Vs " + fixtures['Team_2'].iloc[i] + " : " + fixtures['Team_1'].iloc[i] )
print()
# +
#Point table after league matches
print("From the Modelling the Point table as follows:\n")
print("Group 1\n")
print("Team\t\t\tWin")
print("Afghanistan\t\t0")
print("Australia\t\t3")
print("England\t\t\t4")
print("New Zealand\t\t4")
print("Group A Winner\t\t2")
print("Group B Runner Up\t2")
print("\nGroup 2\n")
print("Team\t\t\tWin")
print("Bangladesh\t\t0")
print("India\t\t\t5")
print("Pakistan\t\t4")
print("South Africa\t\t3")
print("Group B Winner\t\t2")
print("Group A Runner Up\t1")
# -
V=U.iloc[18:20]
V['Count'].iloc[1]=0.0
#Predicting the semi-final winners
svm_pred = model.predict(V)
svm_pred
print("Semi Finals\n")
print("India(0) Vs England(1) : India")
print("New Zealand(0) Vs Pakistan(1) : New Zealand")
#Predicting the Final Winner
V=U.iloc[18:19]
svm_pred = model.predict(V)
svm_pred
print("Final\n")
print("India(0) Vs New Zealand(0) : India(0)\n")
print("Probable Winner of World Cup : India")
|
Notebooks/Modelling.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Turn this addition off with the DataScience.changeDirOnImportExport setting
# ms-python.python added
# +
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
# +
from lensutils import read_data as read
from lensutils import SingleLens as slf
# +
data = read('./single/',t_range=(7940,8040))
# -
fitter = slf(data,[1.927,7984.64, 9.964])
# # Example with one data source
# Pull one survey's light curve and rescale it onto the model's
# magnification scale via a linear (baseline, amplitude) fit.
data_key = 'KMT-C31-'
t = data[data_key][0]    # observation times
obs = data[data_key][1]  # observed values (flux or magnitudes per lensutils — TODO confirm)
err = data[data_key][2]  # per-point uncertainties
t, _, _ = fitter.data[data_key]
coeffs, _ = fitter.linear_fit(data_key,fitter.magnification(t))
fx = coeffs[0]+coeffs[1]*fitter.magnification(t)  # model mapped into data units
obsl = (obs - coeffs[0])/coeffs[1] # This gives only magnification from baseline 1
err1 = err/coeffs[1]
plt.plot(t,obsl,'.')
plt.errorbar(t,obsl,err1 , fmt = '.')
mag = fitter.magnification(data['KMT-C31-'][0])
chi = (mag-obsl)**2/err1**2  # per-point chi-squared contribution vs the model
from mpl_toolkits.mplot3d import axes3d, Axes3D
# #%matplotlib notebook
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(mag,obsl,chi,c='r')
plt.show()
X = np.hstack((mag.reshape(len(mag),1),obsl.reshape(len(obsl),1),chi.reshape(len(chi),1)))
A = chi.reshape(len(chi),1)
# +
# fit the model for outlier detection (default)
clf = LocalOutlierFactor(n_neighbors=10,metric='manhattan', contamination=0.5,)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
# +
y_pred = clf.fit_predict(X)
X_scores = clf.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t, X[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(t, X[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# -
plt.title("Local Outlier Factor (LOF)")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
#plt.scatter(X[y_pred==1, 0], X[y_pred==1, 1],marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.scatter(t, X[:, 1], color='k', s=3., label='Data points')
plt.scatter(t[y_pred!=1], X[y_pred!=1, 1], s=100, edgecolors='r',
facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
# +
y_pred = clf.fit_predict(A)
X_scores = clf.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t, A, color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(t, A, s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# -
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t, X[:, 1], color='k', s=3., label='Data points')
plt.plot(t,mag, color='b', label='Model')
plt.scatter(t[y_pred!=1], X[y_pred!=1, 1], s=100, edgecolors='r',
facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t, X[:, 1], color='k', s=3., label='Data points')
plt.plot(t,mag, color='b', label='Model')
plt.scatter(t[X_scores<-1.5], X[X_scores<-1.5,1], s=100, edgecolors='r',
facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
plt.plot(t,-X_scores,'.')
plt.ylim(0,2)
plt.plot(t,chi,'.')
plt.ylim(-1,1)
# The following plots are from the view of the linear relation between the data and the model.
# +
mag = fitter.magnification(data['KMT-C31-'][0])
# +
plt.plot(mag,obsl,'.')
# +
Y = np.hstack((mag.reshape(len(mag),1),obsl.reshape(len(obsl),1)))
# +
# fit the model for outlier detection (default)
clf2 = LocalOutlierFactor(n_neighbors=20,metric='chebyshev', contamination=0.1,)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
# +
y_pred = clf2.fit_predict(Y)
X_scores = clf2.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(X[:, 0], X[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(X[:, 0], X[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# +
plt.title("Local Outlier Factor (LOF)")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
#plt.scatter(X[y_pred==1, 0], X[y_pred==1, 1],marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.scatter(X[:, 0], X[:, 1], color='k', s=3., label='Data points')
plt.scatter(X[y_pred != 1, 0], X[y_pred != 1, 1], s=100,marker='x', c='r' ,label='Outlier scores')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
# -
# #The Following uses all the datasets for the single example
# +
# Combine every survey in `data`: calibrate each light curve onto the model
# scale, then concatenate times, rescaled observations and uncertainties.
t_vec = []
obs_vc = []
err_vec = []
for data_key in data.keys():
    print(data_key)
    t, _, err = fitter.data[data_key]
    obs = data[data_key][1]
    coeffs, _ = fitter.linear_fit(data_key,fitter.magnification(t))
    obsl = (obs - coeffs[0])/coeffs[1]  # remove baseline, scale to magnification units
    err1 = err/coeffs[1]
    t_vec = np.append(t_vec,t)
    obs_vc = np.append(obs_vc,obsl)
    err_vec = np.append(err_vec,err1)
# +
obs_vc = obs_vc[t_vec.argsort()]
err_vec = err_vec[t_vec.argsort()]
t_vec = np.sort(t_vec)
# +
plt.errorbar(t_vec,obs_vc,err_vec,fmt='.')
# +
magl = fitter.magnification(t_vec)
chi2 = (magl-obs_vc)**2/err_vec**2
# +
plt.plot(t_vec,obs_vc,'.')
plt.plot(t_vec,magl,'--')
# +
plt.plot(magl,obs_vc,'.')
# -
# #%matplotlib notebook
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(magl,obs_vc,chi2,c='r')
plt.show()
# +
Z = np.hstack((magl.reshape(len(magl),1),obs_vc.reshape(len(obs_vc),1),chi2.reshape(len(chi2),1)))
# +
# fit the model for outlier detection (default)
clf2 = LocalOutlierFactor(n_neighbors=20,metric='chebyshev', contamination=0.01,)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
# +
y_pred = clf2.fit_predict(Z)
X_scores = clf2.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(t_vec, Z[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.plot(t_vec,magl,'--')
#plt.vlines(t_vec[X_scores==X_scores.max()], 1 , 1.05)
plt.vlines(t_vec[X_scores==X_scores.min()], 1 , 1.01367504)
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# +
plt.title("Local Outlier Factor (LOF)")
plt.scatter(Z[:, 0], Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(Z[:, 0], Z[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.plot(magl,obs_vc,'--')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# +
plt.title("Local Outlier Factor (LOF)")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
#plt.scatter(X[y_pred==1, 0], X[y_pred==1, 1],marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
plt.scatter(t_vec[y_pred!=1], Z[y_pred!=1, 1], s=50,marker='o',edgecolors='r',facecolors='none' ,label='Outlier scores')
plt.plot(t_vec,magl, label='Model')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
# -
# This is the first binary example, an extreme case.
# +
bindata = read('./bin1/',t_range=(7800,7950),max_uncertainty=1)
# -
binfitter = slf(bindata,[4.366,7884.99, 6.197])
# +
# Same multi-survey calibration/merge as above, for the first binary-lens
# event, followed by a time-sorted data-vs-model plot.
t_vec = []
obs_vc = []
err_vec = []
for data_key in bindata.keys():
    print(data_key)
    t, _, err = binfitter.data[data_key]
    obs = bindata[data_key][1]
    coeffs, _ = binfitter.linear_fit(data_key,binfitter.magnification(t))
    obsl = (obs - coeffs[0])/coeffs[1]
    err1 = err/coeffs[1]
    t_vec = np.append(t_vec,t)
    obs_vc = np.append(obs_vc,obsl)
    err_vec = np.append(err_vec,err1)
# Sort all three arrays by observation time.
obs_vc = obs_vc[t_vec.argsort()]
err_vec = err_vec[t_vec.argsort()]
t_vec = np.sort(t_vec)
magl = binfitter.magnification(t_vec)
chi2 = (magl-obs_vc)**2/err_vec**2  # per-point chi-squared
plt.plot(t_vec,obs_vc,'.')
plt.plot(t_vec,magl,'--')
# -
# #%matplotlib notebook
# %matplotlib inline
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(magl,obs_vc,chi2,c='r')
plt.show()
Z = np.hstack((magl.reshape(len(magl),1),obs_vc.reshape(len(obs_vc),1),chi2.reshape(len(chi2),1)))
# +
# Fit a fresh LOF model for this binary-lens dataset.
clf3 = LocalOutlierFactor(n_neighbors=20,metric='euclidean', contamination=0.1,)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
# +
# BUG FIX: this cell previously called clf2 (the chebyshev/0.01 estimator
# from the single-lens section), leaving the freshly built clf3 unused.
y_pred = clf3.fit_predict(Z)
X_scores = clf3.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(t_vec, Z[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.plot(t_vec,magl,'--')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# -
plt.title("Local Outlier Factor (LOF)")
plt.scatter(Z[:, 0], Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(Z[:, 0], Z[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.plot(magl,obs_vc,'--')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
plt.title("Local Outlier Factor (LOF)")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
#plt.scatter(X[y_pred==1, 0], X[y_pred==1, 1],marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
plt.scatter(t_vec[y_pred!=1], Z[y_pred!=1, 1], s=50,marker='o',edgecolors='r',facecolors='none' ,label='Outlier scores')
plt.plot(t_vec,magl, label='Model')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
plt.plot(t_vec,-X_scores,".")
plt.ylim(0,1.1)
# Second example of anomalous fit. This case is very close to a good fit.
# +
bindata2 = read('./bin2/',t_range=(7875,8025),max_uncertainty=1)
# +
binfitter2 = slf(bindata2,[0.27,7947.29, 20.33])
# +
t_vec = []
obs_vc = []
err_vec = []
for data_key in bindata2.keys():
print(data_key)
t, _, err = binfitter2.data[data_key]
obs = bindata2[data_key][1]
coeffs, _ = binfitter2.linear_fit(data_key,binfitter2.magnification(t))
obsl = (obs - coeffs[0])/coeffs[1]
err1 = err/coeffs[1]
t_vec = np.append(t_vec,t)
obs_vc = np.append(obs_vc,obsl)
err_vec = np.append(err_vec,err1)
obs_vc = obs_vc[t_vec.argsort()]
err_vec = err_vec[t_vec.argsort()]
t_vec = np.sort(t_vec)
magl = binfitter2.magnification(t_vec)
chi2 = (magl-obs_vc)**2/err_vec**2
plt.errorbar(t_vec,obs_vc,err_vec, fmt='.')
plt.plot(t_vec,magl,'--')
# -
# #%matplotlib notebook
fig = plt.figure()
ax = Axes3D(fig)
ax.scatter(magl,obs_vc,chi2,c='r')
plt.show()
# +
Z = np.hstack((magl.reshape(len(magl),1),obs_vc.reshape(len(obs_vc),1),chi2.reshape(len(chi2),1)))
# +
# fit the model for outlier detection (default)
clf4 = LocalOutlierFactor(n_neighbors=20,metric='chebyshev', contamination=0.1)
# use fit_predict to compute the predicted labels of the training samples
# (when LOF is used for outlier detection, the estimator has no predict,
# decision_function and score_samples methods).
# +
y_pred = clf4.fit_predict(Z)
X_scores = clf4.negative_outlier_factor_
plt.title("Local Outlier Factor (LOF)")
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(t_vec, Z[:, 1], s=1000 * radius, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.plot(t_vec,magl,'--')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
legend.legendHandles[0]._sizes = [10]
legend.legendHandles[1]._sizes = [20]
plt.show()
# +
plt.title("Local Outlier Factor (LOF)")
#plt.scatter(Z[:, 0], Z[:, 1], color='k', s=3., label='Data points')
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
plt.scatter(Z[y_pred!=1, 0], Z[y_pred!=1, 1], s=50, edgecolors='r',
facecolors='none', label='Outlier scores')
#plt.scatter(X[:, 0], X[:, 1], y_pred,marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.errorbar(magl,obs_vc,fmt='.')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
#legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
# +
plt.title("Local Outlier Factor (LOF)")
# plot circles with radius proportional to the outlier scores
radius = (X_scores.max() - X_scores) / (X_scores.max() - X_scores.min())
#plt.scatter(X[y_pred==1, 0], X[y_pred==1, 1],marker='+', edgecolors='r',
# facecolors='none', label='Outlier scores')
plt.scatter(t_vec, Z[:, 1], color='k', s=3., label='Data points')
plt.scatter(t_vec[y_pred!=1], Z[y_pred!=1, 1], s=50,marker='o',edgecolors='r',facecolors='none' ,label='Outlier scores')
plt.plot(t_vec,magl,'--')
plt.axis('tight')
#plt.xlim((-5, 5))
#plt.ylim((-5, 5))
legend = plt.legend(loc='upper left')
#legend.legendHandles[0]._sizes = [10]
#legend.legendHandles[1]._sizes = [20]
plt.show()
# -
plt.plot(t_vec,-X_scores,".")
|
LOFexamplenotebook.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
pwd
# Dependencies and Setup
# NOTE(review): pathlib, csv, scipy.stats, calendar and strptime are imported
# but never used in this notebook.
import pandas as pd
import pathlib
import csv
import scipy.stats as st
import numpy as np
import calendar
from time import strptime
# Path
longlat_path = "../raw_data/us-zip-code-latitude-and-longitude.csv"
# Read csv (file is semicolon-delimited)
longlat_df = pd.read_csv(longlat_path, sep = ';')
pd.DataFrame(longlat_df)
# Keep California rows only.
only_CA_df = longlat_df.loc[(longlat_df['State'] == 'CA')]
only_CA_df
# Rename to the column names expected downstream.
rename_new_df = only_CA_df.rename(columns={"Zip":"zip", "Latitude": "lat", "Longitude":"lng"})
rename_new_df
# Delete extraneous columns
lnglat_CA = rename_new_df.drop(columns= [
    "Timezone"
    , "Daylight savings time flag"
    , "geopoint"
    , "City"
    , "State"
])
lnglat_CA
# Save as csv
lnglat_CA.to_csv("../cleaned_data/lnglat_CA.csv", index=False)
|
Data/cleaning_data_wrbk/longlat_information.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ### ASCTB - AZ
# ### Perfect Match in AZ
import requests
import simplejson as json
import pandas as pd
import numpy as np
import os
import json
asctb_kidney_all_cts_label_unique=pd.read_csv("./Data/asctb_kidney_all_cts_label_unique.csv")
az_kidney_all_cts_label_unique=pd.read_csv("./Data/az_kidney_all_cts_label_unique.csv")
asctb_kidney_all_cts_label_unique
az_kidney_all_cts_label_unique[az_kidney_all_cts_label_unique["CT/ID"]=="CL:1001431"]
def check_in_az(cl_asctb, i):
    """Record whether ASCTB term *cl_asctb* (ASCTB row *i*) has an exact
    CT/ID match in the Azimuth table.

    Appends to the module-level lists: (az_row, asctb_row) on a hit,
    not_matching on a miss.
    """
    az_ids = az_kidney_all_cts_label_unique['CT/ID']
    for j in range(len(az_ids)):
        if az_ids[j] == cl_asctb:
            az_row.append(j)
            asctb_row.append(i)
            return
    not_matching.append(i)
# +
az_row=[]
asctb_row=[]
not_matching=[]
for i in range(len(asctb_kidney_all_cts_label_unique['CT/ID'])):
if type(asctb_kidney_all_cts_label_unique['CT/ID'][i])!=float and asctb_kidney_all_cts_label_unique['CT/ID'][i][:3]=="CL:":
check_in_az(asctb_kidney_all_cts_label_unique['CT/ID'][i],i)
else:
not_matching.append(i)
# +
az_matches=az_kidney_all_cts_label_unique.loc[az_row]
asctb_matches=asctb_kidney_all_cts_label_unique.loc[asctb_row]
az_matches.reset_index(drop=True,inplace=True)
asctb_matches.reset_index(drop=True,inplace=True)
# +
az_matches.rename(columns = {"CT/ID":"AZ.CT/ID","CT/LABEL":"AZ.CT/LABEL"},inplace = True)
asctb_matches.rename(columns = {"CT/ID":"ASCTB.CT/ID","CT/LABEL":"ASCTB.CT/LABEL"},inplace = True)
perfect_matches=pd.concat([asctb_matches,az_matches],axis=1)
perfect_matches
# -
perfect_matches.to_csv("./Data/Azimuth_perfect_match_ASCTB.csv",index=False)
asctb_mismatches=asctb_kidney_all_cts_label_unique.loc[not_matching]
asctb_mismatches.reset_index(drop=True,inplace=True)
asctb_mismatches
# ### CTs present in ASCTB but not in Azimuth
print("No. of CTs in ASCTB and not in Azimuth",len(asctb_mismatches))
print("No. of CTs in ASCTB present in Azimuth",len(perfect_matches))
print("Total CTs in ASCTB",len(asctb_kidney_all_cts_label_unique))
# ### Incorrect CTs in ASCTB
def incorrect_cts(cl_asctb, i):
    """Query EBI OLS for the CL term *cl_asctb* and bucket row *i* by
    whether the term resolves (found_in_ols) or not (not_found_in_ols).
    """
    iri = cl_asctb.replace(":", "_")
    url = "http://www.ebi.ac.uk/ols/api/ontologies/cl/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F"
    headers = {
        'Accept': 'application/json'
    }
    response = requests.request("GET", url + iri, headers=headers, data={})
    if response.status_code == 200:
        found_in_ols.append(i)
    else:
        not_found_in_ols.append(i)
# +
found_in_ols=[]
not_found_in_ols=[]
for i in range(len(asctb_mismatches['CT/ID'])):
if pd.isnull(asctb_mismatches['CT/ID'][i]):
not_found_in_ols.append(i)
else:
incorrect_cts(asctb_mismatches['CT/ID'][i],i)
# +
asctb_not_found_in_ols=asctb_mismatches.loc[not_found_in_ols]
asctb_not_found_in_ols.reset_index(drop=True,inplace=True)
asctb_mismatch_az = asctb_mismatches.loc[found_in_ols]
asctb_mismatch_az.reset_index(drop=True,inplace=True)
# -
asctb_not_found_in_ols
asctb_mismatch_az
# ### Traversing up ASCTB
def check_in_az(cl_asctb, i, all_links_asctb):
    """Look for *cl_asctb* in the Azimuth table; on a miss, continue the
    upward ontology walk via ols_call.

    On a hit, records the (ASCTB row, Azimuth row) pair and the hierarchy
    walked so far in the module-level bookkeeping lists.
    """
    az_ids = az_kidney_all_cts_label_unique['CT/ID']
    for j in range(len(az_ids)):
        if az_ids[j] == cl_asctb:
            tree_match_asctb.append(i)
            tree_match_az.append(j)
            hierarchy_list.append([[hierarchy], [i], [j]])
            print(cl_asctb, az_ids[j], "Match found")
            return
    print(cl_asctb)
    ols_call(cl_asctb, i, all_links_asctb)
def ols_call(cl_asctb,i,all_links_asctb):
    """Fetch the parent of the current CL term from EBI OLS and continue the
    upward traversal, recording dead ends in the module-level lists
    (tree_not_match, hierarchy_list) and the walked path in `hierarchy`.
    """
    # NOTE(review): `url` and `payload` mirror the module-level setup; the
    # request below actually follows the HAL 'parents' link, not `url`.
    url = "http://www.ebi.ac.uk/ols/api/ontologies/cl/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F"
    payload={}
    headers = {
        'Accept': 'application/json'
    }
    #ASCTB
    try:
        response = requests.request("GET", all_links_asctb['parents']['href'], headers=headers, data=payload)
    except:  # NOTE(review): bare except — also swallows network errors, not just the missing-'parents' KeyError
        print("No parent")
        tree_not_match.append(i)
        hierarchy_list.append([[hierarchy],[i]])
        return
    if response.status_code!=200:
        print("Status !=200")
        tree_not_match.append(i)
        hierarchy_list.append([[hierarchy],[i]])
    else:
        result_asctb= json.loads(response.text)
        all_links_asctb=result_asctb['_embedded']['terms'][0]['_links']
        ct_id_asctb=result_asctb['_embedded']['terms'][0]['obo_id']
        label_asctb=result_asctb['_embedded']['terms'][0]['label']
        hierarchy[ct_id_asctb]=label_asctb  # record this ancestor in the walked path
        # True when the ID's last seven characters are all '0' (the reversed
        # 7-char suffix equals '0000000', e.g. the root term CL:0000000).
        if ct_id_asctb[:-8:-1]=='0000000':
            hierarchy[ct_id_asctb]= label_asctb  # redundant re-assignment, kept as-is
            tree_not_match.append(i)
            hierarchy_list.append([[hierarchy],[i]])
            print(ct_id_asctb, "No match")
        else:
            hierarchy[ct_id_asctb]= label_asctb  # redundant re-assignment, kept as-is
            check_in_az(ct_id_asctb,i,all_links_asctb)
# +
tree_match_asctb=[]
tree_match_az=[]
tree_not_match=[]
hierarchy_list=[]
# tree_match_ct=[]
# tree_match_label=[]
url = "http://www.ebi.ac.uk/ols/api/ontologies/cl/terms?iri=http%3A%2F%2Fpurl.obolibrary.org%2Fobo%2F"
payload={}
#hierarchy={}
headers = {
'Accept': 'application/json'
}
for i in range(len(asctb_mismatch_az['CT/ID'])):
hierarchy={}
cl_asctb=asctb_mismatch_az['CT/ID'][i]
print(cl_asctb,"Original")
hierarchy[cl_asctb]=asctb_mismatch_az['CT/LABEL'][i]
cl_asctb=cl_asctb.replace(":","_")
response = requests.request("GET", url+cl_asctb, headers=headers, data=payload)
if response.status_code!=200:
tree_not_match.append(i)
hierarchy_list.append([hierarchy,i])
else:
result_asctb= json.loads(response.text)
all_links_asctb=result_asctb['_embedded']['terms'][0]['_links']
ols_call(cl_asctb,i,all_links_asctb)
# -
tree_match_asctb
tree_match_az
#tree_not_match=list(set(tree_not_match))
tree_not_match
hierarchy_list
found_match=[]
hier=[]
len_hier=[]
# asctb_row_match=[]
# az_row_match=[]
for i in range(len(hierarchy_list)):
if len(hierarchy_list[i])==3:
found_match.append("Yes")
else:
found_match.append("No")
len_hier.append((len(hierarchy_list[i][0][0])))
x=[]
for k,v in hierarchy_list[i][0][0].items():
abc=str(k + " (" + v + ")")
x.append(abc)
hier.append(x)
hier_1=[]
for item in hier:
hier_1.append(str(" >> ".join(item)))
hier_1=pd.DataFrame(hier_1,columns=["Hierarchy"])
found_match=pd.DataFrame(found_match,columns=["Match Found"])
len_hier=pd.DataFrame(len_hier,columns=["Hierarchy Length"])
# az_mismatch_asctb_all= pd.DataFrame(az_mismatch_asctb_all,columns=["AZ.CT/ID","AZ.CT/LABEL"])
asctb_mismatch_az.rename(columns = {"CT/ID":"ASCTB.CT/ID","CT/LABEL":"ASCTB.CT/LABEL"},inplace = True)
df_hier=pd.concat([asctb_mismatch_az,found_match,len_hier,hier_1],axis=1)
df_hier
df_hier.to_csv("./Data/Hier_asctb.csv",index=False)
# NOTE(review): notebook cell-order artifact — `az_final_matches` is not
# defined until the pd.concat below, so running this file top-to-bottom
# raises NameError here. Confirm intended execution order before refactoring.
az_final_matches.to_csv("./Data/Incorrect data/Azimuth_detailed_CTs_than_ASCTB.csv",index=False)
# NOTE(review): these resets clear the match lists BEFORE they are consumed
# by the .loc[] selections below; run sequentially, the "tree" frames come
# out empty. Presumably the traversal cell was re-run in between — verify.
tree_match_asctb=[]
tree_match_az=[]
tree_not_match=[]
hierarchy_list=[]
# +
# Build the side-by-side table of ASCTB terms matched to Azimuth via the
# ontology-tree traversal.
asctb_matches_tree=asctb_mismatch_az.loc[tree_match_asctb]
asctb_matches_tree.reset_index(drop=True,inplace=True)
az_matches_tree=az_kidney_all_cts_label_unique.loc[tree_match_az]
az_matches_tree.reset_index(drop=True,inplace=True)
az_matches_tree.rename(columns = {"CT/ID":"AZ.CT/ID","CT/LABEL":"AZ.CT/LABEL"},inplace = True)
asctb_matches_tree.rename(columns = {"CT/ID":"ASCTB.CT/ID","CT/LABEL":"ASCTB.CT/LABEL"},inplace = True)
az_final_matches =pd.concat([asctb_matches_tree,az_matches_tree],axis=1)
az_final_matches
# -
az_final_matches.to_csv("./Data/ASCTB_detailed_CTs_than_Azimuth.csv",index=False)
# +
asctb_mismatches_final=asctb_mismatch_az.loc[tree_not_match]
asctb_mismatches_final.reset_index(drop=True,inplace=True)
asctb_mismatches_final.rename(columns = {"CT/ID":"ASCTB.CT/ID","CT/LABEL":"ASCTB.CT/LABEL"},inplace = True)
asctb_mismatches_final
# -
asctb_mismatches_final.to_csv("./Data/Final_ASCTB_CTs_with_no_match_in_Azimuth.csv",index=False)
|
Python/.ipynb_checkpoints/OLS matching 1-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import requests
import pandas as pd
from time import sleep
NFT_CONTRACT = "<KEY>"
OFFSET = 158292848
def get_message_from_tx(tx):
    """Extract the execute-contract messages from one transaction record.

    Parameters
    ----------
    tx : dict
        A transaction as returned by the FCD /v1/txs endpoint.

    Returns
    -------
    list[dict]
        One ``{'type', 'execute_msg'}`` dict per message that carries an
        ``execute_msg`` payload; other message types are skipped.
    """
    extracted = []
    for msg in tx['tx']['value']['msg']:
        value = msg['value']
        if 'execute_msg' in value:
            extracted.append({
                'type': msg['type'],
                'execute_msg': value['execute_msg'],
            })
    return extracted
def get_all_messages(nft_contract, offset = 0 ):
    """Page through the Terra FCD transaction API for *nft_contract* and
    collect every execute message, keeping only ``mint_nft`` calls.

    Parameters
    ----------
    nft_contract : str
        Contract address passed as the ``account`` query parameter.
    offset : int
        Pagination cursor to start from (FCD ``offset`` parameter).

    Returns
    -------
    list[dict]
        ``{'type', 'execute_msg'}`` dicts whose execute_msg contains a
        ``mint_nft`` key.
    """
    m_list = []
    while True:
        # Retry until the API answers 200 (log first, then back off —
        # previously the code slept 20s before printing why).
        while True:
            data = requests.get(f"https://fcd.terra.dev/v1/txs?offset={offset}&limit=100&account={nft_contract}")
            if data.status_code == 200:
                break
            print(data.text)
            print("retrying request in 20 secs")
            sleep(20)
        x = data.json()
        for tx in x['txs']:
            # extend() instead of the previous quadratic `m_list = m_list + ...`
            m_list.extend(get_message_from_tx(tx))
        if 'next' in x.keys():
            print(offset)
            offset = x['next']
        else:
            break
        sleep(1)  # small delay between pages to avoid rate limiting
    m_list = [i for i in m_list if 'mint_nft' in i['execute_msg']]
    return m_list
def generate_dataframe(message_list):
    """Flatten mint messages into a per-token DataFrame.

    NOTE: this definition is shadowed by a later ``generate_dataframe``
    further down the notebook; it is kept for reference.

    Mutates each message dict in place, lifting the mint_nft fields and the
    trait attributes to top-level keys, then returns a trimmed DataFrame
    with one row per token and a ``number_attributes`` count.
    """
    for msg in message_list:
        mint = msg['execute_msg']['mint_nft']
        ext = mint['extension']
        msg['token_id'] = mint['token_id']
        msg['name'] = ext['name']
        msg['image'] = ext['image']
        msg['attributes'] = ext['attributes']
        msg['description'] = ext['description']
        msg['token_uri'] = mint['token_uri']
    # Promote each trait to its own column ({'trait_type': ..., 'value': ...}).
    for msg in message_list:
        for trait in msg['attributes']:
            msg[trait['trait_type']] = trait['value']
    frame = pd.DataFrame(message_list)
    trait_columns = [
        'backgrounds', 'suits', 'species', 'face', 'hair', 'glasses',
        'headware', 'jewelry',
    ]
    # A token "has" a trait when its column is non-null for that row.
    frame['number_attributes'] = frame.notnull()[trait_columns].sum(axis=1)
    keep = (
        ['description', 'name', 'token_id', 'image', 'token_uri']
        + trait_columns
        + ['number_attributes']
    )
    return frame[keep]
def calculate_rarity(row, attribute_types):
    """Rarity score of one NFT row: the sum of inverse trait probabilities.

    ``row`` must expose an ``{attribute}_probability`` entry for every name
    in ``attribute_types``; rarer traits (lower probability) contribute more.
    """
    return sum(1 / row[f"{attribute}_probability"] for attribute in attribute_types)
def generate_dataframe(messages):
    """Build a rarity-ranked DataFrame from mint_nft messages.

    For each message's extension this adds an ``ipfs_url`` gateway link and
    promotes each trait to its own column, then computes per-trait
    probabilities, a ``rarity_score`` (sum of inverse probabilities via
    ``calculate_rarity``), and dense ``rarity_rankings`` starting at 1.
    """
    extensions = []
    for message in messages:
        ext = message["execute_msg"]["mint_nft"]["extension"]
        cid = ext["image"].replace("ipfs://","")
        # Public HTTP gateway link for the token image.
        ext['ipfs_url'] = f"https://cf-ipfs.com/ipfs/{cid}"
        for trait in ext["attributes"]:
            ext[trait["trait_type"]] = trait["value"]
        extensions.append(ext)
    frame = pd.DataFrame(extensions)
    # Assume every token carries the same trait types as the first one.
    trait_names = [t["trait_type"] for t in frame["attributes"][0]]
    for trait in trait_names:
        counts = (frame[trait]
                  .value_counts(dropna=False)
                  .rename_axis(trait)
                  .reset_index(name=f'{trait}_count'))
        counts[f'{trait}_probability'] = counts[f'{trait}_count'] / counts[f'{trait}_count'].sum()
        # Left-merge keeps the original row order and count.
        frame = frame.merge(counts, how="left", on=trait)
    frame["rarity_score"] = frame.apply(lambda r: calculate_rarity(r, trait_names), axis=1)
    frame = frame.sort_values("rarity_score", ascending=False)
    frame["rarity_rankings"] = range(1, len(frame) + 1)
    return frame
# Crawl the contract's full transaction history (network-bound; can take a
# long time) and write the rarity-ranked collection to <contract>.csv.
messages = get_all_messages(NFT_CONTRACT,offset=OFFSET)
df = generate_dataframe(messages)
df.to_csv(f"{NFT_CONTRACT}.csv",index=None)
|
Galactic Punks NFTs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9 (tensorflow)
# language: python
# name: tensorflow
# ---
# + [markdown] id="pQwmBz1GumJR"
# <a href="https://colab.research.google.com/github/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_3_transfer_nlp.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
#
# + [markdown] id="KBQzVEtJumJT"
# # T81-558: Applications of Deep Neural Networks
# **Module 9: Transfer Learning**
# * Instructor: [<NAME>](https://sites.wustl.edu/jeffheaton/), McKelvey School of Engineering, [Washington University in St. Louis](https://engineering.wustl.edu/Programs/Pages/default.aspx)
# * For more information visit the [class website](https://sites.wustl.edu/jeffheaton/t81-558/).
# + [markdown] id="Cwz1jhRaumJU"
# # Module 9 Material
#
# * Part 9.1: Introduction to Keras Transfer Learning [[Video]](https://www.youtube.com/watch?v=AtoeoNwmd7w&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_1_keras_transfer.ipynb)
# * Part 9.2: Keras Transfer Learning for Computer Vision [[Video]](https://www.youtube.com/watch?v=nXcz0V5SfYw&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_2_keras_xfer_cv.ipynb)
# * **Part 9.3: Transfer Learning for NLP with Keras** [[Video]](https://www.youtube.com/watch?v=PyRsjwLHgAU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_3_transfer_nlp.ipynb)
# * Part 9.4: Transfer Learning for Facial Feature Recognition [[Video]](https://www.youtube.com/watch?v=uUZg33DfCls&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_4_facial_points.ipynb)
# * Part 9.5: Transfer Learning for Style Transfer [[Video]](https://www.youtube.com/watch?v=pLWIaQwkJwU&list=PLjy4p-07OYzulelvJ5KVaT2pDlxivl_BN) [[Notebook]](https://github.com/jeffheaton/t81_558_deep_learning/blob/master/t81_558_class_09_5_style_transfer.ipynb)
# + [markdown] id="WPEQF9auumJU"
# # Google CoLab Instructions
#
# The following code ensures that Google CoLab is running the correct version of TensorFlow.
# + colab={"base_uri": "https://localhost:8080/"} id="rUpegFqQumJV" outputId="213c9008-d443-4819-dfc6-f26be8c665f9"
# Detect whether we are running inside Google CoLab: the %tensorflow_version
# magic exists only there, so a failure means a local environment.
try:
    # %tensorflow_version 2.x
    COLAB = True
    print("Note: using Google CoLab")
except Exception:  # narrowed from a bare except so Ctrl-C/SystemExit still propagate
    print("Note: not using Google CoLab")
    COLAB = False
# + [markdown] id="KtQUhDWNumJW"
# # Part 9.3: Transfer Learning for NLP with Keras
#
# You will commonly use transfer learning with Natural Language Processing (NLP). Word embeddings are a common means of transfer learning in NLP where network layers map words to vectors. Third parties trained neural networks on a large corpus of text to learn these embeddings. We will use these vectors as the input to the neural network rather than the actual characters of words.
#
# This course has an entire module covering NLP; however, we use word embeddings to perform sentiment analysis in this module. We will specifically attempt to classify if a text sample is speaking in a positive or negative tone.
#
# The following three sources were helpful for the creation of this section.
#
# * Universal sentence encoder [[Cite:cer2018universal]](https://arxiv.org/abs/1803.11175). arXiv preprint arXiv:1803.11175)
# * Deep Transfer Learning for Natural Language Processing: Text Classification with Universal Embeddings [[Cite:howard2018universal]](https://towardsdatascience.com/deep-transfer-learning-for-natural-language-processing-text-classification-with-universal-1a2c69e5baa9)
# * [Keras Tutorial: How to Use Google's Universal Sentence Encoder for Spam Classification](http://hunterheidenreich.com/blog/google-universal-sentence-encoder-in-keras/)
#
# These examples use TensorFlow Hub, which allows pretrained models to be loaded into TensorFlow easily. To install TensorHub use the following commands.
# + colab={"base_uri": "https://localhost:8080/"} id="eMp-8-OIumJW" outputId="e97f541b-b98e-455c-e735-bf988555ad95"
# HIDE OUTPUT
# !pip install tensorflow_hub
# + [markdown] id="1jSQ2owPumJX"
# It is also necessary to install TensorFlow Datasets, which you can install with the following command.
# + colab={"base_uri": "https://localhost:8080/"} id="Z56Ce_B7umJX" outputId="a3fe3a71-8ed3-45ec-e60e-918e5bb38a1b"
# HIDE OUTPUT
# !pip install tensorflow_datasets
# + [markdown] id="0UgVyHXxumJY"
# Movie reviews are a good source of training data for sentiment analysis. These reviews are textual, and users give them a star rating which indicates if the viewer had a positive or negative experience with the movie. Load the Internet Movie DataBase (IMDB) reviews data set. This example is based on a TensorFlow example that you can [find here](https://colab.research.google.com/github/tensorflow/hub/blob/master/examples/colab/tf2_text_classification.ipynb#scrollTo=2ew7HTbPpCJH).
# + colab={"base_uri": "https://localhost:8080/", "height": 440, "referenced_widgets": ["342c8ecc48764a19a7d84fca7cfd8b0c", "c187a8615f9f482d8c4ba206ba1a9634", "98cd785afad840fd8cbbf73c58bb6951", "4bf007c0b80e4c7f8139bdcf3009b26b", "6543a3160a9c4980a9cd2ce87e38010b", "799f38528b7b4236b6624591ed66002c", "c88defd020874e348f87c75bc649c291", "<KEY>", "b96b9d778cee45699e570583eb38e17f", "<KEY>", "f702e622ee004e9591ac90ef231a1533", "<KEY>", "d665aedd6a1c4e12a312ebadf482dc00", "<KEY>", "<KEY>", "8bf75a35229f48bea7786ce024330d44", "<KEY>", "07713e5d0b1b445fb54ec157515290e5", "<KEY>", "<KEY>", "<KEY>", "59fd2e3ce54f42dd97aa95044bc99609", "78c16501daf448f68db6acd6ac22fe44", "9adf83e3d28f457ebaeac6f74a817585", "e1b7df5aabda4edb86db4a69db783dee", "30ad7760769f4042a00c07ceeae9f89c", "<KEY>", "<KEY>", "eb956c443ed84f5199572acab1a73b74", "8a0cb9329d39408b833344e751f5a762", "<KEY>", "265c02017e2046bba839f947d5f20b25", "<KEY>", "4ddcac6e75a7443396149c931ce4f3b3", "<KEY>", "87d16d7a0daa4fae837deb6b81da28e4", "fca2e9f85258474a8184ee19038dbce1", "3d19b848e4a54a198db10c2000748513", "<KEY>", "<KEY>", "8e83f3c6a0624551b9209b0ef0a5376d", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "bb9f2e95f6a24860b56544727377a981", "ebe04996d21b48f6aba2e4e178121c5b", "<KEY>", "e52b23daab5c4a289e37a7d07ce10125", "<KEY>", "e71f9481469f45d4a578f1ddf671551c", "da1193435b104e139b8de27384ab5a07", "3779841513a8455da892b6f396e7025c", "<KEY>", "b31589e904ba48e3868d59795e28f7ff", "<KEY>", "b0311cb873e64621ac45d6a49cc3a925", "<KEY>", "98f8cc568f544a579918de572117f58d", "<KEY>", "ad3b2411e5ff405696a38013c4a10f9e", "a87fe8137d4e4eca8c0ebb71ac861292", "<KEY>", "<KEY>", "0bd507d7c5314685a633b0d943b69ef9", "<KEY>", "279c293b01a846118b31684e451c3499", "<KEY>", "f032e8ac2dee4378a70a31a7c3b3a9f7", "<KEY>", "<KEY>", "f72d5e4b2ff24d0a808c52be57a78b93", "1a236aef271f4dda9cf63c01461affe9", "<KEY>", "a95f333d2866430a8eb5618b379ced7e", "4098c7a311b6424e84d9cd4ad30f51fe", "<KEY>", "624ebbef587d47a7916432035e7666b6", 
"511354020ece429baf3b9c3f0be0d573", "<KEY>", "8b80de00f12549dd8e195db716240a70", "<KEY>", "700fbe90ea7f4e028e1d0698ecdb560c", "<KEY>", "9793d24e39a04dcc8c43df759ef2a69f", "<KEY>", "e29ed19ced0946658859bf32d2b9a25e", "<KEY>"]} id="NrZOixq-umJY" outputId="ac74b049-3050-4dc5-b491-69bc23dce46f"
# HIDE OUTPUT
import tensorflow as tf
import tensorflow_hub as hub
import tensorflow_datasets as tfds
# Download/load the IMDB sentiment dataset; batch_size=-1 materializes each
# split as a single batch so it can be converted to NumPy arrays below.
train_data, test_data = tfds.load(name="imdb_reviews",
split=["train", "test"],
batch_size=-1, as_supervised=True)
# as_supervised=True yields (text, label) pairs; labels are 0/1 sentiment.
train_examples, train_labels = tfds.as_numpy(train_data)
test_examples, test_labels = tfds.as_numpy(test_data)
# /Users/jheaton/tensorflow_datasets/imdb_reviews/plain_text/0.1.0
# + [markdown] id="B95Ek4cIumJZ"
# Load a pretrained embedding model called [gnews-swivel-20dim](https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1). Google trained this network on GNEWS data and can convert raw text into vectors.
# + id="6_2X_-SlumJZ"
# Pretrained 20-dimensional Swivel word-embedding module trained on GNEWS;
# trainable=True lets the embeddings be fine-tuned during fit().
model = "https://tfhub.dev/google/tf2-preview/gnews-swivel-20dim/1"
hub_layer = hub.KerasLayer(model, output_shape=[20], input_shape=[],
dtype=tf.string, trainable=True)
# + [markdown] id="vPa3iHUoumJZ"
# The following code displays three movie reviews. This display allows you to see the actual data.
# + colab={"base_uri": "https://localhost:8080/"} id="HSIAeshoumJZ" outputId="646ec7e4-db4e-465b-94ab-9e03b5485493"
train_examples[:3]  # peek at three raw review strings
# + [markdown] id="lK53sJ5iumJZ"
# The embedding layer can convert each to 20-number vectors, which the neural network receives as input in place of the actual words.
# + colab={"base_uri": "https://localhost:8080/"} id="6kr2SLKSumJa" outputId="e3b3671d-e40c-4679-8554-65866a1fa862"
hub_layer(train_examples[:3])  # each review string -> one embedding vector
# + [markdown] id="fH8nB4z5umJa"
# We add additional layers to classify the movie reviews as either positive or negative.
# + colab={"base_uri": "https://localhost:8080/"} id="ezL-ysehumJa" outputId="6e955403-426a-41f4-a817-b29f62b17ef2"
# Stack the pretrained embedding with a small classification head:
# embedding -> 16 ReLU units -> single sigmoid probability (positive review).
model = tf.keras.Sequential([
    hub_layer,
    tf.keras.layers.Dense(16, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid'),
])
model.summary()
# + [markdown] id="3z685otDumJa"
# We are now ready to compile the neural network. For this application, we use the adam training method for binary classification. We also save the initial random weights for later to start over easily.
# + id="6ACYutHrumJa"
model.compile(optimizer='adam',
loss='binary_crossentropy',
metrics=['accuracy'])
# Snapshot the freshly initialized weights so the early-stopping run later
# can restart training from exactly the same starting point.
init_weights = model.get_weights()
# + [markdown] id="G4rLPk0QumJa"
# Before fitting, we split the training data into the train and validation sets.
# + id="aZLYInpaumJa"
# Hold out the first 10k reviews for validation; train on the remainder.
x_val = train_examples[:10000]
partial_x_train = train_examples[10000:]
y_val = train_labels[:10000]
partial_y_train = train_labels[10000:]
# + [markdown] id="pY3B9wM-oruO"
# We can now fit the neural network. This fitting will run for 40 epochs and allow us to evaluate the effectiveness of the neural network, as measured by the training set.
# + colab={"base_uri": "https://localhost:8080/"} id="EqDAQls1umJb" outputId="cdb29323-1fef-44ae-edff-34d98dd23613"
# Deliberately no early stopping here: run all 40 epochs so the overfitting
# is visible in the loss/accuracy plots below.
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
validation_data=(x_val, y_val),
verbose=1)
# + [markdown] id="C8cjFPa1uhMA"
# ## Benefits of Early Stopping
# While we used a validation set, we fit the neural network without early stopping. This dataset is complex enough to allow us to see the benefit of early stopping. We will examine how accuracy and loss progressed for training and validation sets. Loss measures the degree to which the neural network was confident in incorrect answers. Accuracy is the percentage of correct classifications, regardless of the neural network's confidence.
#
# We begin by looking at the loss as we fit the neural network.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="sDBXJsl5umJb" outputId="239a4a13-6564-406b-8e25-104c84dd8446"
# %matplotlib inline
import matplotlib.pyplot as plt
# Keras records one entry per epoch for each compiled metric.
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
# Dots = training loss, solid line = validation loss.
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# + [markdown] id="j2cQ3mocxVgp"
# We can see that training and validation loss are similar early in the fitting. However, as fitting continues and overfitting sets in, training and validation loss diverge from each other. Training loss continues to fall consistently. However, once overfitting happens, the validation loss no longer falls and eventually begins to increase a bit. Early stopping, which we saw earlier in this course, can prevent some overfitting.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="hU02g4gjumJb" outputId="125c6e89-6571-477a-94e5-25173bcf8380"
plt.clf() # clear figure
# Same plot as above, but for accuracy instead of loss.
plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()
# + [markdown] id="BagkzLu4yxyC"
# The accuracy graph tells a similar story. Now let's repeat the fitting with early stopping. We begin by creating an early stopping monitor and restoring the network's weights to random. Once this is complete, we can fit the neural network with the early stopping monitor enabled.
# + colab={"base_uri": "https://localhost:8080/"} id="Kh4uYTcwzEtG" outputId="15072a6d-b941-452b-db44-c17c78eb668d"
from tensorflow.keras.callbacks import EarlyStopping
# Stop once val_loss fails to improve by more than 1e-3 for 5 consecutive
# epochs, and roll the model back to the best weights seen so far.
monitor = EarlyStopping(monitor='val_loss', min_delta=1e-3,
patience=5, verbose=1, mode='auto',
restore_best_weights=True)
# Reset to the saved initial weights so this run is comparable to the first.
model.set_weights(init_weights)
history = model.fit(partial_x_train,
partial_y_train,
epochs=40,
batch_size=512,
callbacks=[monitor],
validation_data=(x_val, y_val),
verbose=1)
# + [markdown] id="pQsyC5VHmfuX"
# The training history chart is now shorter because we stopped earlier.
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="hyQUkk1kz22U" outputId="d0651549-b9b0-4d67-e917-27a08e509a41"
# Re-plot the loss curves for the early-stopped run; the x-axis is shorter
# because training halted before 40 epochs.
history_dict = history.history
acc = history_dict['accuracy']
val_acc = history_dict['val_accuracy']
loss = history_dict['loss']
val_loss = history_dict['val_loss']
epochs = range(1, len(acc) + 1)
plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# + [markdown] id="U8CoFEPUmpPO"
# Finally, we evaluate the accuracy for the best neural network before early stopping occurred.
# + colab={"base_uri": "https://localhost:8080/"} id="ccmjDt8KkBgF" outputId="58cc12d6-0016-447c-80c7-e3185f601618"
from sklearn.metrics import accuracy_score
import numpy as np
# Score the restored best model on the validation split.
val_pred = model.predict(x_val)
# Threshold the sigmoid outputs at 0.5 to obtain hard class labels.
val_classes = val_pred.flatten()>0.5
val_accuracy = accuracy_score(y_val,val_classes)
print(f"Accuracy: {val_accuracy}")
|
t81_558_class_09_3_transfer_nlp.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Ask three questions in sequence, then echo the answers back.
prompts = ("How old are you?", "How tall are you?", "How much do you weigh?")
answers = []
for prompt in prompts:
    print(prompt, end=' ')
    answers.append(input())
age, height, weight = answers
print(f"So, you're {age} old, {height} tall and {weight} heavy.")
# -
|
E11.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Jupyter Notebook for Counting Building Occupancy from Polaris Traffic Simulation Data
# This notebook will load a Polaris SQLlite data file into a Pandas data frame using sqlite3 libraries and count the average number of people in each building in each hour of the simulation.
# For help with Jupyter notebooks
#
# For help on using sql with Pandas see
# http://www.pererikstrandberg.se/blog/index.cgi?page=PythonDataAnalysisWithSqliteAndPandas
#
# For help on data analysis with Pandas see
# http://nbviewer.jupyter.org/github/jakevdp/PythonDataScienceHandbook/blob/master/notebooks/Index.ipynb
import sqlite3
import pandas as pd
# Create your connection.
# NOTE(review): assumes detroit-Demand.sqlite is in the working directory.
cnx = sqlite3.connect('detroit-Demand.sqlite')
cnx
# Pull per-household rows. `persons` is included because the head-count
# cells below read households.persons; the original query selected only
# household and location and would have raised AttributeError there.
households = pd.read_sql_query("SELECT household, location, persons FROM Household", cnx)
households
# Load the remaining tables whole; sizes permitting, this keeps later cells simple.
people = pd.read_sql_query("SELECT * FROM Person", cnx)
locations = pd.read_sql_query("SELECT * FROM All_Locations", cnx)
locations
activity = pd.read_sql_query("SELECT * FROM Activity", cnx)
people
locations
activity
# Inspect the available columns of each table.
households.columns
people.columns
locations.columns
# NOTE(review): assumes the households frame carries a 'persons' column.
households.persons
# count up all the number of people by looking at the households data - should match the number of people data
# NOTE(review): .mean() gives the average household size; a total head-count would use .sum().
households.persons.mean()
# find the max person number, should match sum of people in households if we don't count homeless
households["persons"]
# list out the different land use types
locations.land_use.unique()
# count up the number of locations that are business
locations.land_use[locations.land_use == 'BUSINESS'].count()
# count the number of multifamily residential
locations.land_use[locations.land_use == 'RESIDENTIAL-MULTI'].count()
locations.land_use[locations.land_use == 'RESIDENTIAL-SINGLE'].count()
locations.land_use[locations.land_use == 'CIVIC'].count()
locations.land_use[locations.land_use == 'INDUSTRY'].count()
locations.land_use[locations.land_use == 'RECREATION'].count()
locations.land_use[locations.land_use == 'ALL'].count()
locations.land_use[locations.land_use == 'AGRICULTURE'].count()
# Fix: the frames loaded above are named `households` and `people`; the
# original `household`/`person` were undefined and raised NameError.
beginning_location = pd.merge(households, people, on='household', how='left')
|
Hollowed SULI/Polaris_Building_Population_Count.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tensor shapes in Pyro 0.2
#
# This tutorial introduces Pyro's organization of tensor dimensions. Before starting, you should familiarize yourself with [PyTorch broadcasting semantics](http://pytorch.org/docs/master/notes/broadcasting.html).
#
# #### Summary:
# - While you are learning or debugging, set `pyro.enable_validation(True)`.
# - Tensors broadcast by aligning on the right: `torch.ones(3,4,5) + torch.ones(5)`.
# - Distribution `.sample().shape == batch_shape + event_shape`.
# - Distribution `.log_prob(x).shape == batch_shape` (but not `event_shape`!).
# - Use `.expand()` or `poutine.broadcast` to draw a batch of samples.
# - Use `my_dist.independent(1)` to declare a dimension as dependent.
# - Use `with pyro.iarange('name', size):` to declare a dimension as independent.
# - All dimensions must be declared either dependent or independent.
# - Try to support batching on the left. This lets Pyro auto-parallelize.
# - use negative indices like `x.sum(-1)` rather than `x.sum(2)`
# - use ellipsis notation like `pixel = image[..., i, j]`
#
# #### Table of Contents
# - [Distribution shapes](#Distributions-shapes:-batch_shape-and-event_shape)
# - [Examples](#Examples)
# - [Reshaping distributions](#Reshaping-distributions)
# - [It is always safe to assume dependence](#It-is-always-safe-to-assume-dependence)
# - [Declaring independence with iarange](#Declaring-independent-dims-with-iarange)
# - [Subsampling inside iarange](#Subsampling-tensors-inside-an-iarange)
# - [Broadcasting to allow Parallel Enumeration](#Broadcasting-to-allow-parallel-enumeration)
# - [Writing parallelizable code](#Writing-parallelizable-code)
# - [Automatic broadcasting via broadcast poutine](#Automatic-broadcasting-via-broadcast-poutine)
# +
import os
import torch
import pyro
from torch.distributions import constraints
from pyro.distributions import Bernoulli, Categorical, MultivariateNormal, Normal
from pyro.distributions.util import broadcast_shape
from pyro.infer import Trace_ELBO, TraceEnum_ELBO, config_enumerate
import pyro.poutine as poutine
from pyro.optim import Adam
# Flag CI runs so the notebook could shrink workloads there if needed.
smoke_test = ('CI' in os.environ)
pyro.enable_validation(True) # <---- This is always a good idea!
# We'll use this helper to check our models are correct.
def test_model(model, guide, loss):
    """Evaluate `loss` once on (model, guide) as a shape-correctness smoke test."""
    pyro.clear_param_store()  # avoid parameter leakage between cells
    loss.loss(model, guide)
# -
# ## Distributions shapes: `batch_shape` and `event_shape` <a class="anchor" id="Distributions-shapes:-batch_shape-and-event_shape"></a>
#
# PyTorch `Tensor`s have a single `.shape` attribute, but `Distribution`s have two shape attributions with special meaning: `.batch_shape` and `.event_shape`. These two combine to define the total shape of a sample
# ```py
# x = d.sample()
# assert x.shape == d.batch_shape + d.event_shape
# ```
# Indices over `.batch_shape` denote independent random variables, whereas indices over `.event_shape` denote dependent random variables. Because the dependent random variables define probability together, the `.log_prob()` method only produces a single number for each event of shape `.event_shape`. Thus the total shape of `.log_prob()` is `.batch_shape`:
# ```py
# assert d.log_prob(x).shape == d.batch_shape
# ```
# Note that the `Distribution.sample()` method also takes a `sample_shape` parameter that indexes over independent identically distributed (iid) random variables, so that
# ```py
# x2 = d.sample(sample_shape)
# assert x2.shape == sample_shape + batch_shape + event_shape
# ```
# In summary
# ```
# | iid | independent | dependent
# ------+--------------+-------------+------------
# shape = sample_shape + batch_shape + event_shape
# ```
# For example univariate distributions have empty event shape (because each number is an independent event). Distributions over vectors like `MultivariateNormal` have `len(event_shape) == 1`. Distributions over matrices like `InverseWishart` have `len(event_shape) == 2`.
#
# ### Examples <a class="anchor" id="Examples"></a>
#
# The simplest distribution shape is a single univariate distribution.
# A scalar distribution: both batch_shape and event_shape are empty.
d = Bernoulli(0.5)
assert d.batch_shape == ()
assert d.event_shape == ()
x = d.sample()
assert x.shape == ()
assert d.log_prob(x).shape == ()
# Distributions can be batched by passing in batched parameters.
d = Bernoulli(0.5 * torch.ones(3,4))
assert d.batch_shape == (3, 4)
assert d.event_shape == ()
x = d.sample()
assert x.shape == (3, 4)
assert d.log_prob(x).shape == (3, 4)
# Another way to batch distributions is via the `.expand()` method. This only works if
# parameters are identical along the leftmost dimensions.
d = Bernoulli(torch.tensor([0.1, 0.2, 0.3, 0.4])).expand([3, 4])
assert d.batch_shape == (3, 4)
assert d.event_shape == ()
x = d.sample()
assert x.shape == (3, 4)
assert d.log_prob(x).shape == (3, 4)
# Multivariate distributions have nonempty `.event_shape`. For these distributions, the shapes of `.sample()` and `.log_prob(x)` differ:
d = MultivariateNormal(torch.zeros(3), torch.eye(3, 3))
assert d.batch_shape == ()
assert d.event_shape == (3,)
x = d.sample()
assert x.shape == (3,)            # == batch_shape + event_shape
assert d.log_prob(x).shape == ()  # == batch_shape
# ### Reshaping distributions <a class="anchor" id="Reshaping-distributions"></a>
#
# In Pyro you can treat a univariate distribution as multivariate by calling the `.independent(_)` property.
# .independent(1) moves the rightmost batch dimension into the event shape.
d = Bernoulli(0.5 * torch.ones(3,4)).independent(1)
assert d.batch_shape == (3,)
assert d.event_shape == (4,)
x = d.sample()
assert x.shape == (3, 4)
assert d.log_prob(x).shape == (3,)
# While you work with Pyro programs, keep in mind that samples have shape `batch_shape + event_shape`, whereas `.log_prob(x)` values have shape `batch_shape`. You'll need to ensure that `batch_shape` is carefully controlled by either trimming it down with `.independent(n)` or by declaring dimensions as independent via `pyro.iarange`.
#
# ### It is always safe to assume dependence <a class="anchor" id="It-is-always-safe-to-assume-dependence"></a>
#
# Often in Pyro we'll declare some dimensions as dependent even though they are in fact independent, e.g.
# ```py
# pyro.sample("x", dist.Normal(0, 1).expand([10]).independent(1))
# ```
# This is useful for two reasons: First it allows us to easily swap in a `MultivariateNormal` distribution later. Second it simplifies the code a bit since we don't need an `iarange` (see below) as in
# ```py
# with pyro.iarange("x_iarange", 10):
# pyro.sample("x", dist.Normal(0, 1).expand([10]))
# ```
# The difference between these two versions is that the second version with `iarange` informs Pyro that it can make use of independence information when estimating gradients, whereas in the first version Pyro must assume they are dependent (even though the normals are in fact independent). This is analogous to d-separation in graphical models: it is always safe to add edges and assume variables *may* be dependent (i.e. to widen the model class), but it is unsafe to assume independence when variables are actually dependent (i.e. narrowing the model class so the true model lies outside of the class, as in mean field). In practice Pyro's SVI inference algorithm uses reparameterized gradient estimators for `Normal` distributions so both gradient estimators have the same performance.
# ## Declaring independent dims with `iarange` <a class="anchor" id="Declaring-independent-dims-with-iarange"></a>
#
# Pyro models can use the context manager [pyro.iarange](http://docs.pyro.ai/en/dev/primitives.html#pyro.iarange) to declare that certain batch dimensions are independent. Inference algorithms can then take advantage of this independence to e.g. construct lower variance gradient estimators or to enumerate in linear space rather than exponential space. An example of an independent dimension is the index over data in a minibatch: each datum should be independent of all others.
#
# The simplest way to declare a dimension as independent is to declare the rightmost batch dimension as independent via a simple
# ```py
# with pyro.iarange("my_iarange"):
# # within this context, batch dimension -1 is independent
# ```
# We recommend always providing an optional size argument to aid in debugging shapes
# ```py
# with pyro.iarange("my_iarange", len(my_data)):
# # within this context, batch dimension -1 is independent
# ```
# Starting with Pyro 0.2 you can additionally nest `iaranges`, e.g. if you have per-pixel independence:
# ```py
# with pyro.iarange("x_axis", 320):
# # within this context, batch dimension -1 is independent
# with pyro.iarange("y_axis", 200):
# # within this context, batch dimensions -2 and -1 are independent
# ```
# Note that we always count from the right by using negative indices like -2, -1.
#
# Finally if you want to mix and match `iarange`s for e.g. noise that depends only on `x`, some noise that depends only on `y`, and some noise that depends on both, you can declare multiple `iaranges` and use them as reusable context managers. In this case Pyro cannot automatically allocate a dimension, so you need to provide a `dim` argument (again counting from the right):
# ```py
# x_axis = pyro.iarange("x_axis", 3, dim=-2)
# y_axis = pyro.iarange("y_axis", 2, dim=-3)
# with x_axis:
# # within this context, batch dimension -2 is independent
# with y_axis:
# # within this context, batch dimension -3 is independent
# with x_axis, y_axis:
# # within this context, batch dimensions -3 and -2 are independent
# ```
# Let's take a closer look at batch sizes within `iarange`s. (See the [broadcasting](#Broadcasting-to-allow-parallel-enumeration) section below for explanation of `@poutine.broadcast`.)
# +
@poutine.broadcast
def model1():
    """Demonstrate how iarange and .independent() shape sample sites."""
    a = pyro.sample("a", Normal(0, 1))
    b = pyro.sample("b", Normal(torch.zeros(2), 1).independent(1))
    with pyro.iarange("c_iarange", 2):
        c = pyro.sample("c", Normal(torch.zeros(2), 1))
        with pyro.iarange("d_iarange", 3):
            d = pyro.sample("d", Normal(torch.zeros(3,4,5), 1).independent(2))
    assert a.shape == () # batch_shape == () event_shape == ()
    assert b.shape == (2,) # batch_shape == () event_shape == (2,)
    assert c.shape == (2,) # batch_shape == (2,) event_shape == ()
    assert d.shape == (3,4,5) # batch_shape == (3,) event_shape == (4,5)
    # Reusable iaranges with explicit (negative, right-counted) dims.
    x_axis = pyro.iarange("x_axis", 3, dim=-2)
    y_axis = pyro.iarange("y_axis", 2, dim=-3)
    with x_axis:
        x = pyro.sample("x", Normal(0, 1))
    with y_axis:
        y = pyro.sample("y", Normal(0, 1))
    with x_axis, y_axis:
        xy = pyro.sample("xy", Normal(0, 1))
        z = pyro.sample("z", Normal(0, 1).expand([5]).independent(1))
    assert x.shape == (3, 1) # batch_shape == (3,1) event_shape == ()
    assert y.shape == (2, 1, 1) # batch_shape == (2,1,1) event_shape == ()
    assert xy.shape == (2, 3, 1) # batch_shape == (2,3,1) event_shape == ()
    assert z.shape == (2, 3, 1, 5) # batch_shape == (2,3,1) event_shape == (5,)
test_model(model1, model1, Trace_ELBO())
# -
# It is helpful to visualize the `.shape`s of each sample site by aligning them at the boundary between `batch_shape` and `event_shape`: dimensions to the right will be summed out in `.log_prob()` and dimensions to the left will remain.
# ```
# batch dims | event dims
# -----------+-----------
# | a = sample("a", Normal(0, 1))
# |2 b = sample("b", Normal(zeros(2), 1)
# | .independent(1)
# | with iarange("c", 2):
# 2| c = sample("c", Normal(zeros(2), 1))
# | with iarange("d", 3):
# 3|4 5 d = sample("d", Normal(zeros(3,4,5), 1)
# | .independent(2)
# |
# | x_axis = iarange("x", 3, dim=-2)
# | y_axis = iarange("y", 2, dim=-3)
# | with x_axis:
# 3 1| x = sample("x", Normal(0, 1))
# | with y_axis:
# 2 1 1| y = sample("y", Normal(0, 1))
# | with x_axis, y_axis:
# 2 3 1| xy = sample("xy", Normal(0, 1))
# 2 3 1|5 z = sample("z", Normal(0, 1).expand([5])
# | .independent(1))
# ```
# As an exercise, try to tabulate the shapes of sample sites in one of your own programs.
# ## Subsampling tensors inside an `iarange` <a class="anchor" id="Subsampling-tensors-inside-an-iarange"></a>
#
# One of the main uses of [iarange](http://docs.pyro.ai/en/dev/primitives.html#pyro.iarange) is to subsample data. This is possible within an `iarange` because data are independent, so the expected value of the loss on, say, half the data should be half the expected loss on the full data.
#
# To subsample data, you need to inform Pyro of both the original data size and the subsample size; Pyro will then choose a random subset of data and yield the set of indices.
# +
data = torch.arange(100.)
@poutine.broadcast
def model2():
    """Demonstrate minibatch subsampling inside an iarange."""
    mean = pyro.param("mean", torch.zeros(len(data)))
    with pyro.iarange("data", len(data), subsample_size=10) as ind:
        assert len(ind) == 10 # ind is a LongTensor that indexes the subsample.
        batch = data[ind] # Select a minibatch of data.
        mean_batch = mean[ind] # Take care to select the relevant per-datum parameters.
        # Do stuff with batch:
        x = pyro.sample("x", Normal(mean_batch, 1), obs=batch)
        assert len(x) == 10
test_model(model2, guide=lambda: None, loss=Trace_ELBO())
# -
# ## Broadcasting to allow parallel enumeration <a class="anchor" id="Broadcasting-to-allow-parallel-enumeration"></a>
#
# Pyro 0.2 introduces the ability to enumerate discrete latent variables in parallel. This can significantly reduce the variance of gradient estimators when learning a posterior via [SVI](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.svi.SVI).
#
# To use parallel enumeration, Pyro needs to allocate tensor dimension that it can use for enumeration. To avoid conflicting with other dimensions that we want to use for `iarange`s, we need to declare a budget of the maximum number of tensor dimensions we'll use. This budget is called `max_iarange_nesting` and is an argument to [SVI](http://docs.pyro.ai/en/dev/inference_algos.html) (the argument is simply passed through to [TraceEnum_ELBO](http://docs.pyro.ai/en/dev/inference_algos.html#pyro.infer.traceenum_elbo.TraceEnum_ELBO)).
#
# To understand `max_iarange_nesting` and how Pyro allocates dimensions for enumeration, let's revisit `model1()` from above. This time we'll map out three types of dimensions:
# enumeration dimensions on the left (Pyro takes control of these), batch dimensions in the middle, and event dimensions on the right.
# ```
# max_iarange_nesting = 3
# |<--->|
# enumeration|batch|event
# -----------+-----+-----
# |. . .| a = sample("a", Normal(0, 1))
# |. . .|2 b = sample("b", Normal(zeros(2), 1)
# | | .independent(1))
# | | with iarange("c", 2):
# |. . 2| c = sample("c", Normal(zeros(2), 1))
# | | with iarange("d", 3):
# |. . 3|4 5 d = sample("d", Normal(zeros(3,4,5), 1)
# | | .independent(2))
# | |
# | | x_axis = iarange("x", 3, dim=-2)
# | | y_axis = iarange("y", 2, dim=-3)
# | | with x_axis:
# |. 3 1| x = sample("x", Normal(0, 1))
# | | with y_axis:
# |2 1 1| y = sample("y", Normal(0, 1))
# | | with x_axis, y_axis:
# |2 3 1| xy = sample("xy", Normal(0, 1))
# |2 3 1|5 z = sample("z", Normal(0, 1).expand([5]))
# | | .independent(1))
# ```
# Note that it is safe to overprovision `max_iarange_nesting=4` but we cannot underprovision `max_iarange_nesting=2` (or Pyro will error). Let's see how this works in practice.
# +
@config_enumerate(default="parallel")
@poutine.broadcast
def model3():
    """Model mixing parallel-enumerated discrete sites with nested iaranges.

    Run with TraceEnum_ELBO(max_iarange_nesting=2): Pyro allocates a fresh
    dim to the left of dim -2 for each enumerated site, in declaration order
    (a -> dim -3, b -> dim -4, c -> dim -5, d -> dim -6). The asserts below
    document the resulting enumerated|batch|event shapes.
    """
    p = pyro.param("p", torch.arange(6.) / 6)
    locs = pyro.param("locs", torch.tensor([-1., 1.]))
    a = pyro.sample("a", Categorical(torch.ones(6) / 6))
    b = pyro.sample("b", Bernoulli(p[a]))  # Note this depends on a.
    with pyro.iarange("c_iarange", 4):
        c = pyro.sample("c", Bernoulli(0.3))
        with pyro.iarange("d_iarange", 5):
            d = pyro.sample("d", Bernoulli(0.4))
            e_loc = locs[d.long()].unsqueeze(-1)
            e_scale = torch.arange(1., 8.)
            e = pyro.sample("e", Normal(e_loc, e_scale)
                            .independent(1))  # Note this depends on d.

            #                  enumerated|batch|event dims
            assert a.shape == (        6, 1, 1   )  # Six enumerated values of the Categorical.
            assert b.shape == (     2, 1, 1, 1   )  # Two enumerated Bernoullis, unexpanded.
            assert c.shape == (  2, 1, 1, 1, 1   )  # Only two Bernoullis, unexpanded.
            assert d.shape == (2, 1, 1, 1, 1, 1  )  # Only two Bernoullis, unexpanded.

            assert e.shape == (2, 1, 1, 1, 5, 4, 7)  # This is sampled and depends on d.
            assert e_loc.shape   == (2, 1, 1, 1, 1, 1, 1,)
            assert e_scale.shape == (                  7,)

test_model(model3, model3, TraceEnum_ELBO(max_iarange_nesting=2))
# -
# Let's take a closer look at those dimensions. First note that Pyro allocates enumeration dims starting from the right at `max_iarange_nesting`: Pyro allocates dim -3 to enumerate `a`, then dim -4 to enumerate `b`, then dim -5 to enumerate `c`, and finally dim -6 to enumerate `d`. Next note that samples only have extent (size > 1) in the new enumeration dimension. This helps keep tensors small and computation cheap. (Note that the `log_prob` shape will be broadcast up to contain both enumeration shape and batch shape, so e.g. `trace.nodes['d']['log_prob'].shape == (2, 1, 1, 1, 5, 4)`.)
#
# We can draw a similar map of the tensor dimensions:
# ```
# max_iarange_nesting = 2
# |<->|
# enumeration batch event
# ------------|---|-----
# 6|1 1| a = pyro.sample("a", Categorical(torch.ones(6) / 6))
# 2 1|1 1| b = pyro.sample("b", Bernoulli(p[a]))
# | | with pyro.iarange("c_iarange", 4):
# 2 1 1|1 1| c = pyro.sample("c", Bernoulli(0.3))
# | | with pyro.iarange("d_iarange", 5):
# 2 1 1 1|1 1| d = pyro.sample("d", Bernoulli(0.4))
# 2 1 1 1|1 1|1 e_loc = locs[d.long()].unsqueeze(-1)
# | |7 e_scale = torch.arange(1., 8.)
# 2 1 1 1|5 4|7 e = pyro.sample("e", Normal(e_loc, e_scale)
# | | .independent(1))
# ```
#
# ### Writing parallelizable code <a class="anchor" id="Writing-parallelizable-code"></a>
#
# It can be tricky to write Pyro models that correctly handle parallelized sample sites. Two tricks help: [broadcasting](http://pytorch.org/docs/master/notes/broadcasting.html) and [ellipsis slicing](http://python-reference.readthedocs.io/en/latest/docs/brackets/ellipsis.html). Let's look at a contrived model to see how these work in practice. Our aim is to write a model that works both with and without enumeration.
# +
width = 8
height = 10
sparse_pixels = torch.LongTensor([[3, 2], [3, 5], [3, 9], [7, 1]])
enumerated = None  # set to either True or False below, before each test run

@poutine.broadcast
def fun(observe):
    """Shared model/guide body over a sparse grid of Bernoulli pixels.

    When ``observe`` is True the "pixels" site is conditioned on the dense
    pixel grid (model role); when False it is omitted (guide role). The
    assertions document how each tensor's shape changes depending on the
    module-level ``enumerated`` flag.
    """
    p_x = pyro.param("p_x", torch.tensor(0.1), constraint=constraints.unit_interval)
    p_y = pyro.param("p_y", torch.tensor(0.1), constraint=constraints.unit_interval)
    x_axis = pyro.iarange('x_axis', width, dim=-2)
    y_axis = pyro.iarange('y_axis', height, dim=-1)

    # Note that the shapes of these sites depend on whether Pyro is enumerating.
    with x_axis:
        x_active = pyro.sample("x_active", Bernoulli(p_x))
    with y_axis:
        y_active = pyro.sample("y_active", Bernoulli(p_y))
    if enumerated:
        assert x_active.shape == (2, 1, 1)
        assert y_active.shape == (2, 1, 1, 1)
    else:
        assert x_active.shape == (width, 1)
        assert y_active.shape == (height,)

    # The first trick is to broadcast. This works with or without enumeration.
    p = 0.1 + 0.5 * x_active * y_active
    if enumerated:
        assert p.shape == (2, 2, 1, 1)
    else:
        assert p.shape == (width, height)
    dense_pixels = p.new_zeros(broadcast_shape(p.shape, (width, height)))

    # The second trick is to index using ellipsis slicing.
    # This allows Pyro to add arbitrary dimensions on the left.
    for x, y in sparse_pixels:
        dense_pixels[..., x, y] = 1
    if enumerated:
        assert dense_pixels.shape == (2, 2, width, height)
    else:
        assert dense_pixels.shape == (width, height)

    with x_axis, y_axis:
        if observe:
            pyro.sample("pixels", Bernoulli(p), obs=dense_pixels)
def model4():
    # Model: condition on the observed dense pixel grid.
    fun(observe=True)

@config_enumerate(default="parallel")
def guide4():
    # Guide: identical structure, but nothing is observed.
    fun(observe=False)

# Test without enumeration.
enumerated = False
test_model(model4, guide4, Trace_ELBO())

# Test with enumeration.
enumerated = True
test_model(model4, guide4, TraceEnum_ELBO(max_iarange_nesting=2))
# -
# ### Automatic broadcasting via broadcast poutine<a class="anchor" id="Automatic-broadcasting-via-broadcast-poutine"></a>
#
# Note that in all our model/guide specifications, we have relied on [poutine.broadcast](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.broadcast) to automatically expand sample shapes to satisfy the constraints on batch shape enforced by `pyro.iarange` statements. However `poutine.broadcast` is equivalent to hand-annotated `.expand()` statements.
#
# We will demonstrate this using `model4` from the [previous section](#Writing-parallelizable-code). Note the following changes to the code from earlier:
#
# - For the purpose of this example, we will only consider "parallel" enumeration, but broadcasting should work as expected without enumeration or with "sequential" enumeration.
# - We have separated out the sampling function which returns the tensors corresponding to the active pixels. Modularizing the model code into components is a common practice, and helps with maintainability of large models. The first sampling function is identical to what we had in `model4`, and the remaining sampling functions use `poutine.broadcast` to implicitly expand sample sites to conform to the shape requirements imposed by the `iarange` contexts in which they are embedded.
# - We would also like to use the `pyro.iarange` construct to parallelize the ELBO estimator over [num_particles](http://docs.pyro.ai/en/latest/inference_algos.html#pyro.infer.elbo.ELBO). This is done by wrapping the contents of model/guide inside an outermost `pyro.iarange` context.
# +
num_particles = 100 # Number of samples for the ELBO estimator
width = 8
height = 10
sparse_pixels = torch.LongTensor([[3, 2], [3, 5], [3, 9], [7, 1]])
def sample_pixel_locations_no_broadcasting(p_x, p_y, x_axis, y_axis):
    """Sample pixel activations, expanding batch shapes entirely by hand.

    The `.expand()` calls manually match the `num_particles`, `width` and
    `height` iarange sizes, which are read from module-level globals.
    """
    x_dist = Bernoulli(p_x).expand([num_particles, width, 1])
    y_dist = Bernoulli(p_y).expand([num_particles, 1, height])
    with x_axis:
        x_active = pyro.sample("x_active", x_dist)
    with y_axis:
        y_active = pyro.sample("y_active", y_dist)
    return x_active, y_active
def sample_pixel_locations_automatic_broadcasting(p_x, p_y, x_axis, y_axis):
    """Sample pixel activations, letting poutine.broadcast do all expansion."""
    with x_axis:
        along_x = pyro.sample("x_active", Bernoulli(p_x))
    with y_axis:
        along_y = pyro.sample("y_active", Bernoulli(p_y))
    return along_x, along_y
def sample_pixel_locations_partial_broadcasting(p_x, p_y, x_axis, y_axis):
    """Sample pixel activations with only a partial manual expansion.

    Each site is expanded to the size of its innermost iarange; the broadcast
    effect handler fills in the remaining batch dimensions.
    """
    partially_expanded_x = Bernoulli(p_x).expand([width, 1])
    partially_expanded_y = Bernoulli(p_y).expand([height])
    with x_axis:
        x_active = pyro.sample("x_active", partially_expanded_x)
    with y_axis:
        y_active = pyro.sample("y_active", partially_expanded_y)
    return x_active, y_active
def fun(observe, sample_fn):
    """Shared model/guide body parameterized by a pixel-sampling strategy.

    `sample_fn(p_x, p_y, x_axis, y_axis)` must return the (x_active, y_active)
    sample pair; the three `sample_pixel_locations_*` variants above differ
    only in how much manual `.expand()` work they do. When `observe` is True
    the "pixels" site is conditioned on the dense pixel grid.
    """
    p_x = pyro.param("p_x", torch.tensor(0.1), constraint=constraints.unit_interval)
    p_y = pyro.param("p_y", torch.tensor(0.1), constraint=constraints.unit_interval)
    x_axis = pyro.iarange('x_axis', width, dim=-2)
    y_axis = pyro.iarange('y_axis', height, dim=-1)
    # Vectorize the ELBO estimator over particles. FIX: use the module-level
    # `num_particles` constant instead of a duplicated hard-coded 100.
    with pyro.iarange("num_particles", num_particles, dim=-3):
        x_active, y_active = sample_fn(p_x, p_y, x_axis, y_axis)
        # Indices corresponding to "parallel" enumeration are appended
        # to the left of the "num_particles" iarange dim.
        assert x_active.shape == (2, 1, 1, 1)
        assert y_active.shape == (2, 1, 1, 1, 1)
        p = 0.1 + 0.5 * x_active * y_active
        assert p.shape == (2, 2, 1, 1, 1)

        dense_pixels = p.new_zeros(broadcast_shape(p.shape, (width, height)))
        for x, y in sparse_pixels:
            dense_pixels[..., x, y] = 1
        assert dense_pixels.shape == (2, 2, 1, width, height)

        with x_axis, y_axis:
            if observe:
                pyro.sample("pixels", Bernoulli(p), obs=dense_pixels)
def test_model_with_sample_fn(sample_fn, broadcast=False):
    """Build a model/guide pair around *sample_fn* and run one ELBO step.

    When `broadcast` is True, both model and guide are wrapped with
    poutine.broadcast so that implicit shape expansion is applied.
    """
    def model():
        fun(observe=True, sample_fn=sample_fn)

    @config_enumerate(default="parallel")
    def guide():
        fun(observe=False, sample_fn=sample_fn)

    if broadcast:
        model = poutine.broadcast(model)
        guide = poutine.broadcast(guide)
    # max_iarange_nesting=3: num_particles, x_axis and y_axis dims.
    test_model(model, guide, TraceEnum_ELBO(max_iarange_nesting=3))

test_model_with_sample_fn(sample_pixel_locations_no_broadcasting)
test_model_with_sample_fn(sample_pixel_locations_automatic_broadcasting, broadcast=True)
test_model_with_sample_fn(sample_pixel_locations_partial_broadcasting, broadcast=True)
# -
# In the first sampling function, we had to do some manual book-keeping and expand the `Bernoulli` distribution's batch shape to account for the independent dimensions added by the `pyro.iarange` contexts. In particular, note how `sample_pixel_locations` needs knowledge of `num_particles`, `width` and `height` and is accessing these variables from the global scope, which is not ideal.
#
# The next two sampling functions are annotated with [poutine.broadcast](http://docs.pyro.ai/en/latest/poutine.html#pyro.poutine.broadcast), so that this can be automatically achieved via an effect handler. Note the following in the next two modified sampling functions:
#
# - The second argument to `pyro.iarange`, i.e. the optional `size` argument needs to be provided for implicit broadcasting, so that `poutine.broadcast` can infer the batch shape requirement for each of the sample sites.
# - The existing `batch_shape` of the sample site must be broadcastable with the size of the `pyro.iarange` contexts. In our particular example, `Bernoulli(p_x)` has an empty batch shape which is universally broadcastable.
# - `poutine.broadcast` is idempotent, and is also safe to use when the sample sites have been partially broadcasted to the size of some of the `iarange`s but not all. In the third sampling function, the user has partially expanded `x_active` and `y_active`, and the broadcast effect handler expands the other batch dimensions to the size of remaining `iarange`s.
#
# Note how simple it is to achieve parallelization via tensorized operations using `pyro.iarange` and `poutine.broadcast`! `poutine.broadcast` also helps in code modularization because model components can be written agnostic of the `iarange` contexts in which they may subsequently get embedded in.
|
tutorial/source/tensor_shapes.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="y7BAlRdOa3uk" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="bd569ad8-1e3d-4d7e-f394-106a22dcbbab"
from sklearn import datasets
import numpy as np
iris = datasets.load_iris()
X = iris.data[:,[2, 3]]
y = iris.target
print('클래스 레이블:', np.unique(y))
# + id="I1hMrSVqZXa6" colab_type="code" colab={}
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=1, stratify=y)
# + id="MRum8Z-Pbjpr" colab_type="code" colab={}
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
sc.fit(X_train)
X_train_std = sc.transform(X_train)
X_test_std = sc.transform(X_test)
# + id="8sSMkqkOarlM" colab_type="code" colab={}
from matplotlib.colors import ListedColormap
import matplotlib.pyplot as plt
def plot_decision_regions(X, y, classifier, test_idx=None, resolution=0.02):
    """Plot the 2-D decision surface of a fitted classifier.

    X : array of shape (n_samples, 2) — exactly two features.
    y : class labels for each row of X.
    classifier : fitted estimator exposing `.predict`.
    test_idx : optional index range of test samples to highlight.
    resolution : grid step used to rasterize the decision surface.
    """
    # Marker and color setup (one entry per class, up to five classes).
    markers = ('s', 'x', 'o', '^', 'v')
    colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    cmap = ListedColormap(colors[:len(np.unique(y))])

    # Draw the decision boundary on a dense grid covering the data (±1 margin).
    x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1
    x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1
    xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),
                           np.arange(x2_min, x2_max, resolution))
    Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)
    Z = Z.reshape(xx1.shape)
    plt.contourf(xx1, xx2, Z, alpha=0.3, cmap=cmap)
    plt.xlim(xx1.min(), xx1.max())
    plt.ylim(xx2.min(), xx2.max())

    # Scatter the samples of each class.
    for idx, cl in enumerate(np.unique(y)):
        plt.scatter(x=X[y == cl, 0], y=X[y == cl, 1], alpha=0.3,
                    c=colors[idx], marker=markers[idx], label=cl,
                    edgecolor='black')

    # Highlight the held-out test samples with open circles.
    if test_idx:
        X_test, y_test = X[test_idx, :], y[test_idx]
        # BUG FIX: matplotlib >= 3.0 rejects c='' as a color; draw unfilled
        # markers with facecolors='none' instead.
        plt.scatter(X_test[:, 0], X_test[:, 1], facecolors='none',
                    edgecolor='black', alpha=1.0, linewidth=1, marker='o',
                    s=100, label='test set')
# + id="wxZORAv-Z3dV" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 333} outputId="444cd72a-7385-4b4b-c8a6-65605a34e324"
from sklearn.linear_model import LogisticRegression
lr = LogisticRegression(solver='liblinear', multi_class='auto', C=100.0, random_state=1)
lr.fit(X_train_std, y_train)
X_combined_std = np.vstack((X_train_std, X_test_std))
y_combined = np.hstack((y_train, y_test))
plot_decision_regions(X=X_combined_std, y=y_combined, classifier=lr, test_idx=range(105, 150))
plt.xlabel('petal length')
plt.ylabel('petal width')
# + id="XB1nueqRa6Dd" colab_type="code" colab={}
|
3/3_3_4.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: env
# language: python
# name: env
# ---
# # Part 01
#
# The data is loaded from the URL and explored to select the appropriate variables.
import tensorflow as tf
tf.__version__
import sys
print(sys.executable)
# +
# # !/Users/amin/Desktop/proj/env/bin/python -m pip install matplotlib
# -
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
import pandas as pd
import numpy as np
import seaborn as sns
# +
from zipfile import ZipFile
import os
uri = "https://storage.googleapis.com/tensorflow/tf-keras-datasets/jena_climate_2009_2016.csv.zip"
zip_path = keras.utils.get_file(origin=uri,
fname="jena_climate_2009_2016.csv.zip")
zip_file = ZipFile(zip_path)
zip_file.extractall()
# -
# save the data frame into the Data folder
# +
import shutil
# determine the src address and dist address
src_dir = "../Code/jena_climate_2009_2016.csv"
dst_dir = "../Data/jena_climate_2009_2016.csv"
# # copy img from src to dist
shutil.copy(src_dir, dst_dir)
# +
# # !mv jena_climate_2009_2016.csv.zip ~/Data/jena_climate_2009_2016.csv.zip
# -
csv_path = "../Data/jena_climate_2009_2016.csv"
df = pd.read_csv(csv_path)
df.head()
df.columns
df.columns.size
|
Code/P01_01_Data.ipynb
|
# ---
# jupyter:
# jupytext:
# split_at_heading: true
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/JacopoMangiavacchi/FastLabeling/blob/master/mnist_odd_even_transfer_learning.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="BRD3vJhViqiN" colab_type="text"
# # Import FastAI and MNIST dataset
# + colab_type="code" id="sAtalHAL-NnO" colab={"base_uri": "https://localhost:8080/", "height": 884} outputId="cb42db56-11bf-4251-9d18-466a89751c8b"
# !pip install fastai -U
# + colab_type="code" id="JlRcnsN--NnV" colab={}
from fastai.vision.all import *
matplotlib.rc('image', cmap='Greys')
# + colab_type="code" id="8sYFk9VL-Nna" colab={"base_uri": "https://localhost:8080/", "height": 17} outputId="96a17ff5-2d06-4eeb-8244-ce0603706c30"
path = untar_data(URLs.MNIST)
Path.BASE_PATH = path
# + [markdown] id="4GDoyHFjihoo" colab_type="text"
# # Utility Function to create subset copies of the MNIST dataset with different labels
# + colab_type="code" id="6BTNAmZ9UghS" colab={}
from shutil import copy2, rmtree
def copy_files(path, source_folder, destination_folder, sub_folders_list=None, random_select=False, max_files_to_copy=0):
    """Copy files from `source_folder` into a freshly created `destination_folder`.

    path : dataset root; sub-paths are assumed to support fastai's `.ls()`
        listing method — TODO confirm against caller.
    sub_folders_list : optional list of sub-folder names (e.g. digit labels)
        under `source_folder` whose contents are merged into the destination.
    random_select : shuffle the source files before copying.
    max_files_to_copy : copy at most this many files per folder (0 = all).

    FIX: the default for `sub_folders_list` was a mutable `[]`; replaced with
    the None-sentinel idiom (behavior unchanged — the list was never mutated).
    """
    if sub_folders_list is None:
        sub_folders_list = []

    def copy_folder_path(source_path, destination_path):
        # Copy up to `max_files_to_copy` files, optionally in random order.
        source_files = source_path.ls()
        list_files = list(range(len(source_files)))
        if random_select:
            random.shuffle(list_files)
        count = 0
        for i in list_files:
            copy2(source_files[i], destination_path)
            count += 1
            if max_files_to_copy > 0 and count >= max_files_to_copy:
                break

    (path/destination_folder).mkdir(exist_ok=True)
    if len(sub_folders_list) > 0:
        for i in sub_folders_list:
            folder = source_folder + '/' + str(i)
            copy_folder_path(path/folder, path/destination_folder)
    else:
        copy_folder_path(path/source_folder, path/destination_folder)
# + [markdown] id="XLHh9tM3ihAV" colab_type="text"
# # Prepare DataLoaders from transformed MNIST with ODD and EVEN Labels instead of DIGITS
# + colab_type="code" id="0YFUdt-BUfoG" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="eda89c88-7731-4486-8668-4bd70d85ee79"
# Rebuild the odd/even dataset folders from scratch.
# BUG FIX: `true` is not a Python name (NameError); the constant is `True`.
rmtree(path/'oe_training', ignore_errors=True)
rmtree(path/'oe_testing', ignore_errors=True)
(path/'oe_training').mkdir(exist_ok=True)
(path/'oe_testing').mkdir(exist_ok=True)
path.ls()
# + colab_type="code" id="XMwcOJHcqwBy" colab={}
copy_files(path, 'training', 'oe_training/odd', [0, 2, 4, 6, 8])
copy_files(path, 'training', 'oe_training/even', [1, 3, 5, 7, 9])
copy_files(path, 'testing', 'oe_testing/odd', [0, 2, 4, 6, 8])
copy_files(path, 'testing', 'oe_testing/even', [1, 3, 5, 7, 9])
# + colab_type="code" id="6ZWtOZHfUgqv" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="43711d9c-af8b-4c52-f10d-59b028472b3f"
(path/'oe_training/odd').ls(), (path/'oe_training/even').ls(), (path/'oe_testing/odd').ls(), (path/'oe_testing/even').ls()
# + colab_type="code" id="FocObiS7-Nnp" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="33b11cd1-0714-47d4-c71c-9d573673bde7"
even_odd_dls = ImageDataLoaders.from_folder(path, train='oe_training', valid='oe_testing')
even_odd_dls.train_ds, even_odd_dls.valid_ds
# + [markdown] id="SGcmjdyLidOD" colab_type="text"
# # Train Full MNIST Dataset on ResNet architecture with modified labels ODD and EVEN
# + colab_type="code" id="j4MFv4TJ-Nnt" colab={"base_uri": "https://localhost:8080/", "height": 271, "referenced_widgets": ["2ce50dd483304248854bbc9a6ac28aaa", "150604f5a5694bfa8885f774dae47664", "<KEY>", "<KEY>", "<KEY>", "dab809d0fead4142bdd1a915b12d07a7", "1ede3dac400e493e804042647d13a8a5", "9e0d6a4fb17f4a0a90ec747027e804de"]} outputId="271fc61c-7196-42b5-a0eb-273196796254"
learn = cnn_learner(even_odd_dls, resnet34, metrics=accuracy)
learn.fine_tune(3)
# + [markdown] id="mVXQwYGNibJn" colab_type="text"
# # Prepare DataLoaders for a small subset of the MNIST Dataset with real digit labels for only 60 samples per class (2 orders of magnitude smaller)
# + colab_type="code" id="jQNHvlCILj2B" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="12d1a371-6c55-4af0-8b49-26ba6870928c"
# Subset sizes per class. NOTE: `size_traning` keeps the original's typo
# because later cells reference this exact name.
size_traning = 60
size_testing = 10
size_training_name = str(size_traning) + '_training'
size_testing_name = str(size_testing) + '_testing'
# BUG FIX: `true` is not a Python name (NameError); the constant is `True`.
rmtree(path/size_training_name, ignore_errors=True)
rmtree(path/size_testing_name, ignore_errors=True)
(path/size_training_name).mkdir(exist_ok=True)
(path/size_testing_name).mkdir(exist_ok=True)
path.ls()
# + colab_type="code" id="23StteY5LkPw" colab={}
for i in range(10):
copy_files(path, 'training/' + str(i), size_training_name + '/' + str(i), random_select=True, max_files_to_copy=size_traning)
copy_files(path, 'testing/' + str(i), size_testing_name + '/' + str(i), random_select=True, max_files_to_copy=size_testing)
# + colab_type="code" id="mkoTV7n2wa-p" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="074b3e4c-1027-451c-e195-05e240aaf0ef"
subset_dls = ImageDataLoaders.from_folder(path, train=size_training_name, valid=size_testing_name)
subset_dls.train_ds, subset_dls.valid_ds
# + [markdown] id="YDw-4QMhiZHO" colab_type="text"
# # Transfer Learning reusing the ResNet model previously trained on EVEN / ODD labels, changing the last Linear layer to fine-tune on the 10 digit labels, and training on the new small subset dataset
# + colab_type="code" id="NsIgVY4nBh_Q" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e55195f9-3606-437b-c68e-cb49ff0acfa5"
learn.model[1][8]
# + colab_type="code" id="S0x5JL2YBg3g" colab={}
learn.model[1][8] = torch.nn.modules.linear.Linear(in_features=512, out_features=10, bias=False)
# + colab_type="code" id="0vy9B5Sq6N7B" colab={"base_uri": "https://localhost:8080/", "height": 732} outputId="99b6ed04-987c-4da4-ff4b-d0880b0aba08"
learn2 = Learner(subset_dls, learn.model, metrics=accuracy)
learn2.fine_tune(20)
# + colab_type="code" id="b7LeQHC8-Nnn" colab={"base_uri": "https://localhost:8080/", "height": 45} outputId="a7976462-7fba-495b-efb7-4e5b04aa703a"
test_path = (path/'testing'/'5').ls()[10]
test_image = Image.open(test_path)
test_image
# + colab_type="code" id="liJNj8i3-Nnu" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="b8079c5b-f12a-4b52-9feb-2afd01e7f85c"
learn2.predict(test_path)
# + [markdown] id="a0U7sPcwiQwb" colab_type="text"
# # Classic Transfer Learning training from original ResNet on the new small subset dataset
# + colab_type="code" id="a6N8UKDI0hFA" colab={"base_uri": "https://localhost:8080/", "height": 732} outputId="e30c3d50-1a3a-4a49-ee7c-2950aa3fdac9"
learn3 = cnn_learner(subset_dls, resnet34, metrics=accuracy)
learn3.fine_tune(20)
# + [markdown] id="QDePZhH-4RnM" colab_type="text"
# # Prepare DataLoaders for full MNIST dataset with original labels
#
#
# + id="sjRXO-8g4gGU" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 71} outputId="857c51cf-24ea-45de-bf39-19ffe1bab332"
full_dls = ImageDataLoaders.from_folder(path, train='training', valid='testing')
full_dls.train_ds, full_dls.valid_ds
# + [markdown] id="QildUBiiuvdY" colab_type="text"
# # Benchmarking the two models with the full MNIST validation dataset
# + id="Vx65nzG9wZDL" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="4f9fe3e7-cc76-4c11-b82c-42dcd971b805"
learn2.validate(dl=full_dls.valid)
# + id="BGxFd5v3vxHi" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="562ed2c5-e099-4a7d-c4d3-9455dbefaf15"
learn3.validate(dl=full_dls.valid)
|
mnist_odd_even_transfer_learning.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: 'Python 3.7.7 64-bit (''pyvizenv'': conda)'
# name: python3
# ---
# # ARIMA Model - Stock Price Prediction
import numpy as np
import pandas as pd
from pathlib import Path
import matplotlib.pyplot as plt
# %matplotlib inline
# Read in stock historical prices and calculate daily return
stock_prices = pd.read_csv("../Resources/stock_historical_prices.csv")
stock_prices = stock_prices.set_index("Date")
stock_prices["Return"] = stock_prices["Adj Close"].pct_change() * 100
stock_prices.dropna(inplace=True)
stock_prices.head()
# Plot just the "Adj Close" column from the dataframe:
stock_prices["Adj Close"].plot(title="Close Price Trend", rot=45)
plt.ylabel("Close Price")
# +
import statsmodels.api as sm
# Apply the Hodrick-Prescott Filter by decomposing the "Adj Close" price into two separate series:
ts_noise, ts_trend = sm.tsa.filters.hpfilter(stock_prices['Adj Close'])
ts_trend.plot(title= "TS Trend", rot=45)
plt.ylabel("Close Price")
# -
ts_noise.plot(title = "TS Noise", rot=45)
# +
# Create a dataframe of just the settle price, and add columns for "noise" and "trend" series from above:
close_prices = pd.DataFrame(stock_prices["Adj Close"])
close_prices["Trend"] = ts_trend
close_prices["Noise"] = ts_noise
close_prices.head()
# +
# Plot the Settle Price vs. the Trend for 2015 to the present
close_prices[["Adj Close","Trend"]].loc["2015":].plot(title = "Close vs Trend", rot=45)
plt.ylabel("Settle, Trend ($)")
# +
# Plot the Settle Noise
close_prices[["Adj Close","Noise"]].loc["2015":].plot(title = "Close vs Noise", rot=45)
plt.ylabel("Close, Noise ($)")
# +
from statsmodels.tsa.arima_model import ARIMA
# Estimate and ARIMA Model:
# Hint: ARIMA(df, order=(p, d, q))
model = ARIMA(close_prices["Adj Close"], order=(5, 1, 2))
# Fit the model
results = model.fit()
# +
# Output model summary results:
print(results.summary())
# +
# p values of the lags are above our threshold of 0.05, thus our ARIMA model is not a good fit to make predictions for this dataset. We can tweak hyperparameters ARIMA(df, order=(p, d, q)) and see if that will improve the model performance.
# -
price_forecast = pd.DataFrame(results.forecast(steps=5)[0])
price_forecast
# Plot the 5 Day Price Forecast
price_forecast = pd.DataFrame(results.forecast(steps=5)[0]).plot(title="Close Price 5 Day Forecast ")
plt.ylabel("Close ($)")
plt.xlabel("Day")
|
stock_price_predictors/regression_models/ARIMA.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import xarray as xr
from matplotlib import pyplot as plt
from xhistogram.xarray import histogram
from matplotlib import cm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import cartopy.crs as ccrs
import numpy as np
import cartopy.feature as cfeature
from cartopy.util import add_cyclic_point
import rebin_functions as rb
import budgetcalcs as bc
# from matplotlib.axes import Axes
# from cartopy.mpl.geoaxes import GeoAxes
# GeoAxes._pcolormesh_patched = Axes.pcolormesh
def histsum_ds(ds, bin_data, bins, dim=None, verbose=False, hist_vars=None):
    """Rebin every float32 variable of `ds` into histogram bins of `bin_data`.

    Each value is weighted by the grid-cell volume (`volcello`), so the
    result is a volume-weighted sum per bin. NaNs in a variable are masked
    out of both the binning coordinate and the weights.

    ds : xarray Dataset containing a 'volcello' variable.
    bin_data : DataArray used as the binning coordinate.
    bins : bin edges passed to xhistogram.
    dim : optional dimension(s) to histogram over.
    hist_vars : optional subset of variable names to rebin (default: all).
    """
    ds = ds.copy()
    vol = ds['volcello']
    if hist_vars is not None:
        # BUG FIX: was `ds[variables]` — an undefined name (NameError when
        # hist_vars was passed); select the requested variables instead.
        ds = ds[hist_vars]
    ds_rebinned = xr.Dataset()
    for var in ds.data_vars:
        if verbose:
            print(var)
        if ds[var].dtype == 'float32':
            nanmask = np.isnan(ds[var])
            ds_rebinned[var] = histogram(
                bin_data.where(~nanmask), bins=[bins], dim=dim,
                weights=(ds[var] * vol).where(~nanmask)
            )
    return ds_rebinned
# WRAPPING of contours fixed by masking approach outlined here: https://github.com/SciTools/cartopy/issues/1421#issue-538090364
def z_masked_overlap(axe, X, Y, Z, source_projection=None):
    """Mask grid cells that wrap across a map projection's seam.

    For data plotted on ``axe.projection``, find cells whose diagonal
    length exceeds half the projection's x-range (i.e. cells straddling
    the dateline/seam) and mask them out of ``Z`` so that 'pcolor',
    'pcolormesh', 'contour' and 'contourf' do not draw smeared bands.

    X, Y : 2-D coordinate arrays, either already in ``axe.projection`` or
        geodetic longitudes/latitudes; if ``source_projection`` is a
        geodetic CRS they are first projected into ``axe.projection``.
        Same shape as Z for contour/contourf, or with one extra row and
        column for pcolor/pcolormesh.
    Z : the data array.

    Returns (ptx, pty, Z): the (possibly projected) coordinates and the
    masked data.
    """
    import numpy.ma as ma
    # Bail out unchanged when there is nothing sensible to mask.
    if not hasattr(axe, 'projection'):
        return X, Y, Z
    if not isinstance(axe.projection, ccrs.Projection):
        return X, Y, Z
    if len(X.shape) != 2 or len(Y.shape) != 2:
        return X, Y, Z

    if (source_projection is not None and
            isinstance(source_projection, ccrs.Geodetic)):
        # Geodetic input: project lon/lat into the axes' coordinate system.
        transformed_pts = axe.projection.transform_points(
            source_projection, X, Y)
        ptx, pty = transformed_pts[..., 0], transformed_pts[..., 1]
    else:
        ptx, pty = X, Y

    with np.errstate(invalid='ignore'):
        # Diagonals have one less row and one less column than the grid.
        diagonal0_lengths = np.hypot(
            ptx[1:, 1:] - ptx[:-1, :-1],
            pty[1:, 1:] - pty[:-1, :-1]
        )
        diagonal1_lengths = np.hypot(
            ptx[1:, :-1] - ptx[:-1, 1:],
            pty[1:, :-1] - pty[:-1, 1:]
        )
        # A cell is treated as wrapped when either diagonal spans more than
        # half the projection's x-extent, or could not be computed (NaN).
        to_mask = (
            (diagonal0_lengths > (
                abs(axe.projection.x_limits[1]
                    - axe.projection.x_limits[0])) / 2) |
            np.isnan(diagonal0_lengths) |
            (diagonal1_lengths > (
                abs(axe.projection.x_limits[1]
                    - axe.projection.x_limits[0])) / 2) |
            np.isnan(diagonal1_lengths)
        )

        # TODO check if we need to do something about surrounding vertices

        # Add one extra column and row for contour and contourf, where Z has
        # the same shape as the coordinate arrays (pcolor's mask already fits).
        if (to_mask.shape[0] == Z.shape[0] - 1 and
                to_mask.shape[1] == Z.shape[1] - 1):
            to_mask_extended = np.zeros(Z.shape, dtype=bool)
            to_mask_extended[:-1, :-1] = to_mask
            to_mask_extended[-1, :] = to_mask_extended[-2, :]
            to_mask_extended[:, -1] = to_mask_extended[:, -2]
            to_mask = to_mask_extended
        if np.any(to_mask):
            # Combine with any pre-existing mask on Z before masking.
            Z_mask = getattr(Z, 'mask', None)
            to_mask = to_mask if Z_mask is None else to_mask | Z_mask
            Z = ma.masked_where(to_mask, Z)

    return ptx, pty, Z
# Paths into the ESM4 piControl post-processing tree: static grid geometry,
# COBALT biogeochemical tracers and rates, and MOM6 physical fields — all
# 5-year (0896-0900) annual means on z-levels.
path_grid = '/archive/gam/ESM4/DECK/ESM4_piControl_D/gfdl.ncrc4-intel16-prod-openmp/history/08990101.ocean_static_no_mask_table.nc'
grid = xr.open_dataset(path_grid)
rootdir = '/archive/oar.gfdl.cmip6/ESM4/DECK/ESM4_piControl_D/gfdl.ncrc4-intel16-prod-openmp/pp/'
name_tracer = 'ocean_cobalt_omip_tracers_year_z'
name_rates = 'ocean_cobalt_omip_rates_year_z'
name_mom6 = 'ocean_annual_z'
time = '.0896-0900.'
# Annual (5-yr average) files
ext = '/av/annual_5yr/'
month = 'ann'
path_tracer = rootdir+name_tracer+ext+name_tracer+time+month+'.nc'
path_rates = rootdir+name_rates+ext+name_rates+time+month+'.nc'
path_mom6 = rootdir+name_mom6+ext+name_mom6+time+month+'.nc'
ds_rates = xr.open_dataset(path_rates)
ds_tracer = xr.open_dataset(path_tracer)
ds_mom6 = xr.open_dataset(path_mom6)
# ### Global primary production and temperature contours
# +
# Roll the grid by 240 points in x so the map seam sits in the Atlantic,
# and shift the x coordinate accordingly.
xh = ds_tracer.xh+120
# Depth-integrated, time-mean primary production (volume-weighted sum over z).
pp = (ds_rates['pp']*ds_rates['volcello']).mean(dim='time').sum(dim='z_l').roll(xh=-240,roll_coords=False).assign_coords(xh=xh).load() #
# Near-surface (0-50 m) time-mean temperature for the overlay contours.
temp = (ds_mom6['thetao']).sel(z_l = slice(0,50)).mean(dim=['time','z_l']).roll(xh=-240,roll_coords=False).assign_coords(xh=xh).load() #
grid_rolled = grid.copy().roll(xh=-240,roll_coords=False).assign_coords(xh=xh) #
# Wrap longitudes into [-180, 180).
grid_rolled['geolon'] = grid_rolled['geolon'].where(grid_rolled['geolon']>=-180,
grid_rolled['geolon'].where(grid_rolled['geolon']<-180)+360)
crs_plot = ccrs.Robinson(central_longitude=-160)
crs_transform = ccrs.PlateCarree()
# Project lat/lon onto the plot projection
pos = crs_plot.transform_points(src_crs=crs_transform,
x=grid_rolled['geolon'].values, y=grid_rolled['geolat'].values)
X = xr.zeros_like(pp)+pos[:,:,0]
Y = xr.zeros_like(pp)+pos[:,:,1]
# Append a cyclic column so contours close across the dateline.
pp = add_cyclic_point(pp.values)
temp = add_cyclic_point(temp.values)
X = add_cyclic_point(X.values)
Y = add_cyclic_point(Y.values)
# Define manual positions of labels
contours = [0,5,10,15,20,25]
ll = -160
manual_labels = [(ll,-70),(ll,-60),(ll,-50),(ll,-40),(ll,-30),(ll,-20),
(ll,20),(ll,30),(ll,40),(ll,50),(ll,60),(ll,70)]
posc = manual_labels.copy()
# Contour-label anchor points must be given in projected coordinates.
for c in range(len(posc)):
    posc[c] = crs_plot.transform_point(src_crs=crs_transform,
x=manual_labels[c][0], y=manual_labels[c][1])
# +
fig,ax = plt.subplots(figsize=(12,6),subplot_kw={'projection': crs_plot})
ax.add_feature(cfeature.LAND,facecolor='lightgray',zorder=10)
ax.coastlines(linewidth=2)
ax.set_title('Depth integrated primary production and temperature contours',pad=10)
# Correct issues of plotting contours from curvilinear grid
X,Y,temp = z_masked_overlap(ax, X, Y, temp, source_projection=crs_transform)
im_pp=ax.pcolormesh(X,Y,pp,vmin=0,vmax=2000,cmap='Greens')
# im_phyc=ax.pcolormesh(grid_rolled['geolon'],grid_rolled['geolat'],phyc,transform=ccrs.PlateCarree())
im_temp=ax.contour(X,Y,temp,contours,colors='cornflowerblue',linestyle=':')
# im_temp=ax.contour(grid_rolled['geolon'],grid_rolled['geolat'],temp,transform=crs_transform)
ax.clabel(im_temp, inline=True, fontsize=10, manual = posc, fmt='%1.0f')
cbar = plt.colorbar(im_pp,ax=ax,fraction=0.06,pad=0.04,orientation='horizontal')
cbar.set_label('[$mol s^{-1}$]')
fig.savefig('figures/esm4/pp.png',transparent=True,orientation='landscape',dpi=600)
# -
# ### Phytoplankton, primary production, respiration as a function of temperature
# Calculate bgc variables as a function of temperature
bins_theta = np.arange(-2,32,1)  # 1 degC temperature bins
variables = ['phyc']
# Phytoplankton carbon rebinned onto temperature, per time step.
ds_tracer_rebinned = histsum_ds(ds_tracer,ds_mom6['thetao'],bins=bins_theta,dim=['xh','yh','z_l'],verbose=True,hist_vars=variables)
ds_tracer_rebinned.load()
variables = ['pp','remoc']
# Production/remineralisation rebinned onto the time-mean temperature field.
ds_rates_rebinned = histsum_ds(ds_rates,ds_mom6['thetao'].mean('time'),bins=bins_theta,verbose=True,hist_vars=variables)
ds_rates_rebinned.load()
# +
start = 0
stop = 1
N = 12
# One BrBG colour per time step (12 steps; labelled January..December below —
# presumably monthly output despite the "annual" file names; TODO confirm).
colors = [ cm.BrBG(x) for x in np.linspace(start, stop, N) ]
fig, (ax0,ax1) = plt.subplots(figsize = (12,8),nrows=2,gridspec_kw={'height_ratios': [1, 1]})
for t in range(12):
    ax0.plot(ds_tracer_rebinned['thetao_bin'],1E-12*ds_tracer_rebinned['phyc'].isel(time=t),color=colors[t])
ax0.set_title('Layerwise integral of phytoplankton carbon')
ax0.set_ylabel('[$10^{12}$ moles]')
ax0.set_ylim([0,4])
ax0.grid(linestyle=':')
# Inset colourbar keying the line colours to time of year.
cbaxes = inset_axes(ax0, width="30%", height="3%", loc='upper center')
# fig.add_axes([0.4, 0.8, 0.2, 0.01])
# plt.colorbar(cax=cbaxes, ticks=[0.,1], orientation='horizontal')
sm = plt.cm.ScalarMappable(cmap=cm.BrBG, norm=plt.Normalize(vmin=0, vmax=1))
cbar = plt.colorbar(sm,cax=cbaxes,ticks=[0,1],orientation='horizontal')
cbar.ax.set_xticklabels(['January','December'],fontsize=12)
ax1.set_title('Layerwise integral of primary production and remineralisation')
ax1.plot(ds_rates_rebinned['thetao_bin'],1E-7*ds_rates_rebinned['pp'],label='Primary production')
ax1.plot(ds_rates_rebinned['thetao_bin'],1E-7*ds_rates_rebinned['remoc'],label='Remineralisation')
ax1.grid(linestyle=':')
ax1.set_ylabel('[$10^{7}$ mol s$^{-1}$]')
ax1.set_ylim([0,1.2])
ax1.set_xlabel('Temperature [$^\circ$ C]')
ax1.legend(fontsize=12,loc='upper center')
fig.savefig('figures/osm_bgc-on-temp.png',transparent=True,orientation='landscape',dpi=600)
# -
# ### Oxygen and nitrogen, zonal means
# +
fig,ax = plt.subplots(figsize=(16,6))
# Zonal mean over x-indices 100:400 (Pacific sector) of oxygen and temperature.
colo = ds_tracer['o2'].isel(xh=slice(100,400)).mean(dim='xh').squeeze().load()
cont = ds_mom6['thetao'].isel(xh=slice(100,400)).mean(dim='xh').squeeze().load()
im_colo = ax.contourf(colo['yh'],colo['z_l'],colo,cmap='Blues')
cbar = plt.colorbar(im_colo)
contours = [0,2.5,5,10,15]
im_cont = ax.contour(cont['yh'],cont['z_l'],cont,contours,colors='darkorange')
ax.clabel(im_cont, inline=True, fontsize=14, fmt='%1.0f')
ax.invert_yaxis()  # depth increases downward
ax.set_ylabel('depth [m]')
ax.set_xlabel('latitude [$^\circ N$]')
ax.set_title('oxygen and temperature contours (Pacific zonal mean)',pad=20)
cbar.set_label('[$mol\,m^{-3}$]')
fig.savefig('figures/esm4/o2-temp_pacific',transparent=True,orientation='landscape',dpi=600)
# +
# Same layout for nitrate.
fig,ax = plt.subplots(figsize=(16,6))
colo = ds_tracer['no3'].isel(xh=slice(100,400)).mean(dim='xh').squeeze().load()
cont = ds_mom6['thetao'].isel(xh=slice(100,400)).mean(dim='xh').squeeze().load()
im_colo = ax.contourf(colo['yh'],colo['z_l'],colo,cmap='Purples')
cbar = plt.colorbar(im_colo)
contours = [0,2.5,5,10,15]
im_cont = ax.contour(cont['yh'],cont['z_l'],cont,contours,colors='darkorange')
ax.clabel(im_cont, inline=True, fontsize=14, fmt='%1.0f')
ax.invert_yaxis()
ax.set_ylabel('depth [m]')
ax.set_xlabel('latitude [$^\circ N$]')
ax.set_title('nitrate and temperature contours (Pacific zonal mean)',pad=20)
cbar.set_label('[$mol\,m^{-3}$]')
fig.savefig('figures/esm4/no3-temp_pacific',transparent=True,orientation='landscape',dpi=600)
# -
|
notebooks/draw_figs_esm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, re, magic, json, sys
import time, urllib
from googlesearch import search
from datetime import datetime, timedelta
from pymongo import MongoClient
from search_engine_scraper import google_search,bing_search,yahoo_search
import requests
from bs4 import BeautifulSoup
import logging, logging.handlers
import random
import subprocess
import signal, unicodedata
import html2text
from contextlib import contextmanager
from bs4 import BeautifulSoup, UnicodeDammit
import pprint
import PyPDF2
from time import mktime, strptime
from datetime import datetime
import json
import spacy
from ipynb.fs.full.similarity_measure import compute_best_doc
from nltk.corpus import wordnet
#######import this into a requierements.txt file
import session_info
session_info.show()
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#def constant():
CHEMIN_LOG = "./log/"  # directory for rotating log files
CHEMIN_RESULTATS = os.getcwd()+"/documents/"# "./documents/" — per-city result folders live here
NOT_SITES = "-site:youtube.com -site:pagesjaunes.fr"  # domains excluded from every query
# +
#os.getcwd()+"/documents/"
# -
class Log(object):
    """Thin wrapper around :mod:`logging` writing to a rotating file and the console."""

    def __init__(self, dossier, nomFichier, niveau=logging.DEBUG):
        """Create a logger named ``nomFichier`` logging to ``<dossier><nomFichier>.log``.

        Parameters
        ----------
        dossier : str
            Directory (with trailing separator) where the log file is created.
        nomFichier : str
            Logger name and log-file base name.
        niveau : int, optional
            Logging level applied to the logger and both handlers.
        """
        super(Log, self).__init__()
        self.__logger__ = logging.getLogger(nomFichier)
        self.__logger__.setLevel(niveau)
        # Renamed from `format`, which shadowed the builtin of the same name.
        formatter = logging.Formatter('%(asctime)s :: %(levelname)s :: %(message)s', datefmt='%d-%m-%Y %H:%M:%S')
        # Rotate at ~1 MB, keeping one backup file.
        fichierLog = logging.handlers.RotatingFileHandler("{0}{1}.log".format(dossier, nomFichier), 'a', 1000000, 1)
        fichierLog.setLevel(niveau)
        fichierLog.setFormatter(formatter)
        self.__logger__.addHandler(fichierLog)
        # Mirror every record to the console as well.
        console = logging.StreamHandler()
        console.setLevel(niveau)
        self.__logger__.addHandler(console)

    def info(self, message):
        self.__logger__.info(message)

    def debug(self, message):
        self.__logger__.debug(message)

    def warning(self, message):
        self.__logger__.warning(message)

    def error(self, message):
        self.__logger__.error(message)

    def critical(self, message):
        self.__logger__.critical(message)

    def exception(self, message):
        """Log ``message`` at ERROR level with the current traceback attached."""
        self.__logger__.exception(message)

    def close(self):
        """Detach and close all handlers so the log file is released."""
        for handler in self.__logger__.handlers[:] :
            handler.close()
            self.__logger__.removeHandler(handler)
def pause(logger, minutes=0.5):
    """Block for roughly ``minutes`` minutes, logging when the run resumes.

    Used to throttle search-engine queries once the request limit is hit.

    Parameters
    ----------
    logger : Log
        Destination for the informational message.
    minutes : float, optional
        Pause duration in minutes (default 0.5).
    """
    stamp = datetime.now().strftime('%d-%m-%Y:%Hh%M')
    deadline = datetime.now() + timedelta(minutes=minutes)
    logger.info("{0} : Nombre limite de requete atteint. Reprise du programme : {1}".format(stamp, deadline.strftime(
        '%d-%m-%Y:%Hh%M')))
    # Poll in half-second steps until the deadline has passed.
    while datetime.now() < deadline:
        time.sleep(0.5)
def generation_requetes_(ville, motscles, logger, site):
    """Yield search queries for ``ville`` x ``motscles`` not yet executed.

    Queries already run are read from ``.sauvegarde.txt`` in the city's
    result folder and skipped.
    """
    logger.info("Génération des requêtes")
    requetes_effectuees = []
    with open("{0}{1}/.sauvegarde.txt".format(CHEMIN_RESULTATS, ville)) as fichier:
        requetes_effectuees = fichier.readlines()
    for i in range(0, len(motscles), 1):
        # motscles_couple = motscles[i].split("+")
        # If no site is given, only exclude the domains listed in the
        # NOT_SITES constant; otherwise search that site exclusively.
        if site == "":
            site_or_not_sites = NOT_SITES
        else:
            site_or_not_sites = "site:" + site
        # NOTE(review): `site_or_not_sites` is computed but never appended to
        # the query below, so the `site` argument is currently ignored —
        # confirm intent before wiring it in (it would change every query).
        requete = "\"{0}\" AND {1}".format(ville,motscles[i])
        if not any(requete in s for s in requetes_effectuees):
            yield requete
def insertion_document(document, collection):
    """Insert a document (dict) into a MongoDB collection and return its id."""
    return collection.insert_one(document).inserted_id
def format_to_iso_date(pdfdate):
    """Convert a PDF metadata date to an ISO 8601 string.

    Three layouts are recognised by their total length:
      23 chars  ``D:YYYYMMDDHHMMSS+HH'MM'``
      17 chars  ``D:YYYYMMDDHHMMSSZ``
      21 chars  ``YYYYMMDDHHMMSS+HH'MM'``

    Returns
    -------
    str
        ISO-formatted local date-time, or ``"no_date"`` when ``pdfdate`` is
        empty or cannot be parsed (previously an unrecognised layout left
        ``datestring`` empty and ``strptime`` raised ``ValueError``).
    """
    if pdfdate == '':
        return "no_date"
    if len(pdfdate) == 23:
        datestring = pdfdate[2:-7]
    elif len(pdfdate) == 17:
        datestring = pdfdate[2:-1]
    elif len(pdfdate) == 21:
        datestring = pdfdate[:-7]
    else:
        datestring = ''
    try:
        ts = strptime(datestring, "%Y%m%d%H%M%S")
    except ValueError:
        # Unknown layout or malformed timestamp: fail soft instead of raising.
        return "no_date"
    # Round-trip through the local epoch, then serialise.
    return datetime.fromtimestamp(mktime(ts)).isoformat()
def get_pdf_info(f):
    """Return the modification (or creation) date of a PDF as an ISO string.

    Prefers ``/ModDate`` and falls back to ``/CreationDate``. Returns
    ``"no_date"`` when neither key is present — previously the ``else``
    branch was ``pass``, leaving ``val_`` unbound and raising ``NameError``.
    (The dead triple-quoted copy of the old implementation was removed.)

    Parameters
    ----------
    f : str or file-like
        Path (or stream) handed to :class:`PyPDF2.PdfFileReader`.
    """
    fd = PyPDF2.PdfFileReader(f, 'rb')
    doc_info = fd.getDocumentInfo()
    if '/ModDate' in doc_info:
        val_ = doc_info['/ModDate']
    elif '/CreationDate' in doc_info:
        val_ = doc_info['/CreationDate']
    else:
        val_ = ''  # format_to_iso_date('') yields "no_date"
    return format_to_iso_date(val_)
# extract spatial named entities
def SNE_Extract(title_mtd):
    """Extract spatial named entities (spaCy label ``LOC``) from a title.

    Returns a dict mapping ``'ent0'``, ``'ent1'``, ... to the ``repr`` of
    each location entity found in ``title_mtd``.
    """
    nlp = spacy.load('fr_core_news_sm')
    parsed = nlp(title_mtd)
    locations = [ent for ent in parsed.ents if ent.label_ == 'LOC']
    return {'ent' + repr(idx): repr(ent) for idx, ent in enumerate(locations)}
# This function only uses the publication date; to be extended with automatic
# extraction of temporal named entities from titles.
def TNE_extract(mtd):
    """Attach a temporal-named-entity dict to ``mtd`` from its publication date.

    Sets ``mtd['TNE']`` and, when ``mtd['post_date']['$date']`` exists,
    copies it under ``mtd['TNE']['date']``. Returns the (mutated) dict.
    """
    tne = {}
    if 'post_date' in mtd and '$date' in mtd['post_date']:
        tne['date'] = mtd['post_date']['$date']
    mtd['TNE'] = tne
    return mtd
# Add extra metadata for spatial and temporal indexing.
def enrich_mtd(mtd):
    """Enrich document metadata with spatial (SNE) and temporal (TNE) entities.

    When a title is present, spatial entities are extracted from it and the
    raw publication date (if any) is stored under ``mtd['TNE']['date']``.
    Returns the (mutated) metadata dict.
    """
    if "title" in mtd:
        mtd['SNE'] = SNE_Extract(mtd['title'])  # spatial named entities
        tne = {}
        if "post_date" in mtd:
            tne['date'] = mtd['post_date']
        mtd['TNE'] = tne
    return mtd
# +
# quelques étapes de preprocess
# #!/usr/bin/env python3
# -*- coding: utf-8 -*-
class TimeoutException(Exception):
    """Raised when a guarded section exceeds its time budget."""


@contextmanager
def limitation_temporelle(seconds):
    """Context manager aborting the wrapped block after ``seconds`` seconds.

    Relies on ``SIGALRM`` (Unix only, main thread only). When the alarm
    fires, :class:`TimeoutException` is raised inside the block; the alarm
    is always cancelled on exit.
    """
    def _on_alarm(signum, frame):
        raise TimeoutException

    signal.signal(signal.SIGALRM, _on_alarm)
    signal.alarm(seconds)
    try:
        yield
    finally:
        # Cancel any pending alarm even if the block raised.
        signal.alarm(0)
def norm_string(string):
    """Strip accents (NFKD + ASCII) and collapse special characters to ``_``.

    Runs of characters other than word characters, square brackets, dots
    and slashes are replaced by a single underscore.
    """
    ascii_only = unicodedata.normalize('NFKD', string).encode('ascii', 'ignore').decode('utf-8')
    return re.sub(r'[^\w\[\]\./]+', '_', ascii_only)
def formatage_mots(mot):
    """Format a keyword for use in a search query.

    Drops newline characters and replaces spaces with ``?``.

    Parameters
    ----------
    mot : str
        Raw keyword line.

    Returns
    -------
    str
        Formatted keyword.
    """
    return mot.replace("\n", "").replace(" ", "?")
def get_paire(ligne):
    """Split a ``cle=valeur`` line into its key and value.

    The line is stripped, partitioned on the first ``=``, and all remaining
    spaces are removed from both sides.

    Returns
    -------
    tuple of str
        ``(cle, valeur)``.
    """
    cle, _, valeur = ligne.strip().partition("=")
    return cle.replace(" ", ""), valeur.replace(" ", "")
def creation_dossier_resultat(chemin_resultats, ville, log, keyword_file) :
    """Create the per-city result folders and return the formatted keywords.

    Ensures ``<chemin_resultats><ville>/`` and its ``Documents_SRC``
    sub-folder exist, touches the ``.sauvegarde.txt`` bookkeeping file,
    reads ``keyword_file`` and formats each keyword for querying.

    Returns
    -------
    tuple
        ``(words, thematic)`` — the formatted keyword list and the keyword
        file's base name (used as the thematic label).
    """
    head, tail = os.path.split(keyword_file)
    # head,tail.split('.')[0]
    thematic = tail.split('.')[0]# keyword_file
    if not os.path.exists(chemin_resultats + ville):
        os.makedirs(chemin_resultats + ville)
        log.info("Création des dossier pour {0}".format(ville))
    if not os.path.exists("{0}{1}/Documents_SRC".format(chemin_resultats, ville)):
        os.makedirs("{0}{1}/Documents_SRC".format(chemin_resultats, ville))
        log.info("Création du dossier Documents_SRC\n")
    with open(keyword_file) as keywords_file:
        words = keywords_file.readlines()
        log.info("Lecture de keywords.txt ... \n")
    # Ensure the save file exists (created empty on first run).
    open("{0}{1}/.sauvegarde.txt".format(chemin_resultats, ville),"a").close()
    for i in range(0, len(words)):
        words[i] = formatage_mots(words[i])
    # NOTE(review): `sampling` is computed but never used or returned —
    # presumably a leftover experiment; confirm before deleting.
    sampling = random.choices(words, k=3)
    #print('>>',words)
    #print('>>>',sampling)
    return words, thematic
def extract_date(text):
    """Extract the first date found in ``text`` as a :class:`datetime`.

    Two patterns are tried, in order:
      1. ``JJ/MM/AAAA`` (numeric French date)
      2. ``JJ mois AAAA`` (day, French month name, year)

    Returns
    -------
    datetime or None
        The parsed date, or ``None`` when no pattern matches, the month
        name is unknown, or the digits do not form a valid calendar date
        (previously e.g. ``32/13/2020`` raised ``ValueError``).
    """
    # Pattern: JJ/MM/AAAA
    match_date = re.search(r'(\d{2})/(\d{2})/(\d{4})', text)
    # Pattern: JJ mois AAAA
    match_date2 = re.search(r'(\d{1,2}) (\w+) (\d{4})', text)
    if match_date:
        try:
            return datetime(
                int(match_date[3]),  # year
                int(match_date[2]),  # month
                int(match_date[1])   # day
            )
        except ValueError:
            # Digits matched but do not form a real date: fail soft.
            return None
    elif match_date2:
        mapping_month = {
            'janvier': 1,
            'février': 2,
            'mars': 3,
            'avril': 4,
            'mai': 5,
            'juin': 6,
            'juillet': 7,
            'août': 8,
            'septembre': 9,
            'octobre': 10,
            'novembre': 11,
            'décembre': 12,
        }
        month = match_date2[2].lower()
        if month in mapping_month:
            try:
                return datetime(
                    int(match_date2[3]),    # year
                    mapping_month[month],   # month
                    int(match_date2[1])     # day
                )
            except ValueError:
                return None
    return None
def html_date_to_isoformat(date):
    """Convert an HTML/JSON timestamp like ``2020-09-18T00:00:00.000Z`` to ISO format.

    The string is parsed with :func:`time.strptime`, round-tripped through
    the local epoch, and returned as ``datetime.isoformat()`` output.
    """
    parsed = strptime(date, "%Y-%m-%dT%H:%M:%S.%fZ")
    return datetime.fromtimestamp(mktime(parsed)).isoformat()
def less_html(html_doc):
    """Strip boilerplate from an HTML document and extract basic metadata.

    Removes navigation/advertising/social blocks (matched by class or id)
    and non-content tags (scripts, forms, images, ...), then returns the
    metadata found and the pruned markup.

    Parameters
    ----------
    html_doc : str or bytes
        Raw HTML source.

    Returns
    -------
    tuple
        ``(metadata, html)`` — ``metadata`` is a dict with ``'title'`` and
        possibly ``'post_date'`` (ISO string); ``html`` is the cleaned
        markup as a string.
    """
    soup = BeautifulSoup(html_doc, 'html.parser')
    # Attribute values containing any of these words flag boilerplate blocks.
    bad_classes = re.compile(r'menu|head|publici|share|social|button|alert|prev|next|foot|tags|label|sidebar|author|topics|contact|modal|nav|snippet|register|aside|logo|bandeau|immobilier', re.IGNORECASE)
    # Trim leading/trailing whitespace or newlines from the title.
    title = re.sub(r'^\s|\s$', '', soup.find('title').text)
    # Metadata dictionary
    metadata = {}
    metadata['title'] = title
    # The first element whose class mentions "date" usually holds the
    # publication date.
    bloc_date = soup.find(class_=re.compile(r'date', re.IGNORECASE))
    if bloc_date:
        metadata['post_date'] = ''
        date = extract_date(bloc_date.text)
        # BUG FIX: extract_date returns a datetime, not a string, so it must
        # be serialised directly — passing it to html_date_to_isoformat
        # (which strptime-parses a string) raised TypeError here.
        if date: metadata['post_date'] = date.isoformat()
    for balise in soup.find_all():
        # Tags that never carry article text.
        conditions = (
            balise.name == 'head',
            balise.name == 'nav',
            balise.name == 'footer',
            balise.name == 'aside',
            balise.name == 'script',
            balise.name == 'style',
            balise.name == 'a',
            balise.name == 'figure',
            balise.name == 'img',
            balise.name == 'svg',
            balise.name == 'noscript',
            balise.name == 'form',
            balise.name == 'button'
        )
        if any(conditions):
            balise.extract()
        # Prepend a space to each span to avoid occasionally gluing
        # adjacent words together.
        elif balise.name == 'span' and balise.string:
            balise.string = ' ' + balise.string
    for balise in soup.find_all(attrs={'class': bad_classes}):
        balise.decompose()
    for balise in soup.find_all(attrs={'id': bad_classes}):
        balise.decompose()
    for balise in soup.find_all():
        if balise.text == '': balise.extract()
    return metadata, str(soup)
def convert_pdf_to_txt(src_file_path):
    """Convert a PDF file to plain text via the external ``pdftotext`` tool.

    ``-q`` suppresses error messages; ``-`` sends the text to stdout, which
    is captured and decoded as UTF-8.

    Parameters
    ----------
    src_file_path : str
        Path of the source PDF.

    Returns
    -------
    str
        Raw extracted text.
    """
    result = subprocess.run(["pdftotext", "-q", src_file_path, "-"], stdout=subprocess.PIPE)
    return result.stdout.decode('utf-8')
def convert_html_to_txt(src_file_path):
    """Convert a saved HTML file to plain text.

    The file's encoding is auto-detected with :class:`UnicodeDammit`, the
    markup is pruned by :func:`less_html`, then rendered to text with
    ``html2text`` (links and emphasis markers dropped).

    Parameters
    ----------
    src_file_path : str
        Path of the source HTML file.

    Returns
    -------
    tuple
        ``(metadata, text)`` — metadata from :func:`less_html` plus the
        rendered plain text.
    """
    # Context manager so the handle is always closed (previously the file
    # was opened with open(...).read() and the handle leaked).
    with open(src_file_path, 'rb') as fh:
        html_file = fh.read()
    dammit = UnicodeDammit(html_file)
    metadata, html_mini = less_html(html_file.decode(dammit.original_encoding))
    handler = html2text.HTML2Text()
    handler.ignore_links = True
    handler.ignore_emphasis = True
    text = handler.handle(html_mini)
    return metadata, text
# -
class TextCleaner():
    """Regex-based cleaner for text extracted from PDFs and web pages.

    Repairs bad line breaks and hyphenation, removes links and layout
    artefacts, and fixes common mojibake sequences (UTF-8 read as Latin-1).
    """
    def __init__(self):
        """Compile all cleaning regular expressions once."""
        self.match_alpha = re.compile(r'\w{3,}') # at least one word of 3+ letters exists
        self.match_formfeed = re.compile(r'\f') # page break
        self.match_useless_line = re.compile(r'^[^\w•]+$|^[\d\. ]+$', re.MULTILINE) # line with no alphanumeric content
        self.match_bad_nl = re.compile(r'([^\.?!\n:])\n+(?![IVX\d]+[\.\)]|ANNEXE)([\w\(«\"=<>])') # spurious line break mid-sentence
        self.match_bad_nl2 = re.compile(r'(\.{4,} ?\d+) (\w)') # spurious break after a dotted TOC leader
        self.make_paragraph = re.compile(r'\.\n(?=\w)') # split paragraphs more cleanly
        self.match_toomuch_nl = re.compile(r'\n{3,}') # surplus blank lines
        self.match_begin_end_nl = re.compile(r'^\n+|\n+$') # leading/trailing newlines
        self.match_begin_end_space = re.compile(r'^[ \t]+|[ \t]+$', re.MULTILINE) # leading/trailing spaces or tabs per line
        self.match_toomuch_spaces = re.compile(r' {2,}|\t+') # surplus spaces and tabs
        self.match_link = re.compile(r'!?\[.*?\]\(.*?\)|https?://[^ ]+', re.DOTALL) # markdown link left over from HTML conversion
        self.match_cesure = re.compile(r'(\w)-(\w)') # hyphenation
        self.match_stuckwords = re.compile(r'(/\d{2,4}|[a-z])([A-ZÉÈÀÔ])') # dates and words glued together
        self.match_odd = re.compile(r'[�●§\\\|]+|\(cid:\d+\)|(?<=- )W ') # odd/garbled characters
        self.match_accent1 = re.compile(r'é') # mojibake for é
        self.match_accent2 = re.compile(r'è') # mojibake for è
        self.match_accent3 = re.compile(r'Ã') # mojibake for à
        self.match_puce1 = re.compile(r'')  # bullet glyph (private-use char)
        self.match_puce2 = re.compile(r'[]')  # dash-like bullet glyphs
        self.match_diam = re.compile(r'diam\.')
        self.match_apostrophe = re.compile(r'’')
    def clean(self, text):
        """Apply all cleaning passes to ``text`` and return the result."""
        # Replace non-breaking spaces
        text = text.replace(u'\xa0', ' ')
        text = self.match_link.sub('', text)
        text = self.match_formfeed.sub('\n\n', text)
        text = self.match_useless_line.sub('\n', text)
        text = self.match_diam.sub('diamètre', text)
        text = self.match_accent1.sub('é', text)
        text = self.match_accent2.sub('è', text)
        text = self.match_accent3.sub('à', text)
        text = self.match_puce1.sub('*', text)
        text = self.match_puce2.sub('-', text)
        text = self.match_odd.sub('', text)
        text = self.match_apostrophe.sub('\'', text)
        text = self.match_begin_end_space.sub('', text)
        text = self.match_bad_nl.sub(r'\g<1> \g<2>', text)
        # Line-repair pass applied twice: gives better results in practice.
        text = self.match_bad_nl.sub(r'\g<1> \g<2>', text)
        text = self.match_bad_nl2.sub(r'\g<1>\n\g<2>', text)
        text = self.make_paragraph.sub('.\n\n', text)
        text = self.match_stuckwords.sub(r'\g<1> \g<2>', text)
        text = self.match_toomuch_spaces.sub(' ', text)
        text = self.match_toomuch_nl.sub('\n\n', text)
        text = self.match_begin_end_nl.sub('', text)
        return text
    def exists_alpha(self, text):
        """Return True when ``text`` contains a word of 3+ characters."""
        return self.match_alpha.search(text) is not None
# +
def sauvegarde_fichier_advanced(ville, url, logger, tc):
    """Download ``url``, save it under the city's ``Documents_SRC`` folder,
    convert it to clean text, and return the document record.

    Parameters
    ----------
    ville : str
        City name (result-folder key).
    url : str
        URL of the file to download.
    logger : Log
        Log sink.
    tc : TextCleaner
        Text cleaner applied to the converted text.

    Returns
    -------
    dict or None
        The document metadata dict (with ``'text'`` when conversion
        succeeded), or ``None`` when the file already exists locally.
    """
    # Use the base URL (minus "www.") to label the file's origin.
    origine = re.sub(r'^www\.', '', urllib.request.urlparse(url).netloc)
    url_split = url.split('/')
    # Derive the document name from the last non-empty URL segment.
    if url_split[-1] == '':
        nom_document = norm_string(url_split[-2])
    else:
        nom_document = norm_string(url_split[-1])
    src_file_path = "{0}{1}/Documents_SRC/[{2}]{3}".format(CHEMIN_RESULTATS, ville, origine, nom_document)
    # print(">>>>>>>", url)
    # print("#####", src_file_path)
    if not os.path.isfile(src_file_path):
        req = urllib.request.Request(url, headers={'User-Agent': "Magic Browser"})
        response = urllib.request.urlopen(req)
        read_buffer = response.read()
        # Detect the real MIME type from the content, not the extension.
        mime_type = magic.from_buffer(read_buffer, mime=True)
        source = {
            'file_link': src_file_path,
            'complete_url': url,
            'base_url': origine,
            'open_access': False,
        }
        document = {
            'name': nom_document,
            'mime_type': mime_type,
            'source': source,
            'manually_validated': False,
        }
        # Write the downloaded file to disk.
        with open(src_file_path, "wb") as fichier:
            fichier.write(read_buffer)
            logger.info("Document sauvegardé.")
        # Pick the conversion matching the detected source type.
        # PDF document:
        if mime_type == 'application/pdf':
            try:
                # Convert to raw text
                raw_text = convert_pdf_to_txt(src_file_path)
                logger.info("Document PDF converti en texte brut.")
                p_d = get_pdf_info(src_file_path)
                document['text'] = tc.clean(raw_text)
                document['post_date'] = p_d
                logger.info("Texte nettoyé.\n")
            # NOTE(review): bare except swallows everything incl. KeyboardInterrupt.
            except:
                logger.exception("Erreur lors de la conversion en texte du pdf.\n")
        # HTML (or other web) document:
        elif mime_type == 'text/html':
            try:
                # Convert to raw text
                metadata, raw_text = convert_html_to_txt(src_file_path)
                logger.info("Document web converti en texte brut.")
                document['text'] = tc.clean(raw_text)
                document.update(metadata)
                logger.info("Texte nettoyé.\n")
            except:
                logger.exception("Erreur lors de la conversion en texte de la page web.\n")
        else:
            logger.info("Autre type de document téléchargé.\n")
        document = enrich_mtd(document)
        return document
    else:
        logger.info("Le document existe déjà.\n")
        return None
# -
#
def run_requete(search):
    """Scrape the first Google results page for ``search`` and return result URLs.

    Parses anchor ``href`` attributes of the form ``/url?q=...&sa=U``.
    NOTE(review): the final ``url_list[:-1]`` deliberately drops the last
    collected link — presumably a Google-internal entry; confirm before
    changing.
    """
    page = requests.get(f"https://www.google.com/search?q={search}")
    soup = BeautifulSoup(page.content, "html5lib")
    links = soup.findAll("a")
    url_list = []
    for link in links :
        #print(link)
        link_href = link.get('href')
        if "url?q=" in link_href:# and not "webcache" in link_href:
            # print (link.get('href').split("?q=")[1].split("&sa=U")[0])
            url_list.append(link.get('href').split("?q=")[1].split("&sa=U")[0])
    return url_list[:-1]
### save database
def save_to_jsonl(mtd,spatial_extent):
    """Append one metadata record to the city's ``3m_db.jsonl`` inventory file."""
    db_path = "{0}{1}/3m_db.jsonl".format(CHEMIN_RESULTATS, spatial_extent)
    with open(db_path, 'a+', encoding='utf8') as outfile:
        outfile.write(json.dumps(mtd, ensure_ascii=False))
        outfile.write('\n')
# +
#
# -
def recherche_web_advanced(spatial_extent,voc_concept, motscles,thematic, logger, site):
    """Run the whole search/download pipeline for one city.

    For every not-yet-executed query, fetch the top results, download and
    convert each document, score it against the concept vocabulary, append
    it to the JSONL inventory, and record the query in ``.sauvegarde.txt``.

    Parameters
    ----------
    spatial_extent : str
        City name.
    voc_concept : str
        Path of the concept-vocabulary file (used for document scoring).
    motscles : list of str
        Formatted keywords to combine with the city name.
    thematic : str
        Thematic label stored with each document.
    logger : Log
        Log sink.
    site : str
        Optional domain restriction (currently ignored by the query
        builder — see generation_requetes_).
    """
    # Text-cleaner instance shared by every download.
    tc = TextCleaner()
    #stopwords = creer_liste_stopwords('listes_stopwords/stopwords_base.txt', 'listes_stopwords/stopwords_1000.txt')
    # Generate the list of queries still to run.
    liste_requetes = generation_requetes_(formatage_mots(spatial_extent), motscles, logger, site)
    #print('>>#', liste_requetes)
    for requete in liste_requetes:
        #print('###', requete)
        response = search(requete, lang='fr', stop=10) # stop=10
        #response = run_requete(requete) # top@10 first pages
        # response = bing_search.search(requete) # stop=10
        try:
            #liste_url = list(response)
            liste_url = set(response)  # de-duplicate result URLs
        except:
            liste_url = []
        with (open("{0}{1}/resume.txt".format(CHEMIN_RESULTATS, spatial_extent), "a")) as res:
            logger.info("Nouvelle requête : {0}\n".format(requete))
            res.write("Requête : {0}\n".format(requete))
            res.write("Nombre de résultats affichés : {0}\n".format(len(liste_url)))
            res.write("\nListe des résultats\n")
            res.write("\n")
            for url in liste_url:
                if not url.endswith('xml.gz'):
                    logger.info("URL : {0}\n".format(url))
                    res.write("{0}\n".format(url))
                    try:
                        # Cap each download at 30 s so one slow host
                        # cannot stall the whole crawl.
                        with limitation_temporelle(30):
                            document = sauvegarde_fichier_advanced(spatial_extent, url, logger, tc)
                        # Only keep documents whose text conversion succeeded.
                        if document and 'text' in document:
                            compute_best_doc(voc_concept, document)
                            document['source']['type'] = 'web_request'
                            document['source']['raw_request'] = requete
                            #document['collection_date'] = date_collecte
                            document['spatial_extent'] = spatial_extent
                            document['thematic'] = thematic
                            # Append the document to the JSONL inventory.
                            save_to_jsonl(document,spatial_extent)
                            logger.info("Document inséré dans l'inventaire.\n")
                    except Exception as e:
                        print(e)
                        logger.info("Erreur pour l'URL : {0}\n".format(url))
                        res.write("Erreur pour l'URL : {0}\n".format(url))
            logger.info("***********************************************************************\n")
            res.write("***********************************************************************\n\n")
        # Record the query so it is skipped on the next run.
        with open("{0}{1}/.sauvegarde.txt".format(CHEMIN_RESULTATS, spatial_extent), "a") as f:
            f.write("{0}\n".format(requete))
        # Throttle between queries to avoid being blocked by the engine.
        pause(logger)
# +
# some extra to process google data before formating to BioTex inout file
def cleanhtml(raw_text, remove_punc=False, lower=False):
    """Strip HTML tags, citation markers, URLs and problematic characters.

    Parameters
    ----------
    raw_text : str
        Text possibly containing raw HTML.
    remove_punc : bool, optional
        Collapse every non-alphanumeric run into a single space.
    lower : bool, optional
        Lower-case the result.

    Returns
    -------
    str
        Cleaned, stripped text with a space enforced after each full stop.
    """
    clean_text = raw_text
    # Drop tags, bracketed citation numbers and bare www URLs.
    for pattern in (re.compile('<.*?>'), re.compile('\[\d\]'), re.compile('www.\S+.com')):
        clean_text = pattern.sub('', clean_text)
    # Characters known to break Biotex input files.
    for char in ('\n', '\t', '\"', 'ã', '€', "\xa0"):
        clean_text = re.sub(char, '', clean_text)
    # Guarantee whitespace after every dot.
    clean_text = re.sub(r"\.(?=\S)", ". ", clean_text)
    if remove_punc:
        clean_text = re.sub('[^A-Za-z0-9]+', ' ', clean_text)
    if lower:
        clean_text = clean_text.lower()
    return clean_text.strip()
def biotex_corpus_builder(g_corpus,keywords):
    """Write a Biotex-formatted corpus file from a list of documents.

    Each document's cleaned text is appended to
    ``<cwd>/<keywords>/<keywords>_google_corpus.txt`` followed by the
    ``##########END##########`` separator Biotex expects. The output
    directory is created if needed.

    Parameters
    ----------
    g_corpus : iterable of dict
        Documents, each carrying a ``'text'`` entry.
    keywords : str
        Thematic name used for both the folder and the file.
    """
    root = os.getcwd() + '/' + str(keywords)
    os.makedirs(root, exist_ok=True)
    # Open once for the whole corpus (previously reopened per document),
    # and write each document's own text — the old code wrote
    # `g_corpus['text']` for every iteration, a bug.
    with open(root + '/' + str(keywords) + '_google_corpus.txt', 'a+') as fw:
        for doc in g_corpus:
            fw.write("%s\n" % cleanhtml(doc['text']))
            fw.write("\n##########END##########\n")
# -
def advanced_scraper(spatial_extent,voc_concept, site=''):
    """Entry point: collect and inventory web documents about ``spatial_extent``.

    Sets up the dated logger and the result folders, reads the keyword /
    vocabulary file ``voc_concept``, then runs the search-and-download
    pipeline. ``site`` optionally restricts the search to one domain
    (currently not applied by the query builder).
    """
    logger = Log(CHEMIN_LOG, 'collecteDeDonnees_{0}'.format(datetime.today().strftime("%d-%m-%y")))
    # ville = formatage_mots(input("Sur quelle ville voulez-vous effectuer la recherche ? \n"))
    spatial_extent = spatial_extent.title()
    motscles, thematic = creation_dossier_resultat(CHEMIN_RESULTATS, spatial_extent, logger,voc_concept)
    logger.info("Début de la recherche de document concernant la ville de {0}".format(spatial_extent))
    recherche_web_advanced(spatial_extent,voc_concept, motscles,thematic, logger, site)
    #recherche_web_(motscles)
|
scrap_on_google_v2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# features (3 predictors per sample, 5 samples)
x = np.array([[8,5,2],[13,3,10],[15,4,6],[18,3,15],[20,5,4]])
print(x)
# target
y = np.array([40,60,80,52,78])
print(y)
# -
# Create model
reg = LinearRegression()
# Fit training data (ordinary least squares)
reg = reg.fit(x,y)
print(reg)
# coefficient (one weight per feature)
c = reg.coef_
print(c)
# intercept
b = reg.intercept_
print(b)
# predict y (in-sample predictions)
ypred = reg.predict(x)
print(ypred)
# r2 score (coefficient of determination on the training data)
r2 = reg.score(x,y)
print(r2)
# actual vs predicted: points on the dashed diagonal are perfect predictions
plt.plot([y.min(),y.max()],[y.min(),y.max()],'r--')
plt.scatter(y,ypred)
plt.xlabel('y')
plt.ylabel('predicted y')
|
linear_regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Spectral Ananlysis
# ## 1. Import Libraries
# +
import warnings
warnings.filterwarnings("ignore")
import pandas as pd
import sqlite3
import csv
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
#from wordcloud import WordCloud
import re
import os
from sqlalchemy import create_engine # database connection
import datetime as dt
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem.snowball import SnowballStemmer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.linear_model import SGDClassifier
from sklearn import metrics
from sklearn.metrics import f1_score,precision_score,recall_score
from sklearn import svm
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from datetime import datetime
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import pylab as pl
# -
from sklearn.cluster import SpectralClustering
from spectral import SpectralClusteringg
# ## 2. Create Dataset
# +
# Load the raw weather observations and aggregate them per station ("Name");
# pivot_table averages the numeric columns for each station by default.
df = pd.read_csv('dataset/28_1800.csv')
table=df.pivot_table(index=["Name"])
table=table.reset_index()
# features order:
# 0.Name, 1.AirTemp., 2.Alt, 3.DewPoint, 4.Lat, 5.Longt, 6.Pres., 7.R.Humidity, 8.Visib., 9.WindDir., 10.WindGust, 11.WindSpeed
# change the order of features
table2=table.iloc[:,[1,2,3,6,7,8,9,10,11,0,4,5]]
#print(table2.tail())
# specific features - new table is created
# enter the fourth column to address feature e.g: 1. AirTemp, 7. Relative Humidity
# e.g: table3=table.iloc[:,[0,4,5,1]]
# Keep Name, Lat, Longt plus the single feature to cluster on (here column 7,
# relative humidity); drop stations with missing values.
table3=table.iloc[:,[0,4,5,7]]
table3=table3.dropna()
# for debug print tail of table3
#print(table3.tail())
#table3
title="RelativeHumidity"
# k=clustering number
k=4
# Run KMeans on the feature column only (column index 3 of table3) and store
# each station's cluster id back on the table.
cluster=KMeans(n_clusters=k)
table3["Cluster"]=cluster.fit_predict(table3[table3.columns[3:]])
#table3
# centroids = clusters' center points
centroids = cluster.cluster_centers_
weather_clusters=table3[["Name","Lat","Longt","Cluster",title]]
#print(weather_clusters)
# Persist the clustered stations for the plotting / validation steps below.
weather_clusters.to_csv('ClusteredData.csv', index=False)
# informative
#print("centroids")
#print(table3.columns)
#print(centroids)
# -
# +
# Plot the clusters on a longitude/latitude map (one color per cluster) and
# print per-cluster sizes plus the feature's max/min per cluster, to locate
# the separation points between clusters.
#
# Refactor: the original had three near-identical branches for k==2/3/4 and
# silently reused the variable c3 for cluster 3's scatter handle in the k==4
# branch. A single loop handles 2 <= k <= 4; other k values produce an empty
# plot, matching the original branch-per-k behavior. The print-argument
# layout is preserved exactly.
plt.figure(num=None, figsize=(8, 6), dpi=80)
_cluster_colors = ['r', 'g', 'b', 'y']   # scatter colors for clusters 0..3
_color_labels = ['R', 'G', 'B', 'Y']     # color names used in the report
if 2 <= k <= 4:
    _sizes = []
    for _c in range(k):
        _x = weather_clusters[weather_clusters.Cluster == _c]["Longt"]
        _y = weather_clusters[weather_clusters.Cluster == _c]["Lat"]
        pl.scatter(_x, _y, c=_cluster_colors[_c], marker='o', alpha=0.4)
        _sizes.append(len(_x))
    # Numbers of Elements in Clusters (same print-arg layout as before)
    _parts = []
    for _c in range(k):
        _parts.append(("" if _c == 0 else ", ") + "Cluster{0} Size:".format(_c))
        _parts.append(_sizes[_c])
    print(*_parts)
    # Print Cluster Max, Min Points to determine Cluster Separation Point
    for _c in range(k):
        _vals = weather_clusters[weather_clusters.Cluster == _c][title]
        print("max_c{0}:".format(_c), max(_vals),
              " min_c{0}:".format(_c), min(_vals),
              "Color:" + _color_labels[_c])
pl.xlabel('Longitude')
pl.ylabel('Latitude')
pl.title(title)
pl.savefig("plot_output.png")
pl.show()
# -
# ## 3. Graph Laplacian
# +
# Same preprocessing as the KMeans cell above, but cluster with spectral
# clustering instead.
df = pd.read_csv('dataset/28_1800.csv')
table = df.pivot_table(index=["Name"])
table = table.reset_index()
# features order:
# 0.Name, 1.AirTemp., 2.Alt, 3.DewPoint, 4.Lat, 5.Longt, 6.Pres., 7.R.Humidity, 8.Visib., 9.WindDir., 10.WindGust, 11.WindSpeed
# change the order of features (kept for reference; table2 is unused below)
table2 = table.iloc[:, [1, 2, 3, 6, 7, 8, 9, 10, 11, 0, 4, 5]]
# Keep Name, Lat, Longt plus the single feature to cluster on (here column 7,
# relative humidity); drop stations with missing values.
table3 = table.iloc[:, [0, 4, 5, 7]]
table3 = table3.dropna()
title = "RelativeHumidity"
# k = number of clusters
k = 4
cluster = SpectralClusteringg(n_clusters=k, assign_labels="discretize", random_state=0)
cluster.fit(table3[table3.columns[3:]])
# BUG FIX: fit() stores the assignment in the sklearn-style attribute
# ``labels_`` (see the SpectralClusteringg class defined in this notebook);
# ``_labels`` does not exist and raised AttributeError.
table3["Cluster"] = cluster.labels_
# Spectral clustering has no centroids, unlike KMeans above.
weather_clusters = table3[["Name", "Lat", "Longt", "Cluster", title]]
# Persist the clustered stations for the plotting / validation steps below.
weather_clusters.to_csv('ClusteredData.csv', index=False)
# +
# Plot the clusters on a longitude/latitude map (one color per cluster) and
# print per-cluster sizes plus the feature's max/min per cluster, to locate
# the separation points between clusters.
#
# Refactor: the original had three near-identical branches for k==2/3/4 and
# silently reused the variable c3 for cluster 3's scatter handle in the k==4
# branch. A single loop handles 2 <= k <= 4; other k values produce an empty
# plot, matching the original branch-per-k behavior. The print-argument
# layout is preserved exactly.
plt.figure(num=None, figsize=(8, 6), dpi=80)
_cluster_colors = ['r', 'g', 'b', 'y']   # scatter colors for clusters 0..3
_color_labels = ['R', 'G', 'B', 'Y']     # color names used in the report
if 2 <= k <= 4:
    _sizes = []
    for _c in range(k):
        _x = weather_clusters[weather_clusters.Cluster == _c]["Longt"]
        _y = weather_clusters[weather_clusters.Cluster == _c]["Lat"]
        pl.scatter(_x, _y, c=_cluster_colors[_c], marker='o', alpha=0.4)
        _sizes.append(len(_x))
    # Numbers of Elements in Clusters (same print-arg layout as before)
    _parts = []
    for _c in range(k):
        _parts.append(("" if _c == 0 else ", ") + "Cluster{0} Size:".format(_c))
        _parts.append(_sizes[_c])
    print(*_parts)
    # Print Cluster Max, Min Points to determine Cluster Separation Point
    for _c in range(k):
        _vals = weather_clusters[weather_clusters.Cluster == _c][title]
        print("max_c{0}:".format(_c), max(_vals),
              " min_c{0}:".format(_c), min(_vals),
              "Color:" + _color_labels[_c])
pl.xlabel('Longitude')
pl.ylabel('Latitude')
pl.title(title)
pl.savefig("plot_output.png")
pl.show()
# -
# ## 4. Clustering
class SpectralClusteringg():#ClusterMixin, BaseEstimator):
    """Apply clustering to a projection of the normalized Laplacian.

    Notebook copy of scikit-learn's ``SpectralClustering`` estimator (note
    the doubled ``g`` in the name, distinguishing it from
    ``sklearn.cluster.SpectralClustering`` imported above).

    When calling ``fit``, an affinity matrix is constructed using either a
    kernel function such as the Gaussian (RBF) kernel, a k-nearest-neighbors
    connectivity matrix, or — with ``affinity='precomputed'`` — a
    user-provided affinity matrix; the samples are then clustered in the
    spectral embedding of that matrix. If the affinity is the adjacency
    matrix of a graph, this finds normalized graph cuts.

    Parameters
    ----------
    n_clusters : int, default=8
        Dimension of the projection subspace / number of clusters.
    eigen_solver : {'arpack', 'lobpcg', 'amg'}, default=None
        Eigenvalue decomposition strategy ('arpack' when None).
    n_components : int, default=n_clusters
        Number of eigenvectors used for the spectral embedding.
    random_state : int, RandomState instance, default=None
        Seeds the lobpcg eigenvector decomposition and the k-means init.
    n_init : int, default=10
        Number of k-means restarts; best inertia wins.
    gamma : float, default=1.0
        Kernel coefficient for rbf/poly/sigmoid/laplacian/chi2 kernels.
    affinity : str or callable, default='rbf'
        'nearest_neighbors', 'rbf', 'precomputed',
        'precomputed_nearest_neighbors', or any similarity kernel name
        supported by ``pairwise_kernels``.
    n_neighbors : int, default=10
        Neighbors used for the nearest-neighbors affinity (ignored for rbf).
    eigen_tol : float, default=0.0
        Stopping tolerance for the arpack eigendecomposition.
    assign_labels : {'kmeans', 'discretize'}, default='kmeans'
        Label-assignment strategy in the embedding space; 'discretize' is
        less sensitive to random initialization.
    degree : float, default=3
        Degree of the polynomial kernel; ignored by other kernels.
    coef0 : float, default=1
        Zero coefficient for polynomial and sigmoid kernels.
    kernel_params : dict of str to any, default=None
        Extra parameters for a callable kernel.
    n_jobs : int, default=None
        Parallelism for the nearest-neighbors searches.
    verbose : bool, default=False
        Verbosity mode.

    Attributes
    ----------
    affinity_matrix_ : array-like of shape (n_samples, n_samples)
        Affinity matrix used for clustering (set by ``fit``).
    labels_ : ndarray of shape (n_samples,)
        Cluster label of each point (set by ``fit``).

    NOTE(review): the base classes (ClusterMixin, BaseEstimator) are
    commented out in the class header, yet ``fit`` still calls
    ``self._validate_data``, which BaseEstimator provides — as written,
    ``fit`` raises AttributeError. ``fit`` also relies on module-level names
    (``kneighbors_graph``, ``NearestNeighbors``, ``pairwise_kernels``,
    ``check_random_state``) that are never imported in this notebook; they
    must be supplied (e.g. from sklearn) before ``fit`` can run.
    """
    #@_deprecate_positional_args
    def __init__(self, n_clusters=8, *, eigen_solver=None, n_components=None,
                 random_state=None, n_init=10, gamma=1., affinity='rbf',
                 n_neighbors=10, eigen_tol=0.0, assign_labels='kmeans',
                 degree=3, coef0=1, kernel_params=None, n_jobs=None,
                 verbose=False):
        # All constructor arguments are stored unmodified (sklearn
        # convention); validation and all real work happen in fit().
        self.n_clusters = n_clusters
        self.eigen_solver = eigen_solver
        self.n_components = n_components
        self.random_state = random_state
        self.n_init = n_init
        self.gamma = gamma
        self.affinity = affinity
        self.n_neighbors = n_neighbors
        self.eigen_tol = eigen_tol
        self.assign_labels = assign_labels
        self.degree = degree
        self.coef0 = coef0
        self.kernel_params = kernel_params
        self.n_jobs = n_jobs
        self.verbose = verbose
    def fit(self, X, y=None):
        """Perform spectral clustering from features, or affinity matrix.

        Parameters
        ----------
        X : {array-like, sparse matrix} of shape (n_samples, n_features), or \
                array-like of shape (n_samples, n_samples)
            Training instances to cluster, or similarities / affinities
            between instances if ``affinity='precomputed'``. If a sparse
            matrix is provided in a format other than ``csr_matrix``,
            ``csc_matrix``, or ``coo_matrix``, it will be converted into a
            sparse ``csr_matrix``.
        y : Ignored
            Not used, present here for API consistency by convention.

        Returns
        -------
        self
        """
        # NOTE(review): _validate_data comes from BaseEstimator, which this
        # class no longer inherits — see the class docstring.
        X = self._validate_data(X, accept_sparse=['csr', 'csc', 'coo'],
                                dtype=np.float64, ensure_min_samples=2)
        # A square X is ambiguous (data matrix vs. affinity matrix); warn
        # unless the affinity mode explicitly expects a square input.
        allow_squared = self.affinity in ["precomputed",
                                          "precomputed_nearest_neighbors"]
        if X.shape[0] == X.shape[1] and not allow_squared:
            warnings.warn("The spectral clustering API has changed. ``fit``"
                          "now constructs an affinity matrix from data. To use"
                          " a custom affinity matrix, "
                          "set ``affinity=precomputed``.")
        # Build the affinity matrix according to the configured mode; the
        # k-NN graphs are symmetrized by averaging with their transpose.
        if self.affinity == 'nearest_neighbors':
            connectivity = kneighbors_graph(X, n_neighbors=self.n_neighbors,
                                            include_self=True,
                                            n_jobs=self.n_jobs)
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed_nearest_neighbors':
            estimator = NearestNeighbors(n_neighbors=self.n_neighbors,
                                         n_jobs=self.n_jobs,
                                         metric="precomputed").fit(X)
            connectivity = estimator.kneighbors_graph(X=X, mode='connectivity')
            self.affinity_matrix_ = 0.5 * (connectivity + connectivity.T)
        elif self.affinity == 'precomputed':
            self.affinity_matrix_ = X
        else:
            # Kernel affinity (default 'rbf'); gamma/degree/coef0 are only
            # forwarded for named (non-callable) kernels.
            params = self.kernel_params
            if params is None:
                params = {}
            if not callable(self.affinity):
                params['gamma'] = self.gamma
                params['degree'] = self.degree
                params['coef0'] = self.coef0
            self.affinity_matrix_ = pairwise_kernels(X, metric=self.affinity,
                                                     filter_params=True,
                                                     **params)
        random_state = check_random_state(self.random_state)
        # Delegate the embedding + label assignment to the module-level
        # spectral_clustering function defined later in this notebook.
        self.labels_ = spectral_clustering(self.affinity_matrix_,
                                           n_clusters=self.n_clusters,
                                           n_components=self.n_components,
                                           eigen_solver=self.eigen_solver,
                                           random_state=random_state,
                                           n_init=self.n_init,
                                           eigen_tol=self.eigen_tol,
                                           assign_labels=self.assign_labels,
                                           verbose=self.verbose)
        return self
# +
#@_deprecate_positional_args
def discretize(vectors, *, copy=True, max_svd_restarts=30, n_iter_max=20,
               random_state=None):
    """Search for a partition matrix (clustering) which is closest to the
    eigenvector embedding.

    Parameters
    ----------
    vectors : array-like of shape (n_samples, n_clusters)
        The embedding space of the samples.
    copy : bool, default=True
        Whether to copy vectors, or perform in-place normalization.
    max_svd_restarts : int, default=30
        Maximum number of attempts to restart SVD if convergence fails.
    n_iter_max : int, default=20
        Maximum number of iterations to attempt in rotation and partition
        matrix search if machine precision convergence is not reached.
    random_state : int, RandomState instance, default=None
        Determines random number generation for rotation matrix
        initialization. Use an int to make the randomness deterministic.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    Raises
    ------
    LinAlgError
        If the SVD never converges within ``max_svd_restarts`` restarts.

    References
    ----------
    - Multiclass spectral clustering, 2003
      https://www1.icsi.berkeley.edu/~stellayu/publication/doc/2003kwayICCV.pdf

    Notes
    -----
    The eigenvector embedding is normalized onto the space of partition
    matrices; then a discrete partition matrix closest to the rotated
    embedding and an optimal rotation matrix are computed alternately until
    convergence. Used in spectral clustering, this tends to be faster and
    more robust to random initialization than k-means.
    """
    from scipy.sparse import csc_matrix
    from scipy.linalg import LinAlgError

    # The original called sklearn's check_random_state / as_float_array
    # without importing them; do the equivalent locally so the function is
    # self-contained.
    if random_state is None or isinstance(random_state, (int, np.integer)):
        random_state = np.random.RandomState(random_state)
    elif not isinstance(random_state, np.random.RandomState):
        raise ValueError("%r cannot be used to seed a"
                         " numpy.random.RandomState instance" % random_state)
    vectors = np.array(vectors, dtype=np.float64) if copy \
        else np.asarray(vectors, dtype=np.float64)

    eps = np.finfo(float).eps
    n_samples, n_components = vectors.shape

    # Normalize the eigenvectors to an equal length of a vector of ones, and
    # reorient each to point in the negative direction with respect to its
    # first element (constrains them to one quadrant, easing the search).
    norm_ones = np.sqrt(n_samples)
    for i in range(vectors.shape[1]):
        vectors[:, i] = (vectors[:, i] / np.linalg.norm(vectors[:, i])) \
            * norm_ones
        if vectors[0, i] != 0:
            vectors[:, i] = -1 * vectors[:, i] * np.sign(vectors[0, i])

    # Normalize rows so samples lie on the unit hypersphere centered at the
    # origin: this maps the embedding onto the space of partition matrices.
    vectors = vectors / np.sqrt((vectors ** 2).sum(axis=1))[:, np.newaxis]

    svd_restarts = 0
    has_converged = False

    # If the SVD raises, re-randomize the initial rotation and retry, at
    # most max_svd_restarts times.
    while (svd_restarts < max_svd_restarts) and not has_converged:
        # Initialize the first column of the rotation matrix with a randomly
        # chosen row of the eigenvectors.
        rotation = np.zeros((n_components, n_components))
        rotation[:, 0] = vectors[random_state.randint(n_samples), :].T

        # Initialize the remaining columns with rows that are as orthogonal
        # as possible to all previously picked ones.
        c = np.zeros(n_samples)
        for j in range(1, n_components):
            # Accumulate c so each new pick is as orthogonal as possible to
            # every previous pick, not just the latest one.
            c += np.abs(np.dot(vectors, rotation[:, j - 1]))
            rotation[:, j] = vectors[c.argmin(), :].T

        last_objective_value = 0.0
        n_iter = 0

        while not has_converged:
            n_iter += 1

            # Discretize: each sample joins the cluster whose rotated axis
            # it projects onto most strongly.
            t_discrete = np.dot(vectors, rotation)
            labels = t_discrete.argmax(axis=1)
            vectors_discrete = csc_matrix(
                (np.ones(len(labels)), (np.arange(0, n_samples), labels)),
                shape=(n_samples, n_components))

            t_svd = vectors_discrete.T * vectors

            try:
                U, S, Vh = np.linalg.svd(t_svd)
            except LinAlgError:
                # BUG FIX: count a restart only when the SVD actually fails.
                # The original incremented svd_restarts on every successful
                # iteration, exhausting the restart budget prematurely.
                svd_restarts += 1
                print("SVD did not converge, randomizing and trying again")
                break

            # Normalized-cut objective; converged once it stops improving at
            # machine precision (or the iteration budget is spent).
            ncut_value = 2.0 * (n_samples - S.sum())
            if ((abs(ncut_value - last_objective_value) < eps) or
                    (n_iter > n_iter_max)):
                has_converged = True
            else:
                # Otherwise update the rotation from the SVD and continue.
                last_objective_value = ncut_value
                rotation = np.dot(Vh.T, U.T)

    if not has_converged:
        raise LinAlgError('SVD did not converge')
    return labels
#@_deprecate_positional_args
def spectral_clustering(affinity, *, n_clusters=8, n_components=None,
                        eigen_solver=None, random_state=None, n_init=10,
                        eigen_tol=0.0, assign_labels='kmeans',
                        verbose=False):
    """Apply clustering to a projection of the normalized Laplacian.

    If ``affinity`` is the adjacency matrix of a graph, this method can be
    used to find normalized graph cuts.

    Parameters
    ----------
    affinity : {array-like, sparse matrix} of shape (n_samples, n_samples)
        The affinity matrix describing the relationship of the samples to
        embed. **Must be symmetric**. E.g. a graph adjacency matrix, a heat
        kernel of the pairwise distance matrix, or a symmetric k-nearest
        neighbours connectivity matrix of the samples.
    n_clusters : int, default=8
        Number of clusters to extract.
    n_components : int, default=n_clusters
        Number of eigenvectors to use for the spectral embedding.
    eigen_solver : {None, 'arpack', 'lobpcg', 'amg'}
        Eigenvalue decomposition strategy ('arpack' when None).
    random_state : int, RandomState instance, default=None
        Seeds the lobpcg decomposition (eigen_solver='amg') and the k-means
        initialization. Use an int for deterministic results.
    n_init : int, default=10
        Number of k-means restarts; best inertia wins.
    eigen_tol : float, default=0.0
        Stopping criterion for the arpack eigendecomposition.
    assign_labels : {'kmeans', 'discretize'}, default='kmeans'
        Strategy used to assign labels in the embedding space;
        'discretize' is less sensitive to random initialization.
    verbose : bool, default=False
        Verbosity mode.

    Returns
    -------
    labels : array of integers, shape: n_samples
        The labels of the clusters.

    Raises
    ------
    ValueError
        If ``assign_labels`` is not 'kmeans' or 'discretize'.

    Notes
    -----
    The graph should contain only one connected component, otherwise the
    results make little sense. This algorithm solves the normalized cut for
    k=2: it is a normalized spectral clustering.

    NOTE(review): ``check_random_state``, ``spectral_embedding`` and
    ``k_means`` are never imported in this notebook — they must be provided
    at module scope (e.g. from sklearn) before this function can run.
    ``discretize`` is the function defined above.
    """
    if assign_labels not in ('kmeans', 'discretize'):
        raise ValueError("The 'assign_labels' parameter should be "
                         "'kmeans' or 'discretize', but '%s' was given"
                         % assign_labels)

    random_state = check_random_state(random_state)
    n_components = n_clusters if n_components is None else n_components

    # The first eigen vector is constant only for fully connected graphs
    # and should be kept for spectral clustering (drop_first = False)
    # See spectral_embedding documentation.
    maps = spectral_embedding(affinity, n_components=n_components,
                              eigen_solver=eigen_solver,
                              random_state=random_state,
                              eigen_tol=eigen_tol, drop_first=False)
    if verbose:
        print(f'Computing label assignment using {assign_labels}')

    # Assign discrete cluster labels to the rows of the embedding.
    if assign_labels == 'kmeans':
        _, labels, _ = k_means(maps, n_clusters, random_state=random_state,
                               n_init=n_init, verbose=verbose)
    else:
        labels = discretize(maps, random_state=random_state)

    return labels
# -
# ## 5. Validation
|
spectralAnalysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Setup an experiment with given data and arch by adjusting config
# +
import os
import importlib
import neptune
import pandas as pd
import numpy as np
import datetime as dt
import torch
tnn = torch.nn
top = torch.optim
from torch.utils import data as tdt
from src import utils
# %matplotlib inline
ARCHS_DIR = 'archs'              # package of model architecture modules; config['ARCH'] picks one
DATA_DIR = 'data'                # folder holding preprocessed dataset .pt files
EXPERIMENTS_DIR = 'experiments'  # per-experiment output / checkpoint folders
NEPTUNE_PRJ = 'indiacovidseva/covid-net'  # neptune project used for experiment logging
# -
# ### Device Info
# Pick the compute device: the first CUDA GPU when available, else the CPU.
CUDA = "cuda:0"
CPU = "cpu"
if not torch.cuda.is_available():
    device = torch.device(CPU)
else:
    device = torch.device(CUDA)
    current = torch.cuda.current_device()
    # Report the GPU the run will use.
    print("Num devices:", torch.cuda.device_count())
    print("Current device:", current)
    print("Device name:", torch.cuda.get_device_name(current))
    print("Device props:", torch.cuda.get_device_properties(current))
    # print(torch.cuda.memory_summary(current))
print(device)
# ### Experiment Config
# +
# Experiment configuration: all hyperparameters plus the data/architecture
# selection live in one dict so they can be logged to neptune as-is.
config = {
    "NEPTUNE_ID": "",   # filled in below once the neptune experiment exists
    "ID": "0027",
    "DESC": "Batch 1, weather data - thp",
    "ARCH": "v3",       # module under ARCHS_DIR that provides CovidNet
    "DATASET": "ds_cdthp_pgba_4020_dataset_2020-09-15_v3.csv.pt",
    "IP_FEATURES": [0, 2, 3, 4],   # input-sequence feature column indices
    "OP_FEATURES": [0],            # output-sequence feature column indices
    "AUX_FEATURES": [],            # auxiliary (non-sequence) feature indices
    "BATCH_SIZE": 1,
    "HIDDEN_SIZE": 40,
    "NUM_LAYERS": 4,
    "DROPOUT": 0.5,
    "LEARNING_RATE": 0.001,
    "NUM_EPOCHS": 3001
}
NEPTUNE = True # toggle neptune
RESUME = False # retrain a previously trained model
resume_exp = EXPERIMENTS_DIR + "/" + "0001"   # experiment folder to resume from
resume_cp = "latest-e" + "1740" + ".pt"       # checkpoint file within it
if RESUME:
    assert resume_exp and resume_cp
# setup exp: create the output folder, warning (not failing) if it exists
experiment_dir = EXPERIMENTS_DIR + '/' + config['ID']
try:
    os.mkdir(experiment_dir)
except OSError:
    print("!!WARNING!! EXPERIMENT ALREADY EXISTS:", config['ID'])
else:
    print("Initialising experiment:", config['ID'])
print("Resume:", RESUME, resume_exp, resume_cp)
# load data: the saved dataset bundles splits ('trn'/'val') and its config
ds = torch.load(DATA_DIR + "/" + config['DATASET'])
print("Dataset loaded")
config['DS'] = ds['config']
print(config['DS'])
# load arch (reload so in-notebook edits to the module take effect)
arch_mod = importlib.import_module("." + config['ARCH'], ARCHS_DIR)
importlib.reload(arch_mod) # ensure changes are imported
# init Net: sequence lengths come from the dataset config, feature counts
# from the experiment config
model = arch_mod.CovidNet(
    ip_seq_len=config['DS']['IP_SEQ_LEN'],
    op_seq_len=config['DS']['OP_SEQ_LEN'],
    ip_size=len(config['IP_FEATURES']),
    op_size=len(config['OP_FEATURES']),
    hidden_size=config['HIDDEN_SIZE'],
    num_layers=config['NUM_LAYERS'],
    dropout=config['DROPOUT'],
    ip_aux_size=len(config['AUX_FEATURES']),
    device=device
)
model = model.to(device)
print ("Model initialised")
print("Num params:", sum(p.numel() for p in model.parameters() if p.requires_grad))
# init Loss and Optimizer (mean absolute error + Adam)
loss_fn = tnn.L1Loss()
optimizer = top.Adam(model.parameters(), lr=config['LEARNING_RATE'])
# init dataset loaders
trn_loader = tdt.DataLoader(ds['trn'], shuffle=True, batch_size=config['BATCH_SIZE'])
val_loader = tdt.DataLoader(ds['val'], shuffle=True, batch_size=config['BATCH_SIZE'])
# -
# ### Training loop
# +
# Per-epoch metric history and best-so-far trackers.
trn_loss_vals = []
val_loss_vals = []
trn_acc_vals = []
val_acc_vals = []
e = 0  # epoch counter; also the resume point when restarting
min_val_loss = np.Inf
max_val_acc = 0
if RESUME:
    # Restore model/optimizer weights from a saved checkpoint.
    cp = utils.load_checkpoint(resume_exp, resume_cp, device=device)
    old_config, md, od = cp['config'], cp['model_state_dict'], cp['optimizer_state_dict']
    model.load_state_dict(md)
    optimizer.load_state_dict(od)
#     e = cp['epoch'] + 1
#     trn_loss_vals, val_loss_vals, min_val_loss = cp['trn_losses'], cp['val_losses'], cp['min_val_loss']
#     trn_acc_vals, val_acc_vals, max_val_acc = cp['trn_acc'], cp['val_acc'], cp['max_val_acc']
    try:
        # Sanity-check that the checkpoint was trained with a compatible setup.
        assert config['IP_FEATURES'] == old_config['IP_FEATURES']
        assert config['OP_FEATURES'] == old_config['OP_FEATURES']
        assert config['HIDDEN_SIZE'] == old_config['HIDDEN_SIZE']
        assert config['DS']['IP_SEQ_LEN'] == old_config['DS']['IP_SEQ_LEN']
        assert config['DS']['OP_SEQ_LEN'] == old_config['DS']['OP_SEQ_LEN']
        assert config['AUX_FEATURES'] == old_config.get('AUX_FEATURES', [])
    # NOTE(review): bare except swallows everything (even KeyboardInterrupt)
    # and training continues with a possibly-incompatible checkpoint — confirm
    # this best-effort behaviour is intended.
    except:
        import traceback as tb
        print("Failed to resume.")
        print("Old config:", old_config)
        print("New config:", config)
        tb.print_exc()
if NEPTUNE:
    # Attach to an existing Neptune experiment when an ID is configured,
    # otherwise create a fresh one and remember its ID in the config.
    neptune_prj = neptune.init(NEPTUNE_PRJ)
    if config['NEPTUNE_ID']:
        neptune_exp = neptune_prj.get_experiments(id=config['NEPTUNE_ID'])[0]
    else:
        neptune_exp = neptune.create_experiment(name=config['ID'], params=config)
        config['NEPTUNE_ID'] = neptune_exp.id
# TRAIN
# NOTE(review): indentation was lost in this dump; the nesting below is
# reconstructed from the data flow — verify against the original notebook.
print("BEGIN: [", dt.datetime.now(), "]")
while e < config['NUM_EPOCHS']:
    # ---- training pass ----
    model.train()
    trn_losses = []
    trn_ops = []
    for data in trn_loader:
        ip, ip_aux, op = data
        # print("ip all", ip)
        # Select the configured feature columns and move to device.
        ip = ip[:, :, config['IP_FEATURES']].to(device)
        # print("ip model", ip)
        # Auxiliary (static) features are optional; None when not configured.
        ip_aux = ip_aux[:, config['AUX_FEATURES']].to(device).view(-1, len(config['AUX_FEATURES'])) if len(config['AUX_FEATURES']) else None
        op = op[:, :, config['OP_FEATURES']].to(device)
        optimizer.zero_grad()  # set grads to 0
        preds = model(
            ip.view(-1, config['DS']['IP_SEQ_LEN'], len(config['IP_FEATURES'])),
            aux_ip=ip_aux
        )  # predict
        # print("preds", preds)
        loss = loss_fn(preds, op.view(-1, config['DS']['OP_SEQ_LEN'], len(config['OP_FEATURES'])))  # calc loss
        loss.backward()  # calc and assign grads
        optimizer.step()  # update weights
        # NOTE(review): appending the loss tensor (not loss.item()/detach())
        # keeps each batch's autograd graph alive until the epoch ends —
        # confirm memory headroom before changing.
        trn_losses.append(loss)  # logging
        trn_ops.append(op.mean())
    avg_trn_loss = torch.stack(trn_losses).mean().item()
    # "Accuracy" here is 1 - (mean L1 error / mean target value).
    avg_trn_acc = 1 - avg_trn_loss / torch.stack(trn_ops).mean().item()
    trn_loss_vals.append(avg_trn_loss * 10000)
    trn_acc_vals.append(avg_trn_acc * 100)
    # ---- validation pass (no gradients) ----
    model.eval()
    with torch.no_grad():
        val_losses = []
        val_ops = []
        for data in val_loader:
            ip, ip_aux, op = data
            ip = ip[:, :, config['IP_FEATURES']].to(device)
            ip_aux = ip_aux[:, config['AUX_FEATURES']].to(device).view(-1, len(config['AUX_FEATURES'])) if len(config['AUX_FEATURES']) else None
            op = op[:, :, config['OP_FEATURES']].to(device)
            preds = model(
                ip.view(-1, config['DS']['IP_SEQ_LEN'], len(config['IP_FEATURES'])),
                aux_ip=ip_aux
            )
            loss = loss_fn(preds, op.view(-1, config['DS']['OP_SEQ_LEN'], len(config['OP_FEATURES'])))
            val_losses.append(loss)
            val_ops.append(op.mean())
        avg_val_loss = torch.stack(val_losses).mean().item()
        avg_val_acc = 1 - avg_val_loss / torch.stack(val_ops).mean().item()
        val_loss_vals.append(avg_val_loss * 10000)
        val_acc_vals.append(avg_val_acc * 100)
    # ---- per-epoch logging / checkpointing ----
    if NEPTUNE:
        neptune_exp.log_metric('validation accuracy', avg_val_acc*100)
        neptune_exp.log_metric('training accuracy', avg_trn_acc*100)
        neptune_exp.log_metric('validation loss', avg_val_loss*1e4)
        neptune_exp.log_metric('training loss', avg_trn_loss*1e4)
    if e%1==0:  # print every epoch (change the modulus to thin the log)
        print(
            "[", dt.datetime.now(), "] epoch:", f"{e:5}",
            "val_acc:", f"{avg_val_acc*100: 4.2f}", "trn_acc:", f"{avg_trn_acc*100: 4.2f}",
            "val_loss:", f"{avg_val_loss*1e4: 4.2f}", "trn_loss:", f"{avg_trn_loss*1e4: 4.2f}"
        )
    if e%1==0:  # checkpoint every epoch
        utils.save_checkpoint(
            config, e, model, optimizer,
            trn_loss_vals, val_loss_vals, min_val_loss,
            trn_acc_vals, val_acc_vals, max_val_acc,
            experiment_dir, "/latest-e" + str(e) + ".pt"
        )
    # Track the best validation accuracy (best-model saving is disabled).
    if avg_val_acc >= max_val_acc:
        max_val_acc = avg_val_acc
#         utils.save_checkpoint(
#             config, e, model, optimizer,
#             trn_loss_vals, val_loss_vals, min_val_loss,
#             trn_acc_vals, val_acc_vals, max_val_acc,
#             experiment_dir, "/best-e" + str(e) + ".pt"
#         )
    e+=1
print("END: [", dt.datetime.now(), "]")
if NEPTUNE:
    neptune_exp.stop()
# -
# ### Plot loss & acc
# +
# Collect the logged per-epoch metrics into frames for plotting.
df_loss = pd.DataFrame({
    'trn_loss': trn_loss_vals,
    'val_loss': val_loss_vals
})
df_acc = pd.DataFrame({
    'trn_acc': trn_acc_vals,
    'val_acc': val_acc_vals
})
# Smooth every series with a centered 3-epoch rolling mean.
for frame in (df_loss, df_acc):
    for column in frame.columns:
        frame[column] = frame[column].rolling(3, min_periods=1, center=True).mean()
# Plot from epoch 2 onward (the first epochs dominate the scale).
_ = df_loss[2:].plot(
    y=['trn_loss', 'val_loss'],
    title='Loss per epoch',
    subplots=False,
    figsize=(5, 5),
    sharex=False,
    logy=True
)
_ = df_acc[2:].plot(
    y=['trn_acc', 'val_acc'],
    title='Acc per epoch',
    subplots=False,
    figsize=(5, 5),
    sharex=False,
    logy=False
)
|
Experiments.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# +
import numpy as np
import pandas as pd
def flatten(t):
    """Return a single flat list with the items of every sub-list of t."""
    flat = []
    for sub in t:
        flat.extend(sub)
    return flat
# +
# Spring ("rozkład wiosna") hourly PV output profile, one value per hour.
pv_efficiency_1 = [
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    45, 300, 320, 555, 330, 310, 250, 75,
    0, 0, 0, 0, 0, 0, 0,
]
# One independent copy of the daily profile for each of the 90 spring days.
pv1 = [list(pv_efficiency_1) for _ in range(90)]
print(pv1)
# +
# Summer ("rozkład lato") hourly PV output profile.
pv_efficiency_2 = [
    0, 0, 0,
    10, 80, 240, 320, 550, 700, 700, 600, 600, 600,
    500, 400, 300, 200, 100, 50,
    0, 0, 0, 0,
]
# One independent copy of the daily profile for each of the 91 summer days.
pv2 = [list(pv_efficiency_2) for _ in range(91)]
# +
# Autumn ("rozkład jesien") hourly PV output profile.
pv_efficiency_3 = [
    0, 0, 0, 0, 0, 0,
    100, 250, 500, 620, 720, 755, 720, 620, 500, 250, 100, 50,
    0, 0, 0, 0, 0,
]
# One independent copy of the daily profile for each of the 92 autumn days.
pv3 = [list(pv_efficiency_3) for _ in range(92)]
# +
# Fourth-season hourly PV output profile. The original comment repeats
# "rozkład jesien" (autumn) — presumably this is winter; confirm with author.
pv_efficiency_4 = [
    0, 0, 0, 0, 0, 0, 0, 0, 0,
    100, 250, 277, 100, 0, 110,
    0, 0, 0, 0, 0, 0, 0, 0,
]
# One independent copy of the daily profile for each of the 92 days.
pv4 = [list(pv_efficiency_4) for _ in range(92)]
# -
# Concatenate the four seasonal day-profiles into one year-long hourly
# series, then convert the values to kW.
pv_efficiency = flatten(pv1) + flatten(pv2) + flatten(pv3) + flatten(pv4)
pv_efficiency = [value / 1000 for value in pv_efficiency]
# +
# Constant per-day wind output for each season (one value per day).
wind_efficiency_1 = [300] * 90
wind_efficiency_2 = [100] * 91
wind_efficiency_3 = [100] * 92
wind_efficiency_4 = [300] * 92
wind_efficiency = wind_efficiency_1 + wind_efficiency_2 + wind_efficiency_3 + wind_efficiency_4
# +
# Re-key both series as {sequential index: value} dicts.
pv_efficiency = dict(enumerate(pv_efficiency))
wind_efficiency = dict(enumerate(wind_efficiency))
# -
|
Untitled2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # This notebook describes and directs to notebooks, scripts and other documents I have produced
# <h2>
# <a href = "https://nbviewer.jupyter.org/urls/bitbucket.org/midoss/analysis-ashutosh/raw/default//wind_climatology_maps.ipynb">
# Wind Climatology Maps
# </a>
# </h2>
# <p>Notebook used for generating the wind climatology maps at the beginning of the term. Heavily refactored for accessibility 18 Apr 2019.
# <ul>
# <li>Mean, Standard Deviation, Maxima, percentage of days exceeding 5m/s and 10m/s by month computed for each year (2015, 2016, 2017, 2018)</li>
# <li>Mean, Standard Deviation, Maxima, percentage of days exceeding 5m/s and 10m/s by month computed for all years (2015, 2016, 2017, 2018)</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <a href="./breadcrumbs/1_wind_climatology_maps.png" target="new">
# <img src="./breadcrumbs/1_wind_climatology_maps.png" style="width:600px">
# </a>
# <h2>
# <a href="https://nbviewer.jupyter.org/urls/bitbucket.org/midoss/analysis-ashutosh/raw/default//WindSpeedTimeseries.ipynb">
# Wind Speed Timeseries
# </a>
# </h2>
# <p>Notebook used for generating the wind speed time series at each of the salmon bank, turn point and strait of Georgia locations.
# <ul>
# <li>Raw plot of timeseries, Daily average, Running mean and Standard deviation of wind speed by month (for years 2015, 2016, 2017, 2018)</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <a href="./breadcrumbs/wind_speed_timeseries.png" target="new">
# <img src="./breadcrumbs/wind_speed_timeseries.png" style="width:600px">
# </a>
# <h2>
# <a href = "https://nbviewer.jupyter.org/urls/bitbucket.org/midoss/analysis-ashutosh/raw/default//explore_pytables_h5py.ipynb">
# Exploration of pytables and h5py libraries for creating forcing files for MOHID
# </a>
# </h2>
# <ul>
# <li>Walking the file structure of Shihan's input files to reverse engineer the tree</li>
# <li>Creating winds and current input files using pytables. Saw that I couldn't figure out how to write metadata without modifying carray class, but blanched when I saw the output files were 10X larger than they should be e.g. 30GB for a week of SSC output.</li>
# <li>Creating winds and current input files using h5py. This method was able to replicate Shihan's input files and solve problems faced when using pytables.</li>
# <li>For each compression factor from 1-9, looking at the saving in file size and time taken to create input files using h5py</li>
# </ul>
# <h2>
# <a href="https://nbviewer.jupyter.org/urls/bitbucket.org/midoss/analysis-ashutosh/raw/default//SpringNeapTide.ipynb">
# Spring/Neap Tide Record
# </a>
# </h2>
# <p>Notebook used for generating plots of spring/neap tide cycles
# <ul>
# <li>For each of the three chosen locations, plot the spring/neap tide record.
# Signal calculated as: max(tidal_record_for_each_day) - min(tidal_record_for_each_day).
# (for years 2015, 2016, 2017, 2018)</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <a href="./breadcrumbs/SpringNeapTide.png" target="new">
# <img src="./breadcrumbs/SpringNeapTide.png" style="width:600px">
# </a>
# <h2> Plots of Stratification Vs Depth </h2>
# <p>
# <ul>
# <li>For each of the three chosen locations, plot the stratification vs depth at various times</li>
# <li>Used a text input date parser to locate netcdf files on /results2 and extract profiles</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <a href="./breadcrumbs/stratification_v_depth.png" target="new">
# <img src="./breadcrumbs/stratification_v_depth.png" style="width:600px">
# </a>
# <h2> Downscaling winds </h2>
# <p> Comparing different ways of regridding GEM files for MOHID
# <ul>
# <li>Use scipy.interpolate.griddata to regrid using nearest neighbour, bilinear and bicubic interpolation</li>
# <li>Visualise difference</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <a href="./breadcrumbs/downscale_winds.png" target="new">
# <img src="./breadcrumbs/downscale_winds.png" style="height:500px">
# </a>
# <h2> Plot Rich's Drifter release Location on NEMO grid </h2>
# <p>
# <ul>
# <li>Visualise drifter release locations</li>
# <p>Sample output:</p>
# <a href="./breadcrumbs/drifter_release.png" target="new">
# <img src="./breadcrumbs/drifter_release.png" style="height:500px">
# </a>
# <li>Find drifters within certain radius of input coordinates</li>
# </ul>
# </p>
# <p>Sample output:</p>
# <p>
# Radius = 10 km
#
# Drifters within radius 10 km of SoG point:
#
# Index =(276, 265); time (days) = 2014-09-20 05:11:49; distance (km) = 9.968412326014565
# Index =(282, 247); time (days) = 2017-02-14 22:03:05; distance (km) = 4.406343047696986
# Index =(282, 247); time (days) = 2017-11-28 00:39:44; distance (km) = 4.391117416880188
#
# Drifters within radius 10 km of turn point:
#
# Index =(129, 233); time (days) = 2016-11-01 08:03:12; distance (km) = 7.650155015730338
# Index =(148, 243); time (days) = 2017-04-13 17:47:21; distance (km) = 2.744154790298269
# Index =(146, 239); time (days) = 2017-05-08 19:56:32; distance (km) = 1.420864404212626
# Index =(147, 262); time (days) = 2017-05-12 18:03:13; distance (km) = 9.999106563957708
# Index =(147, 262); time (days) = 2017-05-12 18:07:10; distance (km) = 9.733297460085222
# Index =(129, 230); time (days) = 2017-09-01 08:17:08; distance (km) = 8.38425604118368
# Index =(144, 260); time (days) = 2017-10-07 01:01:34; distance (km) = 8.92364498670166
#
# no drifters found within 10 km of salmon bank
# </p>
#
|
Breadcrumbs.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nbpresent={"id": "601d0118-f7de-409d-a353-81c531c65795"}
# # Geocomputing course
#
# Welcome to geocomputing!
# + [markdown] nbpresent={"id": "e99ff0b0-dbc5-4334-a0c1-b64fd43df906"}
# ## Day 1. Introduction to Python
#
# - Installation flailing
# - Quick course overview
# - [**Intro to Python**](../notebooks/Intro_to_Python.ipynb)
#
#
# - Lightning talks
# - A couple of quick demos
# - [**Intro to Python**](../notebooks/Intro_to_Python.ipynb) — continued
# - Check out
# + [markdown] nbpresent={"id": "2bef1a51-4c16-4878-8dee-5b4b2b0d97a8"}
# ## Day 2. Introduction to scientific computing
#
# - Check in + Introductions
# - [**Intro to functions**](../notebooks/Intro_to_functions.ipynb)
# - [**Practice functions**](../notebooks/Practice_functions.ipynb)
#
#
# - Personal projects
# - [**Intro to NumPy**](../notebooks/Intro_to_NumPy.ipynb)
# - Check out
# + [markdown] nbpresent={"id": "46020cb0-0041-4fa9-813f-20fa44396679"}
# ## Day 3. Scientific computing continued
#
# - Check in
# - [**Practice NumPy**](../notebooks/Practice_NumPy.ipynb)
# - [**Reading data from files**](../notebooks/Reading_data_from_files.ipynb)
#
#
# - [**Intro to matplotlib**](../notebooks/Intro_to_matplotlib.ipynb)
# - [**Intro to seismic data**](../notebooks/Intro_to_seismic_data.ipynb)
# - **Running Python scripts**
# - **Command line utilities**
# - **Modules and packages**
# - Check out
# + [markdown] nbpresent={"id": "26a3f466-2dad-46ce-8fb2-ea66682f393c"}
# ## Day 4. Introduction to machine learning
#
# - Check in
# - [**Introduction to machine learning**](../data/notebooks/Intro_to_machine_learning_classification.ipynb)
#
#
# - Lightning talks
# - [**More machine learning**](../data/notebooks/Intro_to_Machine_learning_regression.ipynb)
# - **Wrap up machine learning**
# - Project round-up and form teams for Day 5
# + [markdown] nbpresent={"id": "66c2d7cf-2fb1-4f2a-bc3e-f6601928811e"}
# ## Day 5. Hackathon
#
# - Project headlines
# - **Code all day!**
# - Demos at 3 o'clock.
# + [markdown] nbpresent={"id": "8a962158-b836-405f-a3bb-ef2bc1d7dfd6"}
# ----
#
# ## Optional segments
#
# ### Programming
#
# - [Intro to testing](../notebooks/Intro_to_testing.ipynb)
# - [Intro to documentation](../notebooks/Intro_to_documentation.ipynb)
# - [Intro to OOP](../notebooks/Intro_to_OOP.ipynb)
# - [Practice OOP](../notebooks/Practice_OOP.ipynb)
#
#
# ### File formats
#
# - [Dealing with images](../notebooks/Dealing_with_images.ipynb)
# - [Read and write LAS](../notebooks/Read_and_write_LAS.ipynb)
# - [Read SEGY with segyio](../notebooks/Read_SEG-Y_with_SegyIO.ipynb)
# - [Write SEGY with ObsPy](../notebooks/Write_SEGY_with_ObsPy.ipynb)
#
#
# ### Geoscience
#
# - [Volumetrics](../notebooks/Volumetrics.ipynb)
# - [Get info from web](../notebooks/Get_info_from_web.ipynb)
# - [Intro to bruges](../notebooks/Intro_to_bruges.ipynb)
# - [Synthetic wedge](../notebooks/Synthetic_wedge.ipynb)
# - [Synthetic seismogram](../notebooks/Synthetic_seismogram.ipynb)
# - [Gridding map data](../notebooks/Gridding_map_data.ipynb)
# - [Intro to AVO](../notebooks/Intro_to_AVO.ipynb)
# - [Wavelets and sweeps](../notebooks/Wavelets_and_sweeps.ipynb)
# - [Find and analyse seismograph data](../notebooks/Find_and_analyse_seismograph_data.ipynb)
# - [Photomicrograph segmentation](../notebooks/Image_segmentation.ipynb)
# -
|
master/_Curriculum.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_python3
# language: python
# name: conda_python3
# ---
# [](https://github.com/awslabs/aws-data-wrangler)
#
# # 7 - Redshift, MySQL, PostgreSQL and SQL Server
#
# [Wrangler](https://github.com/awslabs/aws-data-wrangler)'s Redshift, MySQL, PostgreSQL and SQL Server modules have two basic functions in common that try to follow the Pandas conventions, but add more data type consistency.
#
# - [wr.redshift.to_sql()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.redshift.to_sql.html)
# - [wr.redshift.read_sql_query()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.redshift.read_sql_query.html)
# - [wr.mysql.to_sql()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.mysql.to_sql.html)
# - [wr.mysql.read_sql_query()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.mysql.read_sql_query.html)
# - [wr.postgresql.to_sql()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.postgresql.to_sql.html)
# - [wr.postgresql.read_sql_query()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.postgresql.read_sql_query.html)
# - [wr.sqlserver.to_sql()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.sqlserver.to_sql.html)
# - [wr.sqlserver.read_sql_query()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.sqlserver.read_sql_query.html)
# +
import awswrangler as wr
import pandas as pd

# Tiny demo frame that is written to and read back from each engine below.
df = pd.DataFrame({
    "id": [1, 2],
    "name": ["foo", "boo"]
})
# -
# ## Connect using the Glue Catalog Connections
#
# - [wr.redshift.connect()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.redshift.connect.html)
# - [wr.mysql.connect()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.mysql.connect.html)
# - [wr.postgresql.connect()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.postgresql.connect.html)
# - [wr.sqlserver.connect()](https://aws-data-wrangler.readthedocs.io/en/2.6.0/stubs/awswrangler.sqlserver.connect.html)
# One connection per engine, each resolved via its Glue Catalog connection name.
con_redshift = wr.redshift.connect("aws-data-wrangler-redshift")
con_mysql = wr.mysql.connect("aws-data-wrangler-mysql")
con_postgresql = wr.postgresql.connect("aws-data-wrangler-postgresql")
con_sqlserver = wr.sqlserver.connect("aws-data-wrangler-sqlserver")
# ## Raw SQL queries (No Pandas)
# The cursor is closed automatically by the context manager.
with con_redshift.cursor() as cursor:
    for row in cursor.execute("SELECT 1"):
        print(row)
# ## Loading data to Database
# mode="overwrite" drops/recreates the target table on every run.
wr.redshift.to_sql(df, con_redshift, schema="public", table="tutorial", mode="overwrite")
wr.mysql.to_sql(df, con_mysql, schema="test", table="tutorial", mode="overwrite")
wr.postgresql.to_sql(df, con_postgresql, schema="public", table="tutorial", mode="overwrite")
wr.sqlserver.to_sql(df, con_sqlserver, schema="dbo", table="tutorial", mode="overwrite")
# ## Unloading data from Database
# Each read returns the round-tripped frame as a pandas DataFrame.
wr.redshift.read_sql_query("SELECT * FROM public.tutorial", con=con_redshift)
wr.mysql.read_sql_query("SELECT * FROM test.tutorial", con=con_mysql)
wr.postgresql.read_sql_query("SELECT * FROM public.tutorial", con=con_postgresql)
wr.sqlserver.read_sql_query("SELECT * FROM dbo.tutorial", con=con_sqlserver)
# Release all four connections.
con_redshift.close()
con_mysql.close()
con_postgresql.close()
con_sqlserver.close()
|
tutorials/007 - Redshift, MySQL, PostgreSQL, SQL Server.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <br><h1 align="Center"> <img src="https://serea2017.uniandes.edu.co/images/Logo.png" height="60" width="200" align="Center" />MIIA-4203 MODELOS AVANZADOS PARA ANÁLISIS DE DATOS II</h1>
# <h2 align="Center">
# Presentado por:<br>
# <NAME> - Cód 201924252<br>
# <NAME> - Cód 201523509<br>
# </h2>
# <h3 align="Center">Introducción a las redes neuronales
#
# Actividad 2
#
# Profesor: <NAME> (<EMAIL>) </h3>
#
#
#
# En esta actividad vamos a estudiar una primera aproximación a los modelos de redes neuronales, utilizando como base el modelo de regresión logística.
#
# Algunos paquetes iniciales que vamos a utilizar
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import model_selection
# ## 1. Problema de clasificación: riesgo de default
#
# Examinemos los datos con lo cuales ya estamos familiarizados:
#
# https://archive.ics.uci.edu/ml/datasets/statlog+(german+credit+data)
# German credit dataset: the first column is the default label, the rest are
# features; one-hot encode every categorical column.
credit_1 = pd.read_csv("germancredit.csv")
credit_1 = pd.get_dummies(credit_1, columns=['checkingstatus1','history','purpose','savings',
    'employ','status','others','property','otherplans','housing','job','tele',
    'foreign'], prefix = ['checkingstatus1','history','purpose','savings',
    'employ','status','others','property','otherplans','housing','job','tele',
    'foreign'])
# Feature / label split: columns 1..61 after dummy-encoding are features,
# column 0 is the label.
X = credit_1.iloc[:, 1:62]
Y = credit_1.iloc[:, 0]
# Stratified 60/40 train/test split, reproducible via the fixed random_state.
CE_x, CP_x, CE_y, CP_y = model_selection.train_test_split(X, Y, test_size=0.4, random_state=42, stratify=Y)
print("Tamaño de CE, CP: ", CE_y.shape, CP_y.shape)
print("Observaciones de la clase positiva en entrenamiento: " +str(sum(CE_y)) +" y en prueba: " +str(sum(CP_y)))
# ## 2. Construcción de una neurona Sigmoide
#
# Una neurona Sigmoide puede ser vista como un perceptrón *suavizado* que recibe una señal y entonces se activa. Al activarse, transforma la señal para entender mejor el mensaje. Esta transformación la ejecuta a partir de la función Sigmoide.
#
# Si tomamos la señal como un conjunto de datos de entrada y el mensaje como la predicción de un valor, la función de activación jugará el papel de transformadora de los datos de entrada en aquello que se quiere entender/predecir, que además replica un modelo logit con la función de activación sigmoide.
#
# A continuación construiremos un clasificador de regresión logística bajo la perspectiva de una red neuronal, estudiando la arquitectura general de un algoritmo de aprendizaje. De esta manera, necesitaremos incluir la inicialización de los parámetros, el cálculo de la función de coste y su gradiente, y utilizar un algoritmo de optimización como por ejemplo el descenso en la dirección del gradiente (GD)
#
# **Formulación del algoritmo**:
#
# Para un ejemplo $x^{(i)}$:
# $$z^{(i)} = w^T x^{(i)} + b \tag{1}$$
# $$\hat{y}^{(i)} = a^{(i)} = sigmoide(z^{(i)})\tag{2}$$
# $$ \mathcal{L}(a^{(i)}, y^{(i)}) = - y^{(i)} \log(a^{(i)}) - (1-y^{(i)} ) \log(1-a^{(i)})\tag{3}$$
#
# El coste se calcula sumando sobre todos los ejemplos de entrenamiento:
# $$ L = \frac{1}{m} \sum_{i=1}^m \mathcal{L}(a^{(i)}, y^{(i)})\tag{4}$$
#
#
# ### Construimos las partes del algoritmo
#
# - Inicializar los parámetros del modelo
# - Bucle:
# - Calcular la pérdida actual (propagación hacia delante)
# - Calcular el gradiente actual (retro-propagación)
# - Actualizar los parámetros (descenso en la dirección del gradiente)
#
#
# ### Ejercicio 2.1
# Implemente la funcion `sigmoide()` $$\sigma( w^T x + b) = \frac{1}{1 + e^{-(w^T x + b)}}$$ Para ello puede utilizar np.exp().
def sigmoide(z):
    """Logistic sigmoid, applied elementwise.

    Input:
        z: a scalar or numpy array of any size
    Output:
        s: sigmoid(z) = 1 / (1 + exp(-z)), same shape as z
    """
    # For very negative z, np.exp(-z) overflows to inf and 1/inf correctly
    # underflows to the limit 0.0 — but numpy emits a RuntimeWarning.
    # Silence only the overflow warning; the computed values are unchanged.
    with np.errstate(over='ignore'):
        s = 1 / (1 + np.exp(-z))
    return s
# <img src="https://raw.githubusercontent.com/albahnsen/PracticalMachineLearningClass/master/notebooks/images/logistic_function.png" width="65%" />
#
# _(Source: Python Machine Learning, <NAME>)_
# Spot-check: saturates toward 1/0 at the extremes and gives 0.5 at z = 0.
print ("sigmoide([99,1,0,-1,-99]) = " + str(sigmoide(np.array([99,1,0,-1,-99]))))
# **Salida esperada**:
#
# <table>
# <tr>
# <td> sigmoide([99,1,0,-1,-99]) = </td>
# <td> [ 1.00000000e+00 7.31058579e-01 5.00000000e-01 2.68941421e-01
# 1.01122149e-43] </td>
# </tr>
# </table>
# ### Ejercicio 2.2
#
# Debemos inicializar los parámetros a cero. Puede utilizar la funcion np.zeros(), apoyandose en la documentación de la biblioteca Numpy.
def inicializa_ceros(dim):
    """Initialise the logistic unit's parameters at zero.

    Input:
        dim: length of the weight vector (number of features)
    Output:
        w: zero column vector of shape (dim, 1)
        b: bias, the scalar 0
    """
    b = 0
    w = np.zeros((dim, 1))
    # Sanity checks on shape and bias type, kept from the exercise statement.
    assert(w.shape == (dim, 1))
    assert(isinstance(b, float) or isinstance(b, int))
    return w, b
# Quick check of the initialiser with a 6-feature weight vector.
dim = 6
w, b = inicializa_ceros(dim)
print ("w = " + str(w))
print ("b = " + str(b))
# **Salida esperada**:
#
#
# <table style="width:35%">
# <tr>
# <td> w </td>
# <td> [[0.]
# [0.]
# [0.]
# [0.]
# [0.]
# [0.]] </td>
# </tr>
# <tr>
# <td> b </td>
# <td> 0 </td>
# </tr>
# </table>
# ### Ejercicio 2.3
# #### Propagación hacia delante y hacia atrás
#
# Una vez los estimadores están inicializados, se pueden implementar los pasos de propagación hacia "delante" y hacia "atrás" para el aprendizaje automático.
#
# La propagación hacia delante consiste en calcular la función de activación sigmoide sobre la combinacón lineal de los patrones y los coeficientes inciales.
#
# Luego la propagación hacia atrás, o *retro-propagación*, es el paso más importante, donde utilizamos el gradiente de la función del error o de pérdida para actualizar los coeficientes.
#
# Este procedimiento se repite iterativamente replicando el procediemiento de descenso en la dirección del gradiente o *Gradient Descent* (GD).
#
# A continuación implemente la función `propaga()` que calcula la función de coste y su gradiente.
#
# **Ayuda**:
#
# Propagación hacia delante:
# - Se tiene $X$
# - Se calcula $A = \sigma(w^T X + b) = (a^{(1)}, a^{(2)}, ..., a^{(m-1)}, a^{(m)})$
# - Se calcula la función de coste/pérdida: $L = -\frac{1}{m}\sum_{i=1}^{m}y^{(i)}\log(a^{(i)})+(1-y^{(i)})\log(1-a^{(i)})$
#
# Para la retro-propagación, tenemos que calcular la derivada parcial de *L* con respecto a nuestros coeficientes $(w,b)$:
#
# $$ \frac{\partial L}{\partial w} = \frac{1}{m}X(A-Y)^T\tag{5}$$
# $$ \frac{\partial L}{\partial b} = \frac{1}{m} \sum_{i=1}^m (a^{(i)}-y^{(i)})\tag{6}$$
#
# *Nota:* Para el cálculo de estas derivadas debemos hacer uso de la regla de la cadena.
#
# Esto es, dado $Z=w^T X + b$, se tiene que $$\frac{\partial L}{\partial Z} = \frac{\partial L}{\partial A} \frac{\partial A}{\partial Z} = \bigg(\frac{-Y}{A}+\frac{1-Y}{1-A}\bigg) (A \cdot (1-A)) $$
#
def propaga(w, b, X, Y):
    """Forward and backward pass for logistic regression.

    Input:
        w: weights, numpy array of shape (n, 1)
        b: bias, a scalar
        X: input data of shape (n, m) — one column per example
        Y: label row vector of shape (1, m)
    Output:
        grads: dict with
            dw: gradient of the loss w.r.t. w, same shape as w
            db: gradient of the loss w.r.t. b, a scalar like b
        coste: negative log-likelihood cost of logistic regression
    """
    m = X.shape[1]
    # Forward pass: activations A = sigmoid(w^T X + b), shape (1, m).
    A = sigmoide(np.dot(w.T, X) + b)
    # Cost in two steps: per-example cross-entropy terms, then their mean.
    logs = np.multiply(Y, np.log(A)) + np.multiply((1 - Y), np.log(1 - A))
    coste = -1/m * np.sum(logs)
    # Backward pass: gradients of the cost w.r.t. w and b.
    dw = 1/m * np.dot(X, (A - Y).T)
    # BUG FIX: the original used np.sum(A - Y, keepdims=True), which yields a
    # (1, 1) array; db must be a scalar ("same dimensions as b"), matching the
    # expected output db = 0.348980796447886.
    db = 1/m * np.sum(A - Y)
    assert(dw.shape == w.shape)
    assert(db.dtype == float)
    coste = np.squeeze(coste)
    assert(coste.shape == ())
    grads = {"dw": dw,
             "db": db}
    return grads, coste
# Smoke test on a hand-picked 2-feature, 3-example batch.
w, b, X, Y = np.array([[0.1],[0.1]]), 0.5, np.array([[66.,99.,-33.],[32.,55.,-2.1]]), np.array([[0,0,1]])
grads, coste = propaga(w, b, X, Y)
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
print ("coste = " + str(coste))
# **Salida esperada**:
#
# <table style="width:50%">
# <tr>
# <td> dw </td>
# <td> [[65.48251839]
# [29.66675568]]</td>
# </tr>
# <tr>
# <td> db </td>
# <td> 0.348980796447886 </td>
# </tr>
# <tr>
# <td> cost </td>
# <td> 9.752716367426284 </td>
# </tr>
# </table>
#
# Los parámetros obtenidos están de acuerdo a la salida espera.
# ### Ejercicio 2.4
# #### Optimización
#
# - Se tienen los parámetros inicializados.
# - También se tiene el código para calcular la función de coste y su gradiente.
# - Ahora se quieren actualizar los parámetros utilizando el GD.
#
# Escriba la función de optimización para aprender $w$ y $b$ minimizando la función de coste $L$.
#
# Para un parámetro $\theta$, la regla de actualización es $ \theta = \theta - \alpha \text{ } d\theta$, donde $\alpha$ es la tasa de aprendizaje.
def optimiza(w, b, X, Y, num_iter, tasa, print_cost):
    """Learn w and b by plain gradient descent on the logistic cost.

    Input:
        w: weights, numpy array
        b: bias, a scalar
        X: input data, one column per example
        Y: label row vector
        num_iter: number of gradient-descent iterations
        tasa: learning rate for the update rule
        print_cost: if True, print the cost every 100 iterations
    Output:
        params: dict with the learned weights w and bias b
        grads: dict with the last gradients dw and db
        costes: costs sampled every 100 iterations (for learning curves)
    """
    costes = []
    for paso in range(num_iter):
        # Cost and gradients at the current parameters.
        grads, coste = propaga(w, b, X, Y)
        dw, db = grads["dw"], grads["db"]
        # Gradient-descent update: theta <- theta - tasa * d_theta.
        w = w - tasa * dw
        b = b - tasa * db
        # Sample (and optionally report) the cost every 100 iterations.
        if paso % 100 == 0:
            costes.append(coste)
            if print_cost:
                print ("Coste tras la iteración %i: %f" %(paso, coste))
    params = {"w": w,
              "b": b}
    grads = {"dw": dw,
             "db": db}
    return params, grads, costes
# +
# Ten tiny GD steps on the toy batch defined above.
params, grads, costes = optimiza(w, b, X, Y, num_iter= 10, tasa = 0.001, print_cost = False)
print ("w = " + str(params["w"]))
print ("b = " + str(params["b"]))
print ("dw = " + str(grads["dw"]))
print ("db = " + str(grads["db"]))
# -
# **Salida esperada**:
#
# <table style="width:40%">
# <tr>
# <td> w </td>
# <td>[[-0.07262234]
# [ 0.02112647]] </td>
# </tr>
# <tr>
# <td> b </td>
# <td> 0.49898148713402446 </td>
# </tr>
# <tr>
# <td> dw </td>
# <td> [[1.42076721]
# [0.43496446]] </td>
# </tr>
# <tr>
# <td> db </td>
# <td> -0.007821662502973652 </td>
# </tr>
# </table>
# ### Ejercicio 2.5
#
# La función anterior aprende los parámetros w y b, que se pueden usar para predecir sobre el conjunto de datos X.
#
# Hay dos pasos para calcular las predicciones:
#
# 1. Calcular $\hat{Y} = A = \sigma(w^T X + b)$
#
# 2. Converir a 0 las entradas de $a$ (si la activación es <= 0.5) o 1 (si la activación es > 0.5), guarde las predicciones en un vector `Y_pred`.
#
# Ahora implemente la función `pred()`.
def pred(w, b, X):
    '''Predict 0/1 labels with learned logistic-regression parameters (w, b).

    Input:
        w: weights, numpy array
        b: bias, a scalar
        X: input data, one column per example
    Output:
        Y_pred: (1, m) vector with a 0/1 prediction for every example in X
    '''
    m = X.shape[1]
    w = w.reshape(X.shape[0], 1)
    # Positive-class probabilities, shape (1, m).
    A = sigmoide(np.dot(w.T, X) + b)
    # Threshold each probability: round() maps values above 0.5 to 1 and
    # values at or below 0.5 to 0.
    Y_pred = np.zeros((1, m))
    for col in range(A.shape[1]):
        Y_pred[0, col] = round(A[0, col], 0)
    assert(Y_pred.shape == (1, m))
    return Y_pred
# Spot-check predictions on three hand-made 2-feature examples.
w = np.array([[0.12],[0.23]])
b = -0.09
X = np.array([[3.1,-2.9,0.2],[1.9,1.8,-0.09]])
print ("predicciones = " + str(pred(w, b, X)))
# **Salida esperada**:
#
# <table style="width:40%">
# <tr>
# <td> predicciones </td>
# <td>[[ 1. 0. 0.]] </td>
# </tr>
# </table>
# ### Ejercicio 2.6
# #### Combine todas las funciones
#
# Ahora juntemos todos los bloques que ha programado arriba.
#
# Implemente la función del modelo "madre". Use la siguiente notación:
# - YP_pred para las predicciones sobre el conjunto de prueba
# - YE_pred para las predicciones sobre el conjunto de entrenamiento
# - w, costes, grads para las salidas de optimiza()
CP_x.shape  # quick look at the test split's (rows, columns)
def modelo(CE_x, CP_x, CE_y, CP_y, num_iter, tasa, print_cost):
    """Build the logistic-regression model by calling the helpers defined above.

    Parameters
    ----------
    CE_x, CE_y : training inputs and labels (one example per column of CE_x).
    CP_x, CP_y : test inputs and labels.
    num_iter : number of gradient-descent iterations passed to optimiza().
    tasa : learning rate.
    print_cost : if True, optimiza() prints the cost periodically.

    Returns
    -------
    d : dict with per-iteration costs, predictions on both sets, the learned
        parameters w and b, and the hyperparameters used.
    """
    # Initialize the parameters to zeros (helper defined earlier in the notebook).
    w, b = inicializa_ceros(CE_x.shape[0])
    # Gradient descent (GD).
    params, grads, costes = optimiza(w, b, CE_x, CE_y, num_iter, tasa, print_cost)
    # Recover the learned parameters w and b from the "params" dict.
    w = params["w"]
    b = params["b"]
    # Predict on the test (YP_pred) and training (YE_pred) sets.
    YP_pred = pred(w, b, CP_x)
    YE_pred = pred(w, b, CE_x)
    # Accuracy = 100 - mean absolute error * 100 (labels are 0/1, so this is % correct).
    print("Precisión de entrenamiento: {} %".format(100 - np.mean(np.abs(YE_pred - CE_y)) * 100))
    print("Precisión de prueba: {} %".format(100 - np.mean(np.abs(YP_pred - CP_y)) * 100))
    d = {"Costes": costes,
         "Prediccion_prueba": YP_pred,
         "Prediccion_entrenamiento" : YE_pred,
         "w" : w,
         "b" : b,
         "Tasa de aprendizaje" : tasa,
         "Numero de iteraciones": num_iter}
    return d
# ### Pregunta 2.7
#
# De qué dimensiones deben ser las matrices con los datos de entrada y de salida?
# +
# Se hallan las dimensiones de las matrices con los datos de entrada y de salida
CE_x2 = CE_x.T
CP_x2 = CP_x.T
CE_y2 = np.array(CE_y)[np.newaxis]
CP_y2 = np.array(CP_y)[np.newaxis]
print(CE_x2.shape, CE_y2.shape)
# -
# Ahora, ejecute la siguiente celda para entrenar el modelo:
d = modelo(CE_x2, CP_x2, CE_y2, CP_y2, num_iter = 1000, tasa = 1e-6, print_cost = True)
# **Salida esperada**:
#
# <table style="width:50%">
# <tr>
# <td> Coste tras la iteración 0 </td>
# <td> 0.693147 </td>
# </tr>
# <tr>
# <td> <center> $\vdots$ </center> </td>
# <td> <center> $\vdots$ </center> </td>
# </tr>
# <tr>
# <td> Precisión de entrenamiento </td>
# <td> 70.0 % </td>
# </tr>
# <tr>
# <td> Precisión de prueba </td>
# <td> 70.0 % </td>
# </tr>
# </table>
# La precisión de entrenamiento es muy similar a la que conseguimos mediante la regresión logística. También podemos observar que el error de prueba es igual al de entrenamiento. Este resultado sugiere que el modelo aprende según el entrenamiento y generaliza de igual forma sobre las observaciones nuevas.
# Grafiquemos la función de pérdida.
# Gráfica de la curva de aprendizaje (con costes)
costes = np.squeeze(d['Costes'])
plt.plot(costes)
plt.ylabel('coste')
plt.xlabel('iteraciones (en cientos)')
plt.title("Tasa de aprendizaje =" + str(d["Tasa de aprendizaje"]))
plt.show()
# **Interpretación**:
# Se puede ver el coste decreciendo, demostrando que los parámetros están siendo aprendidos.
# Ya tenemos un primer modelo de clasificación. Ahora examinemos distintos valores para la tasa de aprendizaje $\alpha$.
#
# #### Selección de la tasa de aprendizaje ####
#
# Para que el método del GD funcione de manera adecuada, se debe elegir la tasa de aprendizaje de manera acertada. Esta tasa $\alpha$ determina qué tan rápido se actualizan los parámetros. Si la tasa es muy grande se puede "sobrepasar" el valor óptimo. Y de manera similar, si es muy pequeña se van a necesitar muchas iteraciones para converger a los mejores valores. Por ello la importancia de tener una tasa de aprendizaje bien definida.
#
# Ahora, comparemos la curva de aprendizaje de nuestro modelo con distintas elecciones para $\alpha$. Ejecute el código abajo. También puede intentar con valores distintos a los tres que estamos utilizando abajo para `tasas` y analize los resultados.
#
#
# +
tasas = [1e-4, 1.04e-6, 1e-6, 1e-10, 2e-20]
modelos = {}
for i in tasas:
print ("La tasa de aprendizaje es: " + str(i))
modelos[str(i)] = modelo(CE_x2, CP_x2, CE_y2, CP_y2, num_iter = 2000, tasa = i, print_cost = False)
print ('\n' + "-------------------------------------------------------" + '\n')
for i in tasas:
plt.plot(np.squeeze(modelos[str(i)]["Costes"]), label= str(modelos[str(i)]["Tasa de aprendizaje"]))
plt.ylabel('coste')
plt.xlabel('iteraciones (en cientos)')
legend = plt.legend(loc='upper center', shadow=True)
frame = legend.get_frame()
frame.set_facecolor('0.90')
plt.show()
# -
# #### Comentarios acerca de la tasa de aprendizaje
#
# Los pesos de cada neurona para el algoritmo GD **gradient descent**, donde el error de cada neurona está relacionado con su peso. La regla para actualizar el parámetro es: $ \theta = \theta - \alpha \text{ } d\theta$ donde $\alpha$ es la tasa de aprendizaje $\theta$.
#
# <img src='https://raw.githubusercontent.com/albahnsen/PracticalMachineLearningClass/master/notebooks/images/updateParameters.png'>
#
# Con una tasa muy baja (en nuestro caso $2*10^{-20}$), se requieren muchas iteraciones antes de alcanzar el punto mínimo; mientras que una tasa de aprendizaje muy alta (en nuestro caso $1*10^{-4}$) causa actualizaciones muy drásticas en los pesos, lo que conlleva a comportamientos erráticos. En contraposición, una buena tasa de aprendizaje (en nuestro caso $1*10^{-6}$) optimiza el tiempo de cómputo, así como la precisión de la red neuronal. Imágenes tomadas de <a href="https://raw.githubusercontent.com/albahnsen/PracticalMachineLearningClass/master/notebooks/images/updateParameters.png">Repositorio git hub de <NAME></a>.
# ### Pregunta 2.8
#
# Analice los resultados, con cuál tasa de aprendizaje intentaría mejorar el desempeño del modelo?
# R/ Con valores entre $1.04*10^{-6}$ y $1*10^{-6}$ se podría encontrar el máximo absoluto de la precisión.
# ## 3. Comparacion con la implementación tradicional de regresión logística
# A continuación ajustamos el modelo logístico y lo probamos:
logT = LogisticRegression(penalty='none', max_iter=1500)
logT.fit(CE_x, CE_y)
y_tr = logT.predict(CE_x)
y_pred = logT.predict(CP_x)
logT_coef = logT.coef_
# Examinemos los coeficientes del modelo de la neurona sigmoide y su desviación con respecto a la estimación tradicional de regresion logistica.
# +
from astropy.table import QTable, Table, Column
Tabla = Table([logT_coef.T, d['w'], CE_x.T], names=("Regresion logistica", "Neurona sigmoide", "Diferencia"))
Tabla
# -
# ### Pregunta 3.1
#
# Qué puede observar en esta comparativa?
# Se nota diferencia entre las unidades de la estimación.
# Veamos la exactitud de los modelos
print("La neurona sigmoide tiene una exactitud de entrenamiento: "
+str(float((d['Prediccion_entrenamiento'] == CE_y2).mean())) +" y de validacion: " +str(float((d['Prediccion_prueba'] == CP_y2).mean())))
print("La regresion tradicional tiene una exactitud de entrenamiento: "
+str(float((y_tr == CE_y).mean())) +" y de validacion: " +str(float((y_pred == CP_y).mean())))
# ### Ejercicio 3.2
#
# Ahora puede desarrollar su propio código intentando mejorar los resultados obtenidos.
#
# Intente sobrepasar los resultados de la regresion logistica tradicional. Optimice la tasa de aprendizaje, el número de iteraciones o (bono) investigue y cambie la manera en la cual inicializamos los coeficientes.
for i in range(10,1000,10):
print("*** tasa = " + str(i) + " ***" )
d = modelo(CE_x2, CP_x2, CE_y2, CP_y2, num_iter = 1000, tasa = 1/(i*1000), print_cost = True)
# Dado que la red neuronal se construyó con sólo una neurona, la regresión logística tradicional supera a la red neuronal; no obstante, si se buscaran otras configuraciones para la red neuronal, como aumentar el número de neuronas y adicionar capas ocultas, sería posible encontrar mejores resultados.
|
Semana2_IntroduccionRedesNeuronales_cholo_orjuela.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Mapping Epicollect points in Hillside Woods
# Our goal is to display the user's current location, using a phone GPS point, simultaneously alongside EpiCollect5 data for the project (https://five.epicollect.net/project/hillside-woods-restoration-project).
#
# We can do this using a Python package to collect data using the EpiCollect5 API, using the `folium` mapping tool (which wraps leaflet.js), and using the LocateControl functionality in leaflet.js (code here: https://github.com/python-visualization/folium/blob/master/folium/plugins/locate_control.py)
# ### installs (run these in terminal)
# +
# pip install pyepicollect
# conda install folium -c conda-forge
# -
# ### imports
# package for getting EpiCollect data
import pyepicollect as pyep
# package for mapping
import folium
# tool for adding current location
from folium.plugins import LocateControl
# linear algebraic tools
import numpy as np
# # 1) Access and format EpiCollect5 data using `pyepicollect`
# load entries -- set the per_page value high so that we get em all
entries = pyep.api.get_entries('hillside-woods-restoration-project',per_page=10000)
# +
# save list of lats
lats = [i['5_Location']['latitude'] for i in entries['data']['entries']]
# save list of lons
lons = [i['5_Location']['longitude'] for i in entries['data']['entries']]
# +
# zip em together
lat_lon_pairs = [list(i) for i in zip(lats,lons)]
# look at the first several:
lat_lon_pairs[:10]
# -
# # 2) Define a map with `folium`
# ## Map:
# #### Start by defining map parameters:
# +
# give the map a general starting location (middle of hillside woods)
starting_location = [40.997895, -73.867473]
# give the map a starting zoom
zoom_start = 16
# give the map a maximum zoom -- higher number allows farther zoom, but makes things slower...
max_zoom = 22
# maybe giving a min_zoom will speed up rendering?
min_zoom = 0
# -
# #### Now define the map and locator:
# +
# define a simple map
m = folium.Map(starting_location,
zoom_start=zoom_start,
max_zoom=max_zoom,
min_zoom=min_zoom)
# turn on the gps locator
LocateControl(strings={"title": "See your current location"}).add_to(m);
# -
# ## Grid:
# #### Add gridline spacings that we want, covering the whole area:
# here are broad boundaries
gridlons = [-74.5,-73.5]
gridlats = [40.5,50.5]
# extract linear points between these.
# This controls spacing...
hlines_bigbounds = np.array([[[i,gridlons[0]],[i,gridlons[1]]] for i in np.linspace(gridlats[0],gridlats[1],10001)])
vlines_bigbounds = np.array([[[gridlats[0],i],[gridlats[1],i]] for i in np.linspace(gridlons[0],gridlons[1],1001)])
# #### Exclude gridlines that are far from Hillside:
# fit boundaries tightly around Hillside
boundlats = [40.993,41.004]
boundlons = [-73.878,-73.857]
# exclude far-away horizontal lines
hlines = hlines_bigbounds[np.logical_and(hlines_bigbounds[:,0,0] >= boundlats[0], hlines_bigbounds[:,0,0] <= boundlats[1])]
hlines[:,0,1] = boundlons[0]
hlines[:,1,1] = boundlons[1]
# exclude far-away vertical lines
vlines = vlines_bigbounds[np.logical_and(vlines_bigbounds[:,0,1] >= boundlons[0], vlines_bigbounds[:,0,1] <= boundlons[1])]
vlines[:,0,0] = boundlats[0]
vlines[:,1,0] = boundlats[1]
# save all lines for grid
lines = np.vstack([hlines,vlines])
# #### Add grid to map -- each gridline is plotted separately:
# loop through gridlines, adding each one
for line in lines:
pl=folium.PolyLine(locations=line,weight=1)
m.add_child(pl)
# ## EpiCollect:
# #### Finally, add the EpiCollect data to the map:
# add the EpiCollect data points
# for now, the popup just says that they are points
# later, we could add the EpiCollect information for each point
for i in range(len(lat_lon_pairs)):
folium.Marker(
lat_lon_pairs[i],
popup='epicollect data point'
).add_to(m)
# # 3) Display the map:
# * You can zoom in or out, and drag around the map.
# * You can click the location toggle in the upper-left corner to show your gps location.
# * You can click on the EpiCollect points in Hillside Woods to show information about them (although they're currently informationless!)
m
|
notebooks/simple_mapping_demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Qiskit v0.34.2 (ipykernel)
# language: python
# name: python3
# ---
# # **Meta-Optimization for Automating Model**
# ## **Installing and Importing Libraries**
# +
# installing packages
# !pip install hyperopt
# !pip install hyperas
# !pip install autokeras
# !pip install tensorflow
# array processing
import numpy as np
# deep learning staple libraries
import tensorflow as tf
from tensorflow import keras
# meta-optimization
import hyperopt
import hyperas
# neural architecture search
import autokeras as ak
# -
# ## **Loading Data**
# For data, we'll use the CIFAR-10 dataset, with a small adaptation to decrease the size of the dataset for faster meta-optimization training - only data instances with a label of 0 or 1 are included.
# +
# load cifar-10 data
(x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
# get training data
valid_train_indices = (y_train.reshape(len(y_train))==0)|(y_train.reshape(len(y_train))==1)
x_train = x_train[valid_train_indices]
y_train = y_train[valid_train_indices]
# get testing data
valid_test_indices = (y_test.reshape(len(y_test))==0)|(y_test.reshape(len(y_test))==1)
x_test = x_test[valid_test_indices]
y_test = y_test[valid_test_indices]
# -
# ## **HyperOpt**
# ### **Finding Minimum of a Continuous Function**
# Code to use Bayesian optimization via TPE algorithm to find the minimum of the function $f(x)=(x-1)^2$.
# +
# define the search space
from hyperopt import hp
space = {'x':hp.normal('x', mu=0, sigma=10)}
# define objective function
def obj_func(params):
    """Hyperopt objective: squared distance of x from 1, minimized at x == 1."""
    x = params['x']
    return (x - 1) ** 2
# perform minimization procedure
from hyperopt import fmin, tpe
best = fmin(obj_func, space, algo=tpe.suggest, max_evals=500)
# -
# ### **Finding Minimum of a Non-continuous Function Using Statuses**
# Code to find the minimum of the function $\left|\frac{1}{x}\right| + x^2$, a function that is undefined at $x=0$, to demonstrate the usage of `ok` and `fail` statuses.
# +
# define the search space
from hyperopt import hp
space = {'x':hp.normal('x', mu=0, sigma=10)}
# define objective function
def obj_func(params):
    """Hyperopt objective for |1/x| + x^2, which is undefined at x == 0.

    Returns a 'fail' status at the singular point so the optimizer skips it,
    and an 'ok' status with the loss everywhere else.
    """
    x = params['x']
    if x == 0:
        # Division by zero — tell hyperopt this trial is invalid.
        return {'status': 'fail'}
    loss = np.abs(1 / x) + x ** 2
    return {'loss': loss, 'status': 'ok'}
# perform minimization procedure
from hyperopt import fmin, tpe
best = fmin(obj_func, space, algo=tpe.suggest, max_evals=500)
# -
# ### **Finding Optimal Optimizer and Learning Rate in CNN**
# +
# define search space
from tensorflow.keras.optimizers import Adam, RMSprop, SGD
optimizers = [Adam, RMSprop, SGD]
space = {'optimizer':hp.choice('optimizer',optimizers),
'lr':hp.lognormal('lr', mu=0.005, sigma=0.001)}
# import necessary model and layers
from keras.models import Sequential
import keras.layers as L
# build objective function
def objective(params):
    """Hyperopt objective: train a small CNN and return the negated test accuracy.

    params: dict with 'optimizer' (a Keras optimizer class from the search
    space) and 'lr' (learning rate). Relies on the globals
    x_train/y_train/x_test/y_test loaded earlier in the notebook.
    """
    # Build the model: 4 conv layers, then a small dense binary-classifier head.
    model = Sequential()
    model.add(L.Input((32,32,3)))
    for i in range(4):
        model.add(L.Conv2D(32, (3,3), activation='relu'))
    model.add(L.Flatten())
    model.add(L.Dense(64, activation='relu'))
    model.add(L.Dense(1, activation='sigmoid'))
    # Compile with the sampled optimizer class and learning rate.
    # NOTE(review): the `lr=` keyword is deprecated in newer Keras in favor
    # of `learning_rate=` — confirm against the installed version.
    optimizer = params['optimizer'](lr=params['lr'])
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    # fit
    model.fit(x_train, y_train, epochs=1, verbose=0) # increase epochs for better performance
    # evaluate accuracy (second elem. w/ .evaluate())
    acc = model.evaluate(x_test, y_test, verbose=0)[1]
    # Hyperopt minimizes, so return the negative of the accuracy.
    return -acc
# perform search
best = fmin(objective, space, algo=tpe.suggest, max_evals=1) # increase evals for better performance
# -
# ### **Finding Optimal Model Architecture**
# +
# specify important parameters for search space
min_num_convs = 3
max_num_convs = 8
min_num_dense = 2
max_num_dense = 5
# obtain list of dropout rates
conv_drs, dense_drs = [], []
for layer in range(max_num_convs):
conv_drs.append(hp.normal(f'c{layer}', 0.15, 0.1))
for layer in range(max_num_dense):
dense_drs.append(hp.normal(f'd{layer}', 0.2, 0.1))
# define search space
space = {'#convs':hp.quniform('#convs',
min_num_convs,
max_num_convs,
q=1),
'#dense':hp.quniform('#dense',
min_num_dense,
max_num_dense,
q=1),
'conv_dr':conv_drs,
'dense_dr':dense_drs}
# define objective function
def objective(params):
    """Hyperopt objective: build a CNN from the sampled architecture and
    return the negated test accuracy.

    params: dict with '#convs' / '#dense' (layer counts, quniform floats)
    and 'conv_dr' / 'dense_dr' (per-layer dropout rates drawn from
    hp.normal). Relies on the globals x_train/y_train/x_test/y_test
    loaded earlier in the notebook.
    """
    # convert set of params to list for mutability
    conv_drs = list(params['conv_dr'])
    dense_drs = list(params['dense_dr'])
    # hp.normal is unbounded, so clamp every dropout rate into [0, 0.9].
    for ind in range(len(conv_drs)):
        if conv_drs[ind] > 0.9:
            conv_drs[ind] = 0.9
        if conv_drs[ind] < 0:
            conv_drs[ind] = 0
    for ind in range(len(dense_drs)):
        if dense_drs[ind] > 0.9:
            dense_drs[ind] = 0.9
        if dense_drs[ind] < 0:
            dense_drs[ind] = 0
    # build model template + input
    model = Sequential()
    model.add(L.Input((32,32,3)))
    # build convolutional component (sampled number of conv+dropout pairs)
    for ind in range(int(params['#convs'])):
        # add convolutional layer
        model.add(L.Conv2D(32, (3,3), activation='relu'))
        # add corresponding dropout rate
        model.add(L.Dropout(conv_drs[ind]))
    # add flattening for dense component
    model.add(L.Flatten())
    # build dense component (sampled number of dense+dropout pairs)
    for ind in range(int(params['#dense'])):
        # add dense layer
        model.add(L.Dense(32, activation='relu'))
        # add corresponding dropout rate
        model.add(L.Dropout(dense_drs[ind]))
    # add output (binary classification head)
    model.add(L.Dense(1, activation='sigmoid'))
    # compile
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    # fit
    model.fit(x_train, y_train, epochs=1, verbose=0) # increase epochs for better performance
    # evaluate accuracy (second elem. w/ .evaluate())
    acc = model.evaluate(x_test, y_test, verbose=0)[1]
    # Hyperopt minimizes, so return the negative of the accuracy.
    return -acc
# perform search
best = fmin(objective, space, algo=tpe.suggest, max_evals=1) # increase evals for better performance
# -
# ## **Auto-Keras**
# Note that Auto-Keras is quite memory consuming. If you run multiple meta-optimization campaigns in one session, expect for memory problems.
# ### **Simple Image Block**
# +
# define architecture
inp = ak.ImageInput()
imageblock = ak.ImageBlock()(inp)
output = ak.ClassificationHead()(imageblock)
# aggregate into model
search = ak.AutoModel(
inputs=inp, outputs=output, max_trials=1 # increase max trials for better performance
)
# fit
search.fit(x_train, y_train, epochs=1) # increase epochs for better performance
# export model
best_model = search.export_model()
# -
# ### **Custom Search Space**
# +
# define architecture
inp = ak.ImageInput()
aug = ak.ImageAugmentation(translation_factor=0.1,
vertical_flip=False,
horizontal_flip=True)(inp)
resnetblock = ak.ResNetBlock(pretrained=True,
version=None)(aug)
denseblock = ak.DenseBlock()(resnetblock)
output = ak.ClassificationHead()(denseblock)
# aggregate into model
search = ak.AutoModel(
inputs=inp, outputs=output, max_trials=1 # increase max trials for better performance
)
# fit
search.fit(x_train, y_train, epochs=1) # increase epochs for better performance
# export model
best_model = search.export_model()
# -
# ### **Nonlinear Topology**
# +
# define architecture
inp = ak.ImageInput()
resnetblock = ak.ResNetBlock(pretrained=True)(inp)
xceptionblock = ak.XceptionBlock(pretrained=True)(inp)
merge = ak.Merge()([resnetblock, xceptionblock])
denseblock = ak.DenseBlock()(merge)
output = ak.ClassificationHead()(denseblock)
# aggregate into model
search = ak.AutoModel(
inputs=inp, outputs=output, max_trials=1 # increase max trials for better performance
)
# fit
search.fit(x_train, y_train, epochs=1) # increase epochs for better performance
# export model
best_model = search.export_model()
|
Meta-Optimization for Automating Model.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
import tensorflow as tf
# $ f(x)=W.x +b $
TRUE_W=3.0
TRUE_B=2.0
NUM_EXAMPLES=1000
x=tf.random.normal(shape=[NUM_EXAMPLES])
noise=tf.random.normal(shape=[NUM_EXAMPLES])
y=x*TRUE_W + TRUE_B +noise
import matplotlib.pyplot as plt
plt.scatter(x,y,c="r")
plt.show()
# +
#y=x*TRUE_W + TRUE_B
# +
#plt.scatter(x,y,c="r")
#plt.show()
# -
# # Lets define the model
class MyModel(tf.Module):
    """Trainable linear model f(x) = w*x + b."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Start away from the true values (TRUE_W=3.0, TRUE_B=2.0) so that
        # training visibly moves the parameters toward them.
        self.w = tf.Variable(5.0)
        self.b = tf.Variable(0.0)

    def __call__(self, x):
        """Evaluate the model at x (scalar or tensor)."""
        return self.w * x + self.b
model=MyModel()
model(3)
class Test:
    """Tiny demo of a callable object: calling an instance cubes its value."""

    def __init__(self, x):
        # Value captured at construction time.
        self.x = x

    def __call__(self):
        # pow() is equivalent to the ** operator used originally.
        return pow(self.x, 3)
obj=Test(2)
obj
obj()
model=MyModel()
model(3)
model.w
model.b
model.variables
def MSE_loss(target_y, predicted_y):
    """Mean squared error between targets and predictions."""
    # Single expression: mean of the elementwise squared residuals.
    return tf.reduce_mean(tf.square(target_y - predicted_y))
plt.scatter(x,y,c="b")
pred_y=model(x)# without train the model
plt.scatter(x, pred_y, c="r")
plt.show()
current_loss=MSE_loss(y, model(x))
current_loss.numpy()
# # Lets define our training
def train(model, x, y, learning_rate):
    """Run one gradient-descent step on (x, y), updating model.w and model.b in place."""
    # Record the forward pass under the tape so gradients of the loss with
    # respect to w and b can be computed.
    with tf.GradientTape() as tape:
        current_loss = MSE_loss(y, model(x))
    dc_dw, dc_db = tape.gradient(current_loss, [model.w, model.b])
    # Vanilla SGD update: theta <- theta - lr * d(loss)/d(theta).
    model.w.assign_sub(learning_rate * dc_dw)
    model.b.assign_sub(learning_rate * dc_db)
model=MyModel()
Ws,bs=[],[]
epochs=25
learning_rate=0.1
w=model.w.numpy()
b=model.w.numpy()
init_loss=MSE_loss(y, model(x))
print(f"Initial W: {w}, initial bias: {b}, initial_loss: {init_loss}")
for epoch in range(epochs):
train(model,x,y,learning_rate)
Ws.append(model.w.numpy())
bs.append(model.b.numpy())
current_loss=MSE_loss(y,model(x))
print(f"For epoch: {epoch}, W: {Ws[-1]}, b: {bs[-1]}, current_loss: {current_loss}")
plt.plot(range(epochs), Ws, 'r', range(epochs), bs, "b")
plt.plot([TRUE_W]*epochs, "r--", [TRUE_B]*epochs, "b--")
plt.legend(["W","b","True W", "True B"])
plt.show()
plt.scatter(x,y,c="b")
pred_y=model(x)# after train the model
plt.scatter(x, pred_y, c="r")
plt.show()
|
TensorFlow/Simple Linear Regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import sys ###noise_simulation_header###
sys.path.append('../scripts/')
from ideal_robot import *
class Robot(IdealRobot): ###empty_robot###
    # Placeholder subclass: identical to IdealRobot for now; motion-noise
    # behavior is added to it in later sections of this chapter.
    pass
# +
world = World(30, 0.1) ###test_multi_animation###
for i in range(100):
circling = Agent(0.2, 10.0/180*math.pi)
r = Robot( np.array([0, 0, 0]).T, sensor=None, agent=circling )
world.append(r)
world.draw()
# -
|
section_uncertainty/noise_simulation1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy
import matplotlib.pyplot as plt
# ## Plotting phasor blobs
#
# A bivariate gaussian blob can be interpreted as a set of complex numbers.
#
# In the plot below, take the y axis to be the imaginary axis, and the x axis to be the real. Each of the points is a complex number. Also, each of them is a phasor representing a sinusoidal wave with a given amplitude and phase offset!
#
# The upshot is that we can visualize a set of phasors as a blob like the below.
# +
def complex_blob(n, scale=1.0):
    """Sample n points from an isotropic 2-D Gaussian, viewed as complex numbers.

    The real and imaginary parts are independent N(0, scale^2) draws.
    """
    cov = [[scale ** 2, 0], [0, scale ** 2]]
    re, im = numpy.random.multivariate_normal([0, 0], cov, n).T
    return re + 1j * im
def plot_blob(c):
plt.plot(c.real, c.imag, 'x')
plt.axis('equal')
plt.show()
plot_blob(complex_blob(500, scale=0.1))
# -
# ## Perturbing blobs
#
# To model the kind of "noise" that I think probably gets added to hathitrust volumes, when represented as semantic phasor embeddings, we can **randomly turn the blob by some amount**, and then also **add a bit of noise** to the position of each phasor.
#
# These operations can themselves be done with more complex blobs! To rotate, we multiply all the phasors by a normalized complex number. To add noise, we add a relatively small complex number to each phasor.
#
# Let's begin by creating a blob of 100 phasors.
start = complex_blob(100)
# Now we do the above two operations. Inside the `perturb_blob` function, `offset` contains the rotated blobs. `noisy` has the same blobs with a bit of unique noise added to each one. We create 100 copies and plot the first two to show how these perturbations change the shape of the data.
# +
def perturb_blob(start, n=100):
    """Randomly rotate the starting blob and add a little bit of noise.

    Returns (offset, noisy): n randomly-rotated copies of `start`, and the
    same copies with independent Gaussian noise added to every phasor.
    """
    # NOTE(review): c / (c * c.conjugate()) equals 1 / c.conjugate(), whose
    # magnitude is 1/|c| — so besides rotating by the angle of c this also
    # rescales the whole blob by 1/|c|. A pure rotation would divide by
    # abs(c). Confirm whether the rescaling is intended before changing.
    offset = [start * c / (c * c.conjugate()) for c in complex_blob(n)]
    # Per-copy independent noise, small relative to the unit-scale blob.
    noisy = [o + complex_blob(len(start), scale=0.1) for o in offset]
    return offset, noisy
offset, noisy = perturb_blob(start)
plot_blob(offset[0])
plot_blob(noisy[0])
plot_blob(offset[1])
plot_blob(noisy[1])
# -
# ## Generating a distinctive signature for noisy blobs that came from the same starting point
#
# Our goal is to generate a "signature" that will identify perturbed blobs that came from the same initial blob. These signatures will be vectors, and they should cluster together tightly when they correspond to blobs from the same starting point.
#
# This entails figuring out a way to denoise the phasor blob and undo (or at least mask) the rotation. Since the noise being added is independently distributed, we can pick a random subset of noisy phasors and take the average. This will attenuate the noise, because the noise will tend to go in all directions, and on average, will cancel itself out. So in theory this average phasor will be very similar for blobs that came from the same starting point.
#
# This also gives us a stable reference point for measuring relative rotation. We can't tell how much the blob was rotated, but given a stable reference point, we can measure the relative position of other phasors. So now, we chose another random subset of noisy phasors, and divide by the normalized value of the average phasor above. That gives us our signature!
#
# Unfortunately...
def signature_fail_1(blob):
    """First (failing) signature attempt, kept as a negative example.

    Averages the first ten phasors as a reference, then expresses the next
    ten relative to it. The notebook text below explains why this fails.
    """
    # Mean of the first ten phasors, used as the reference direction.
    cent = sum(blob[:10]) / 10
    # NOTE(review): cent * cent.conjugate() is |cent|^2 (not |cent|), so
    # cnorm has magnitude 1/|cent| rather than 1 — verify against the
    # "normalized value" description in the surrounding prose.
    cnorm = cent / (cent * cent.conjugate())
    return numpy.array([c / cnorm for c in blob[10:20]])
# ...it doesn't work. These signatures aren't close together at all.
# +
signatures = [signature_fail_1(n) for n in noisy]
diff_mags = [(s * s.conjugate()).real
for sig in signatures[1:]
for s in signatures[0] - sig]
print("Maximum difference beteween signature phasors:")
print(max(diff_mags))
print()
print("Average differene between signature phasors:")
print(numpy.mean(diff_mags))
print()
print('Yuck.')
# -
# The maximum difference between two signature phasors is enormous, and the average difference is also really big.
#
# ## What has gone wrong?
#
# Let's consider this hypothesis: when the average phasor is based on a lot of points that are close to the origin, small absolute error terms can still cause extreme phase errors. This could even be happening when the points are far from the origin but distributed in a way that places the average near the origin.
#
# To test this hypothesis, let's see what happens if we pick phasors that are far way from the origin, and instead of taking their average directly, we just take the average of their phase. Here that means taking the geometric mean of their normalized values.
#
# So let's sort the phasors by their magnitude and pick the largest. Because there's some noise, we might not always pick exactly the same phasors, but we can hope that the outliers will mostly be the same, and that the difference won't throw off the average too much.
# +
def mag(c):
    """Return the squared magnitude |c|^2 of a complex number as a real number.

    The original returned c * c.conjugate() directly, which is a *complex*
    value (with zero imaginary part); complex values do not support ordering
    comparisons, which breaks consumers such as sorted() in signature().
    Taking .real preserves the numeric value while yielding an orderable
    real number.
    """
    return (c * c.conjugate()).real
def norm(c):
    """Return c scaled to unit magnitude (the phase direction of c).

    The original divided by mag(c) = |c|^2, which produces a result of
    magnitude 1/|c| instead of 1; dividing by |c| actually normalizes.
    Undefined for c == 0 (division by zero), as before.
    """
    return c / abs(c)
def signature(blob):
    """Scale-based signature: the first ten phasors divided by the geometric
    mean of the ten largest squared magnitudes in the blob.

    NOTE(review): mag() returns |c|^2, so `offset` is the geometric mean of
    *squared* magnitudes, and it is a real scale factor — dividing by it
    rescales but does not compensate rotation, despite the surrounding
    discussion of phase averaging. Also, numpy.prod of ten large values can
    overflow; a mean-of-logs formulation would be numerically safer.
    Confirm intent before changing.
    """
    top10_mag = sorted(mag(c) for c in blob)[-10:]
    offset = numpy.prod(top10_mag) ** (1 / 10)
    return numpy.array([c / offset for c in blob[:10]])
# -
# This time it seems to work pretty well!
# +
signatures = [signature(n) for n in noisy]
diff_mags = [(s * s.conjugate()).real
for sig in signatures[1:]
for s in signatures[0] - sig]
print("Maximum difference beteween signature phasors:")
print(max(diff_mags))
print()
print("Average differene between signature phasors:")
print(numpy.mean(diff_mags))
print()
print('Yay!')
# -
# If the noise that gets added to hathitrust volumes is similar enough to gaussian noise, then these signatures are likely to be particularly effective for identifying duplicates.
|
notebooks/phasor-perturbation-demo.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import sys
sys.path.append('../../python')
import caffe
import io
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import scipy.misc
import time
import pdb
import glob
import pickle as pkl
import random
import h5py
from multiprocessing import Pool
from threading import Thread
import skimage.io
import copy
file_path = 'ucsdPed1_image.txt'
random.seed(10)
f = open(file_path,'r')
f_lines = f.readlines()
f.close()
video_dict = {}
current_line = 0
path_to_images = '/usr/not-backed-up/1_DATABASE/UCSD_Anomaly_Dataset.tar/UCSD_Anomaly_Dataset.v1p2/UCSDped1/Train/'
# print(path_to_images)
video_order = []
for ix, line in enumerate(f_lines):
video = line.split(' ')[0].split('/')[1]
print(video) # add anything in the end of each line -> keep [0] will remove \n
frames = glob.glob('%s%s/*.tif' %(path_to_images, video))
num_frames = len(frames)
video_dict[video]={}
# video_dict[video]['frames'] = frames[0].split('.')[0] + '.%04d.tif'
# a = frames[0].split('.')[0] + '.%04d.tif'
# print(a)
# print(num_frames)
video_order.append(video)
# input("pause")
print(video_order)
a = glob.glob('/usr/not-backed-up/1_DATABASE/UCSD_Anomaly_Dataset.tar/UCSD_Anomaly_Dataset.v1p2/UCSDped1/Train/Train001/*.tif')
print(len(a))
idx_list = range(0,7)
|
examples/LRCN_activity_recognition/patch_data_layer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (storedna)
# language: python
# name: storedna
# ---
# # Score Data with a Ridge Regression Model Trained on the Diabetes Dataset
# This notebook loads the model trained in the Diabetes Ridge Regression Training notebook, prepares the data, and scores the data.
import json
import numpy
from azureml.core.model import Model
import joblib
# ## Load Model
model_path = Model.get_model_path(model_name="sklearn_regression_model.pkl")
model = joblib.load(model_path)
# ## Prepare Data
# +
raw_data = '{"data":[[1,2,3,4,5,6,7,8,9,10],[10,9,8,7,6,5,4,3,2,1]]}'
data = json.loads(raw_data)["data"]
data = numpy.array(data)
# -
# ## Score Data
# +
request_headers = {}
result = model.predict(data)
print("Test result: ", {"result": result.tolist()})
|
experimentation/Diabetes Ridge Regression Scoring.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
from complete import *
import pickle
from simtk import unit
# i will start by extracting benzene in solvent and running the vanilla `annealed_importance_sampling` on it
# Load the pre-built hybrid-topology factories: one for the solvated phase and
# one for the vacuum (subset) phase of the benzene -> methylbenzene pair.
with open('benzene_methylbenzene.solvent.factory.pkl', 'rb') as f:
    factory = pickle.load(f)
with open('benzene_methylbenzene.vacuum.factory.pkl', 'rb') as f:
    subset_factory = pickle.load(f)
# below, i will extract and save the solvated positions and box vectors as `.npy` files
# +
# solvated_positions = np.array([factory._old_positions.value_in_unit(unit.nanometers)])
# sbv = factory._old_system.getDefaultPeriodicBoxVectors()
# box_vectors = np.array([[[sbv[0][0].value_in_unit(unit.nanometers), 0., 0.], [0., sbv[1][1].value_in_unit(unit.nanometers), 0.], [0., 0., sbv[2][2].value_in_unit(unit.nanometers)]]])
# +
# np.save('benzene.solvated_positions.npy', solvated_positions)
# np.save('benzene.box_vectors.npy', box_vectors)
# -
# we need to extract the appropriate topologies and systems from the factories
# +
#extract system and vacuum system
print(f"defining systems")
endstate = 'old'  # 'old' = the benzene endstate of the transformation
system = getattr(factory._topology_proposal, f"_{endstate}_system")
system_subset = getattr(subset_factory._topology_proposal, f"_{endstate}_system")
#now we have to extract the appropriate subset indices...
print(f"extracting appropriate subset indices...")
omm_topology = getattr(factory._topology_proposal, f"_{endstate}_topology")
subset_omm_topology = getattr(subset_factory._topology_proposal, f"_{endstate}_topology")
# NOTE(review): `md` is not imported in this file -- presumably mdtraj brought
# into scope by `from complete import *`; confirm.
md_topology = md.Topology.from_openmm(omm_topology)
subset_md_topology = md.Topology.from_openmm(subset_omm_topology)
mol_indices = md_topology.select('resname MOL')
mol_atoms = [atom for atom in md_topology.atoms if atom.index in mol_indices]
subset_mol_indices = subset_md_topology.select('resname MOL')
# NOTE(review): the filter below uses `mol_indices` (solvated-phase indices),
# not `subset_mol_indices` -- looks like a copy/paste slip that only works when
# the ligand occupies the same index range in both phases; verify.
subset_mol_atoms = [atom for atom in subset_md_topology.atoms if atom.index in mol_indices]
mol_atom_dict = {i:j for i, j in zip(mol_indices, subset_mol_indices)} #mol atom dict is complex-to-vacuum indices
save_indices = md_topology.select('not water')
#print what was extracted:
print(f"atom extractions...")
print(f"\t(vacuum_atom, atom_in_environment)")
for subset_atom, atom in zip(subset_mol_atoms, mol_atoms):
    print(f"({subset_atom.name}, {atom.name})")
# -
# Annealing schedule: one application of 5 nonequilibrium steps, writing every frame.
number_of_applications = 1
steps_per_application = 5
write_trajectory_interval = 1
works = annealed_importance_sampling(system = system,
                                     system_subset = system_subset,
                                     subset_indices_map = mol_atom_dict,
                                     endstate_positions_cache_filename = 'benzene.solvated_positions.npy',
                                     directory_name = None,
                                     trajectory_prefix = None,
                                     md_topology = md_topology,
                                     number_of_applications = int(number_of_applications),
                                     steps_per_application = int(steps_per_application),
                                     integrator_kwargs = {'temperature': 300.0 * unit.kelvin,
                                                          'collision_rate': 1.0 / unit.picoseconds,
                                                          'timestep': 1.0 * unit.femtoseconds,
                                                          'splitting': "V R O R F",
                                                          'constraint_tolerance': 1e-6,
                                                          'pressure': 1.0 * unit.atmosphere},
                                     save_indices = save_indices,
                                     position_extractor = None, #solvent_factory.new_positions
                                     write_trajectory_interval = write_trajectory_interval,
                                     endstate_box_vectors_cache_filename = 'benzene.box_vectors.npy'
                                     )
# here, i am basically copying the `annealed_importance_sampling` method in order to play with some internal parameters
works
|
tests.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
import matplotlib.pyplot as plt
from eigenwell.src.grid import *
# ## gradually scaled non-uniform grid
# +
# 1) grid sizes (nx, ny) for the fine, coarse and transition regions
Nfine = [100,100]; #%specify nx, ny for each region
Ncoarse = [50,50];
Ntran = [50,50];
# 2) specify the dx and dy of each region
dx1 = 0.02; dy1 = 0.02;
dx2 = 0.005; dy2 = 0.005;
dfine = [dx2, dy2];
dcoarse = [dx1, dy1];
dtran = [0 ,0];  # transition spacing is interpolated later, hence a 0 placeholder
#3) stack the vectors
#drt does not have a value for the transition rows (filled by grading below)
# Region layout along each axis: coarse | transition | fine | transition | coarse
Nft = np.vstack((Ncoarse, Ntran, Nfine, Ntran, Ncoarse));
drt = np.vstack((dcoarse, dtran, dfine, dtran, dcoarse));
dr_mask = np.ones((np.sum(Nft[:,0]),np.sum(Nft[:,1]),2)); #mask stores dx, dy for every grid cell?
print(Nft)
print(Nft,np.sum(Nft[:,0]),np.sum(Nft[:,1]))
# # we need a base scale dl
# #scale is arbitrary, just take dcoarse;
# dr_reference = dcoarse;
# #4) construct scaling vectors from this information
# [dx_scale, dy_scale] = generate_nonuniform_scaling(Nft, drt./dr_reference);
# ## calculate Ntot and Ltot
# N = sum(Nft);
# Lx = sum(dr_reference(1)*dx_scale);
# Ly = sum(dr_reference(2)*dy_scale);
# xrange = 0.5*[-Lx, Lx];
# yrange = 0.5*[-Ly, Ly];
# xrange_array = cumsum(dr_reference(1)*dx_scale)-Lx/2;
# yrange_array = cumsum(dr_reference(1)*dy_scale)-Ly/2;
# Nx = N(1); Ny = N(2);
# ## output is a dxscale...dyscale
# +
# Build per-cell spacing ("scaling") vectors dx_scale, dy_scale for the stacked
# regions.  Uniform regions are filled first; transition regions are then
# filled with a logarithmic grading between the two neighbouring spacings.
Nx = np.sum(Nft[:,0]);
Ny = np.sum(Nft[:,1]);
dx_scale = np.ones(Nx)
dy_scale = np.ones(Ny);
num_regions = Nft.shape[0]; #iterate through 0,2,4
x0 = y0 = 0;
# pass 1: constant spacing inside the uniform (even-indexed) regions
for i in range(0,num_regions,2):
    dx_scale[x0:x0+Nft[i,0]] = drt[i,0];
    dy_scale[y0:y0+Nft[i,1]] = drt[i,1];
    if(i==num_regions-1): #%no transition after last region
        x0 = x0+Nft[i,0];
        y0 = y0+Nft[i,1];
    else:
        # skip over the following transition region as well
        x0 = x0+Nft[i,0]+Nft[i+1,0];
        y0 = y0+Nft[i,1]+Nft[i+1,1];
print(dx_scale)
# pass 2: log-spaced grading through the transition (odd-indexed) regions.
# NOTE(review): the start offset uses Nft[1,0]; the offset of the first
# transition is really Nft[0,0] -- identical here only because
# Ncoarse == Ntran == [50,50].  Verify if region sizes change.
x0 = Nft[1,0]; y0 = Nft[1,1];
for i in range(1, num_regions,2): #2:2:num_regions
    dx1 = drt[i-1,0]; dx2 = drt[i+1,0];
    dy1 = drt[i-1,1]; dy2 = drt[i+1,1];
    nxt = Nft[i,0]; nyt = Nft[i,1];
    # nxt+1 samples so the grading also overwrites the last cell of the
    # preceding uniform region (slice starts at x0-1), joining smoothly
    grading_x = np.logspace(np.log10(dx1), np.log10(dx2), nxt+1);
    grading_y = np.logspace(np.log10(dy1), np.log10(dy2), nyt+1);
    dx_scale[x0-1:x0+nxt] = grading_x;
    dy_scale[y0-1:y0+nyt] = grading_y;
    x0 = x0+Nft[i,0]+Nft[i+1,0];
    y0 = y0+Nft[i,1]+Nft[i+1,1];
print(dx_scale)
plt.plot(dx_scale)
## ========================================================================
## integrate into an operator
## ========================================================================
# Diagonal scaling matrices used to turn uniform-grid derivative operators
# into non-uniform-grid ones.
# NOTE(review): `sp` is not imported in this file -- presumably scipy.sparse
# via `from eigenwell.src.grid import *`; confirm.
[Xs, Ys] = np.meshgrid(dx_scale, dy_scale);
#meshgrid isn't right for y
M = np.prod(Xs.shape)
# we have to this kind of flip because the flattening
# operation (:) doesn't retain row-major order
Ys=Ys.T; Xs = Xs.T;
Fsy = sp.spdiags(Ys.flatten(),0,M,M);
Fsx = sp.spdiags(Xs.flatten(),0,M,M);
# might as well construct the conjugate grid. What is the conjugate grid?
# Conjugate (half-shifted) grid spacing: average of each cell's spacing and
# its predecessor's.  np.roll(a, [0, 1]) is equivalent to np.roll(a, 1) here.
xc = (dx_scale+np.roll(dx_scale,[0,1]))/2;
yc = (dy_scale+np.roll(dy_scale,[0,1]))/2;
[Xc, Yc] = np.meshgrid(xc, yc);
Xc = Xc.T;
Yc = Yc.T;
Fsy_conj = sp.spdiags(Yc.flatten(),0,M,M);
Fsx_conj = sp.spdiags(Xc.flatten(),0,M,M);
# Dxf = Fsx^-1*createDws('x', 'f', dL, N);%*Fsx;
# Dyf = Fsy^-1*createDws('y', 'f', dL, N);%*Fsy;
# Dyb = Fsy_conj^-1*createDws('y', 'b', dL, N);%*Fsx_conj;
# Dxb = Fsx_conj^-1*createDws('x', 'b', dL, N);%*Fsy_conj;
# -
## visualizing a non-uniform grid
plt.figure(figsize = (20,20))
ax = plt.subplot(1,1,1)
plt.pcolormesh(Xs.T, Ys.T, np.zeros_like(Xs), cmap = 'cool', linewidth = 0.25, color = 'black')
plt.show();
## PML specification
Npml = [20,20];
|
notebooks/non uniform grid.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={"base_uri": "https://localhost:8080/"} id="FaXqzO92BNMc" outputId="f0444645-dad3-47b6-b40f-335abf5971cf"
from google.colab import drive
drive.mount('/content/drive/')
# + colab={"base_uri": "https://localhost:8080/"} id="Dj_NC4gtEcHQ" outputId="cf78e099-b0df-4cdd-a2b0-b2fc20dcbf4a"
pip install scikit-plot
# + id="EYc_87KRE6qH"
#importing libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix,auc,roc_auc_score,f1_score
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split,RandomizedSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from scikitplot.metrics import plot_confusion_matrix
from tqdm import tqdm
import re
from bs4 import BeautifulSoup
# %matplotlib inline
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# + colab={"base_uri": "https://localhost:8080/", "height": 683} id="CAqxYFZGE_-S" outputId="4be5a27a-3cd5-4a66-a106-ca7b5c6f4aa8"
#read the data from google drive its name is Review.csv file
reviews=pd.read_csv("/content/drive/MyDrive/Reviews.csv")
reviews=reviews[:40000]
reviews.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 683} id="DRAxVdrxGGdr" outputId="2119e380-b0f8-4cbd-b975-16e92f0f921c"
#Let's convert our scores to three labels: positive (score>3) -> 1, negative (score<3) -> 0, neutral (score==3) -> 2
def label(x):
    """Map a raw review score to a sentiment class.

    Scores above 3 are positive (1), below 3 negative (0),
    and exactly 3 neutral (2).  A score that compares false to
    all three tests (e.g. NaN) falls through and yields None,
    matching the original behaviour.
    """
    if x > 3:
        return 1
    if x < 3:
        return 0
    if x == 3:
        return 2
reviews["Score"]=reviews["Score"].map(label)
reviews.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} id="VoNAGEE4GVXp" outputId="674e9072-ca1b-47d9-af1f-6d61c1d78737"
#lets check if our data is having duplicate values
reviews[reviews[["UserId","ProfileName","Time","Text"]].duplicated()]
# + colab={"base_uri": "https://localhost:8080/"} id="6NlQ-IN6GgO0" outputId="40f561c8-190e-4e9c-cafe-541c88d5469f"
# we can see in the above cell that there are duplicates in our data. Lets drop all of them.
print("Data set size before dropping duplicates",reviews.shape)
reviews_df = reviews.drop_duplicates(subset={"UserId","ProfileName","Time","Text"},keep='first')
print("Data set size after dropping duplicates",reviews_df.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="7jfVEnkrGk6x" outputId="d35ddc26-11d3-421c-d914-16a84ff8427f"
#let us find if our data have any missing values
# from now on we deal only with Text and Score columns, Text is our feature and score is our label.
reviews_df[["Text","Score"]].isnull().any()
# + colab={"base_uri": "https://localhost:8080/"} id="0aaFnJlJGp2D" outputId="c6f0bb4a-23f2-4666-8704-52cf6d4c6c78"
print("Amount of data retianed is : ", reviews_df.shape[0]/reviews.shape[0])
# + colab={"base_uri": "https://localhost:8080/", "height": 267} id="Mb1sBTZHGuFm" outputId="15a15edf-e8f7-4e33-a9ec-05c627752cd4"
plt.bar(reviews_df["Score"].unique(),reviews_df["Score"].value_counts())
plt.xticks([0,1,2])
plt.show()
# + colab={"base_uri": "https://localhost:8080/", "height": 397} id="DAaWh2EAGxx5" outputId="e3fd3f3b-e23b-4cd7-e7ae-504083d8af72"
plt.pie(reviews_df["Score"].value_counts(),autopct='%1.0f%%',radius=2,labels=reviews_df["Score"].unique(),colors=["g","r","y"])
plt.title("Labels")
# + [markdown] id="-1C3XptRG46Z"
# # Observations
# After removing duplicates and missing values we were able to retain 93.6% of actual data.
# From the above bar plot we can clearly see that our data is imbalanced
# + [markdown] id="Rb5mKD9QHLnJ"
# # 3. Data Preprocessing
# Though we removed noise data, we need to make sure that our data is clean with text data comes a lot of unwanted characters, symbols, numbers and common words which adds no value to the model's performance so we will try to remove these unwanted characters to get a clean data
# + colab={"base_uri": "https://localhost:8080/", "height": 103} id="hgchqMa7HaUa" outputId="5b556cce-59da-4fd5-8200-f9539b5a8d4f"
#21,15,28
review34=reviews_df["Text"][34]
review34
# + colab={"base_uri": "https://localhost:8080/"} id="merbs5b_HeU3" outputId="dafc2024-ab63-4889-fc86-67937f0bc9bd"
import nltk
nltk.download('punkt')
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
reviews_df=reviews_df[["Text","Score"]]
# + colab={"base_uri": "https://localhost:8080/"} id="QUROSlw5HjSK" outputId="e5d96163-0d04-4deb-db34-3f6ddfd4b485"
print(stop_words)
# + id="dzPLJMq8HofZ"
#let us remove word not from stop words, since it is the one of the most important word in classifing the review.
stop_words.remove("not")
# + id="jNTvL7lSHtMJ"
def text_Preprocessing(reviews):
    """Clean and normalise raw review texts for TF-IDF featurisation.

    For each review: strip HTML tags, drop tokens containing digits,
    expand "n't" contractions to " not" (done *before* punctuation removal,
    otherwise the apostrophe is already gone and the negation is lost),
    remove non-alphabetic characters, lowercase, tokenize, drop stop words
    and apply Porter stemming.

    Parameters
    ----------
    reviews : iterable of str
        Raw review texts.

    Returns
    -------
    list of str
        One cleaned, space-joined string of stemmed tokens per review.
    """
    stemmer = PorterStemmer()  # hoisted: one stemmer instead of one per word
    pre_processed_reviews = []
    for review in tqdm(reviews):
        review = BeautifulSoup(review, 'lxml').getText()  # remove html tags
        review = re.sub('\\S*\\d\\S*', '', review).strip()  # drop tokens containing digits
        # BUGFIX: expand negative contractions BEFORE stripping punctuation;
        # previously this ran after '[^A-Za-z]+' had removed the apostrophe,
        # so "n't" never matched and negations were silently dropped.
        review = re.sub("n't", " not", review)
        review = re.sub('[^A-Za-z]+', ' ', review)  # remove special chars
        review = word_tokenize(str(review.lower()))  # tokenize into word tokens
        # drop stop words (note: "not" was removed from stop_words above) and stem
        review = ' '.join(stemmer.stem(word) for word in review if word not in stop_words)
        pre_processed_reviews.append(review.strip())
    return pre_processed_reviews
# + colab={"base_uri": "https://localhost:8080/", "height": 86} id="swa_BYGnHyQU" outputId="74c397b1-9e31-42c7-a6c8-b9659fce378e"
preprocessed_reviews=text_Preprocessing(reviews_df["Text"])
preprocessed_reviews[34]
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="caHGr-y6ICWo" outputId="f14f57a0-7612-4629-d961-a3a3d51f7349"
preprocessed_reviews=pd.DataFrame({"text":preprocessed_reviews,"sentiment":reviews_df.Score})
preprocessed_reviews.head()
# + colab={"base_uri": "https://localhost:8080/"} id="r2CTRkDcISUh" outputId="58d5f804-72e6-419b-9073-0f4de4bd0878"
preprocessed_reviews.sentiment.value_counts()
# + [markdown] id="nilVZjOFIWtg"
# # Observations:
# we cleaned our text data, removed unnecessary tags
# Though we cleaned our data, it is still in string format which computers won't understand, for this we use text featuration
# + [markdown] id="5MqpLRX7Ifbq"
# # 3.1.Featurization
# + colab={"base_uri": "https://localhost:8080/"} id="DYR9ueh6IkQ_" outputId="138f3d59-232b-4211-c822-9a570db92f77"
#It is best practice to split the data Before we do text featurization
reviews_train,reviews_test,sentiment_train,sentiment_test=train_test_split(preprocessed_reviews.text,preprocessed_reviews.sentiment)
print(reviews_train.shape,reviews_test.shape)
print(sentiment_train.shape,sentiment_test.shape)
# + colab={"base_uri": "https://localhost:8080/"} id="jj0EcMUeIrB-" outputId="c90cd901-a36b-4629-aac6-b371da8a9c4a"
tfidf_model=TfidfVectorizer(ngram_range=(1,2),min_df=10, max_features=6000)
tfidf_model.fit(reviews_train,sentiment_train)
reviews_train_tfidf=tfidf_model.transform(reviews_train)
reviews_test_tfidf=tfidf_model.transform(reviews_test)
reviews_train_tfidf.shape,reviews_test_tfidf.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 456} id="fTwu34Y6Ivqg" outputId="c943fb97-ed2a-4302-a9b2-14c7a9af62ed"
tfidf_df=pd.DataFrame(reviews_train_tfidf.toarray(),columns=tfidf_model.get_feature_names(),index=reviews_train.index)
tfidf_df
# + id="NCR-Ht3HIzvP"
# Lets checkout the top features\n",
top_features=sorted(zip(tfidf_model.idf_,tfidf_model.get_feature_names()))
top10=top_features[:10]
# + colab={"base_uri": "https://localhost:8080/", "height": 388} id="w6poFn7QI4t6" outputId="1ba24953-68c9-4c91-bcbe-a8956a76c223"
from wordcloud import WordCloud
plt.figure(figsize=(10,8))
wc = WordCloud(background_color="black",max_font_size=150, random_state=42)
wc.generate(str(top10))
plt.imshow(wc, interpolation='bilinear')
plt.suptitle('Top 10 words', size=30, y=0.88,color="r");
plt.axis("off")
plt.savefig("top10_words.png")
plt.show()
# + [markdown] id="qXrSnvxTI_Sj"
# # 4. Modeling
# + [markdown] id="pRkMDTAhJCwj"
# # 4.1. Logistic regression
# + colab={"base_uri": "https://localhost:8080/"} id="S4_aRm3-JHFo" outputId="de0d761c-5097-44ce-c9bf-7a9d00d7717b"
# Logistic Regression with default parameters\n",
lr=LogisticRegression(max_iter=1000)
lr.fit(reviews_train_tfidf,sentiment_train)
lr_predict=lr.predict(reviews_test_tfidf)
plain_lr_f1=f1_score(sentiment_test,lr_predict,average="weighted")
plain_lr_f1
# + colab={"base_uri": "https://localhost:8080/", "height": 295} id="aLleG7ANJN5o" outputId="1c061456-2e91-4cc7-cd04-91e79473edee"
plot_confusion_matrix(sentiment_test,lr_predict,normalize=True)
plt.title("Linear Regression with defult params")
plt.show()
# + colab={"base_uri": "https://localhost:8080/"} id="6fdX5onjJR9p" outputId="1a9060ab-543c-4125-bc70-3346bd9b7a02"
# Tune Logistic Regression hyper-parameters with RandomizedSearchCV
# (search space: L1/L2 penalty, C over 1e-4 .. 1e3).
lr_params={"penalty":["l1","l2"],"C":[10**i for i in range(-4,4)]}
lr=LogisticRegression( max_iter=1000,solver="liblinear")
lr_rnm_clf=RandomizedSearchCV(lr,lr_params)
# NOTE(review): `oversampled_trainX` / `oversampled_trainY` are not defined
# anywhere in this file -- presumably produced by a class-balancing
# (oversampling) cell that is missing here; verify before running.
lr_rnm_clf.fit(oversampled_trainX,oversampled_trainY)
lr_rnm_clf.best_params_
# + colab={"base_uri": "https://localhost:8080/"} id="1JUzPwxBLiee" outputId="410faa19-f878-4bb5-a91c-548159d25884"
lr_bal=LogisticRegression(**lr_rnm_clf.best_params_, max_iter=1000,solver="liblinear")
lr_bal.fit(oversampled_trainX,oversampled_trainY)
lr_bal_predict=lr_bal.predict(reviews_test_tfidf)
lr_bal_f1=f1_score(lr_bal_predict,sentiment_test,average="weighted")
lr_bal_f1
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="C3_7_QYrMNp7" outputId="70bc48c8-8a11-4a08-ef66-e197235af9f4"
plot_confusion_matrix(sentiment_test,lr_bal_predict,normalize=True)
plt.title("Logistic regression Confusion matrix",size=15)
# + [markdown] id="JkG_PctKMSzf"
# # 4.2. Decision tree
# + id="OgRloO-BMYZa"
from sklearn.tree import DecisionTreeClassifier
dt_param={'max_depth':[i for i in range(5,2000,3)],'min_samples_split':[i for i in range(5,2000,3)]}
dt_clf=DecisionTreeClassifier()
rndm_clf=RandomizedSearchCV(dt_clf,dt_param)
rndm_clf.fit(oversampled_trainX,oversampled_trainY)
dt_best_params=rndm_clf.best_params_
# + colab={"base_uri": "https://localhost:8080/"} id="lcRvjiVGTfrZ" outputId="406c1331-6293-4d50-cc88-b76bc0af15c6"
dt_clf=DecisionTreeClassifier(**dt_best_params)
dt_clf.fit(oversampled_trainX,oversampled_trainY)
dt_predict=dt_clf.predict(reviews_test_tfidf)
dt_f1=f1_score(sentiment_test,dt_predict,average="weighted")
dt_f1
# + colab={"base_uri": "https://localhost:8080/", "height": 316} id="4H26yMsRT7Ji" outputId="a57e211c-46d1-4dbd-e404-ab2e87bd1654"
plot_confusion_matrix(sentiment_test,dt_predict,normalize=True)
plt.title("Decision Tree Confuison matrix",size=18)
# + [markdown] id="OI_gkJ4QUB7j"
# # 4.3. Naive Bayes
# + colab={"base_uri": "https://localhost:8080/"} id="eCHa1NflULzs" outputId="404384c4-ed4e-44ef-d864-6a50051c02de"
nb_params={"alpha":[10**i for i in range(-5,5)]}
nb_clf=MultinomialNB()
rndm_clf=RandomizedSearchCV(nb_clf,nb_params)
rndm_clf.fit(oversampled_trainX,oversampled_trainY)
rndm_clf.fit(oversampled_trainX,oversampled_trainY)
nb_best_params=rndm_clf.best_params_
nb_clf=MultinomialNB(**nb_best_params)
nb_clf.fit(oversampled_trainX,oversampled_trainY)
nb_predict=nb_clf.predict(reviews_test_tfidf)
nb_f1=f1_score(sentiment_test,nb_predict,average="weighted")
nb_f1
# + colab={"base_uri": "https://localhost:8080/", "height": 314} id="HpUaN8FJUzAu" outputId="7a7f7463-07e0-4d54-83c8-2515c8dda4cc"
plot_confusion_matrix(sentiment_test,nb_predict,normalize=True,cmap="Reds")
plt.title("Naive Bayes Confusion Matrix",size=15)
# + [markdown] id="8dFyBGG0U3bA"
# # 5. Model evaluation
# + id="J-5a83yRVpMo"
models=["LogesticRegression","DecisionTrees","NaiveBayes"]
f1_scores=[lr_bal_f1,dt_f1,nb_f1]
# + colab={"base_uri": "https://localhost:8080/", "height": 361} id="8CSMyL_PVuVc" outputId="83b40d44-7dc5-4169-bd06-195870083ab6"
plt.figure(figsize=(6,5))
plt.barh(models,f1_scores,color=['c','r','m'])
plt.title("F1 Scores of all models",size=20)
for index, value in enumerate(f1_scores):
plt.text(0.9,index,str(round(value,2)))
plt.xlabel('F1_SCores',size=15)
plt.ylabel("Models",size=15)
plt.savefig("f1_scores.png")
plt.show()
# + [markdown] id="NQhQnMMrVx49"
# # Observations:
# After cross checking the confusion matrices of above models, Naive Bayes is slightly better than rest of the models.
# We will select Naive Bayes for our problem, lets Pickle the model for later use
# + id="AKaYlnU0V5ef"
# lets save the model
import pickle
pickle.dump(nb_clf,open("nb_clf.pkl","wb"))
pickle.dump(tfidf_model,open("tfidf_model.pkl","wb"))
|
FlaskFiles/Model_Code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.6.9 64-bit
# language: python
# name: python3
# ---
#
# 
#
#
#
# Fig. 12. Keypoints identified by OpenPose
# The JSON data is fetched and stored in numpy arrays in sequences of 45 frames which is
# about 1.5 seconds of the video [2].
# 60% of the dataset has been used for training,
# 20% for testing
# and 20% for validation.
#
# The training data has 7989 sequences of 45 frames, each containing the
#
# 2D coordinates of the 18 keypoints captured by OpenPose.
# The validation data consists of 2224
# such sequences and the test data contains 2598 sequences.
# The number of frames varied from 60,20,20 split at the video level.
# This was because of the difference in duration of videos.
#
# +
import torch
import numpy as np
import numpy as np
import pandas as pd
import ast
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader, TensorDataset
from os import listdir
# check if CUDA is available
train_on_gpu = torch.cuda.is_available()
if not train_on_gpu:
print('CUDA is not available. Training on CPU ...')
else:
print('CUDA is available! Training on GPU ...')
batch_size = 5
# +
filepaths = [ str("./20220201") + "/" + str(f) for f in listdir("./20220201/") if f.endswith('.csv')]
data = pd.concat(map(pd.read_csv, filepaths))
data.drop(data.columns[0], axis=1, inplace=True)
y = data[['1']]
x = data.drop(['1','2'] , axis=1)
x_train, x_test, y_train, y_test = train_test_split(x, y,test_size=0.2)
X = []
for i in x_train.values:
X.append(np.array(ast.literal_eval(i[0]))[0].T.astype(int))
x_train = np.array(X)
x_train = x_train[:, :2 , : ]
X_t = []
for i in x_test.values:
X_t.append(np.array(ast.literal_eval(i[0]))[0].T.astype(int))
x_test = np.array(X_t)
x_test = x_test[:, :2 , : ]
train_data = TensorDataset(torch.tensor(np.array(x_train) , dtype=torch.float) , torch.tensor(np.array(y_train).squeeze() , dtype=torch.long))
train_loader = DataLoader(train_data, batch_size=5, shuffle=True)
valid_data = TensorDataset(torch.tensor(np.array(x_test) , dtype=torch.float) , torch.tensor(np.array(y_test).squeeze() , dtype=torch.long))
valid_loader = DataLoader(valid_data, batch_size=5, shuffle=True)
classes = ['laying','setting', 'standing']
x_train[0]
# -
# ---
# ##### A one dimensional, one-layer CNN with 16 filters of size 3 x 3 is trained on the OpenPose keypoints.
#
# * The input shape is 18 x 2 which signifies the 18 keypoints having X and Y coordinates.
#
# * Batch normalization is applied to the output of the CNN layer so that the model converges faster.
#
# * We also have a dropout layer that prevents overfitting by randomly dropping some fraction of the
# weights.
#
# * The activation function used is Rectified Linear Unit (ReLU) which is applied for feature
# extraction on keypoints of each frame.
#
# * The final output is flattened before being passed to the
# dense layer with softmax activation and 6 units where every unit represents the likelihood of a
# yoga pose in cross entropy terms for all 6 classes. The model architecture summary is shown in
#
# 
# ### Define model structure
# +
import torch.nn as nn
import torch.nn.functional as F
# define the CNN architecture
# define the CNN architecture
class Net(nn.Module):
    """1-D CNN pose classifier.

    Pipeline: Conv1d -> BatchNorm -> Dropout -> ReLU -> flatten ->
    Linear -> LogSoftmax.  Takes a (batch, 2, 17) tensor of x/y
    coordinates for 17 keypoints and returns log-probabilities over
    the 3 pose classes.
    """

    def __init__(self):
        super(Net, self).__init__()
        # NOTE: attribute names and creation order are part of the saved
        # checkpoint layout ('model_cifar.pt' is loaded by name later).
        self.conv1 = nn.Conv1d(2, 16, 3, padding=1)   # 16 filters, width 3, same-length output
        self.bnm = nn.BatchNorm1d(16, momentum=0.1)   # normalise each filter channel
        self.fc1 = nn.Linear(272, 3)                  # 272 = 16 channels * 17 positions
        self.dropout = nn.Dropout(.2)
        self.act = nn.ReLU()
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, x):
        features = self.conv1(x)
        features = self.bnm(features)
        features = self.dropout(features)
        features = self.act(features)
        flat = features.view(-1, 17 * 16)
        return self.softmax(self.fc1(flat))
# create a complete CNN
model = Net()
print(model)
# x = torch.rand(4, 2, 18)
# model = Net()
# y_pred = model(x)
# print(y_pred)
# move tensors to GPU if CUDA is available
if train_on_gpu:
model.cuda()
# -
# ## Train the model
#
# +
import torch.optim as optim
# specify loss function (categorical cross-entropy)
criterion = nn.CrossEntropyLoss()
# criterion = nn.BCELoss()
# specify optimizer
optimizer = optim.SGD(model.parameters(), lr=0.01)
# +
# number of epochs to train the model
n_epochs = 10

valid_loss_min = np.inf  # best (lowest) validation loss seen so far
                         # (np.inf, not np.Inf: the alias was removed in NumPy 2.0)

for epoch in range(1, n_epochs+1):
    # keep track of training and validation loss (summed, averaged below)
    train_loss = 0.0
    valid_loss = 0.0

    ###################
    # train the model #
    ###################
    model.train()
    for data, target in train_loader:
        # move tensors to GPU if CUDA is available
        if train_on_gpu:
            data, target = data.cuda(), target.cuda()
        # clear the gradients of all optimized variables
        optimizer.zero_grad()
        # forward pass: compute predicted outputs by passing inputs to the model
        output = model(data)
        # calculate the batch loss
        loss = criterion(output, target)
        # backward pass: compute gradient of the loss with respect to model parameters
        loss.backward()
        # perform a single optimization step (parameter update)
        optimizer.step()
        # update training loss, weighted by batch size so the epoch average is exact
        train_loss += loss.item()*data.size(0)

    ######################
    # validate the model #
    ######################
    model.eval()
    # BUGFIX: removed a leftover `print(data.shape)` that fired for every
    # validation batch; also run validation under no_grad so no autograd
    # graph is built (saves memory, identical losses).
    with torch.no_grad():
        for data, target in valid_loader:
            # move tensors to GPU if CUDA is available
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            # forward pass: compute predicted outputs by passing inputs to the model
            output = model(data)
            # calculate the batch loss
            loss = criterion(output, target)
            # update average validation loss
            valid_loss += loss.item()*data.size(0)

    # calculate average losses
    train_loss = train_loss/len(train_loader.sampler)
    valid_loss = valid_loss/len(valid_loader.sampler)

    # print training/validation statistics
    print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(
        epoch, train_loss, valid_loss))

    # save model if validation loss has decreased
    if valid_loss <= valid_loss_min:
        print('Validation loss decreased ({:.6f} --> {:.6f}). Saving model ...'.format(valid_loss_min,valid_loss))
        torch.save(model.state_dict(), 'model_cifar.pt')
        valid_loss_min = valid_loss
# -
# ---
# # Test you model
model.load_state_dict(torch.load('model_cifar.pt'))
model
# +
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import seaborn as sn
import pandas as pd
# track test loss
test_loss = 0.0
class_correct = list(0. for i in range(3))
class_total = list(0. for i in range(3))
y_pred = []
y_true = []
model.eval()
# iterate over test data
for data, target in valid_loader:
# move tensors to GPU if CUDA is available
if train_on_gpu:
data, target = data.cuda(), target.cuda()
# forward pass: compute predicted outputs by passing inputs to the model
output = model(data)
y_true.extend(target.cpu()) # Save Truth
# calculate the batch loss
loss = criterion(output, target)
# update test loss
test_loss += loss.item()*data.size(0)
# convert output probabilities to predicted class
_, pred = torch.max(output, 1)
y_pred.extend(pred.cpu())
# compare predictions to true label
correct_tensor = pred.eq(target.data.view_as(pred))
print(correct_tensor)
correct = np.squeeze(correct_tensor.numpy()) if not train_on_gpu else np.squeeze(correct_tensor.cpu().numpy())
# calculate test accuracy for each object class
for i in range(batch_size):
label = target.data[i]
class_correct[label] += correct[i].item()
class_total[label] += 1
# average test loss
test_loss = test_loss/len(valid_loader.dataset)
print('Test Loss: {:.6f}\n'.format(test_loss))
for i in range(3):
if class_total[i] > 0:
print('Test Accuracy of %5s: %2d%% (%2d/%2d)' % (
classes[i], 100 * class_correct[i] / class_total[i],
np.sum(class_correct[i]), np.sum(class_total[i])))
else:
print('Test Accuracy of %5s: N/A (no training examples)' % (classes[i]))
print('\nTest Accuracy (Overall): %2d%% (%2d/%2d)' % ( 100. * np.sum(class_correct) / np.sum(class_total),
np.sum(class_correct), np.sum(class_total)))
# Build confusion matrix
cf_matrix = confusion_matrix(y_true, y_pred)
df_cm = pd.DataFrame(cf_matrix/np.sum(cf_matrix) *10, index = [i for i in classes],
columns = [i for i in classes])
plt.figure(figsize = (12,7))
sn.heatmap(df_cm, annot=True)
plt.savefig('output.png')
# -
|
pose_classification/pose_classification.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Twitter sentiment analysis
#
# The data is represented as rows of of JSON strings.
# It consists of [tweets](https://dev.twitter.com/overview/api/tweets), [messages](https://dev.twitter.com/streaming/overview/messages-types), and a small amount of broken data (cannot be parsed as JSON).
#
# ## Tweets
#
# A tweet consists of many data fields. [Here is an example](https://gist.github.com/arapat/03d02c9b327e6ff3f6c3c5c602eeaf8b). You can learn all about them in the Twitter API doc. We are going to briefly introduce only the data fields that will be used in this homework.
#
# * `created_at`: Posted time of this tweet (time zone is included)
# * `id_str`: Tweet ID - we recommend using `id_str` over using `id` as Tweet IDs, because `id` is an integer and may bring some overflow problems.
# * `text`: Tweet content
# * `user`: A JSON object for information about the author of the tweet
# * `id_str`: User ID
# * `name`: User name (may contain spaces)
# * `screen_name`: User screen name (no spaces)
# * `retweeted_status`: A JSON object for information about the retweeted tweet (i.e. this tweet is not original but retweeteed some other tweet)
# * All data fields of a tweet except `retweeted_status`
# * `entities`: A JSON object for all entities in this tweet
# * `hashtags`: An array for all the hashtags that are mentioned in this tweet
# * `urls`: An array for all the URLs that are mentioned in this tweet
#
#
# ## Data source
#
# All tweets are collected using the [Twitter Streaming API](https://dev.twitter.com/streaming/overview).
#
#
# ## Users partition
#
# Besides the original tweets, we will provide you with a Pickle file, which contains a partition over 452,743 Twitter users. It contains a Python dictionary `{user_id: partition_id}`. The users are partitioned into 7 groups.
# # Part 0: Load data to a RDD
# ## Data
# Read file list from `../Data/data_input.txt`.
#
# ## Local test
#
# 1. Make RDD from the list of files in `data_input.txt`.
# 2. Mark the RDD to be cached (so in next operation data will be loaded in memory)
# 3. call the `print_count` method to print number of lines in all these files
#
# It should print
# ```
# Number of elements: 2193
# ```
# -*- coding: utf-8 -*-
#import timeit
#t2=timeit.default_timer()
def print_count(rdd):
    """Print the number of elements in *rdd* (Python 2 print statement)."""
    print 'Number of elements:', rdd.count()
# Your code here
import findspark
findspark.init()
from pyspark import SparkContext
sc = SparkContext(master="local[4]")
# +
#sc.stop()
# -
all_files=open("../Data/data_input.txt","r")
lines=[line.strip() for line in all_files.readlines()]
text=sc.textFile(','.join(lines)).cache()
print_count(text)
# # Part 1: Parse JSON strings to JSON objects
# Python has built-in support for JSON.
# +
#import json
#json_example = '''
#{
# "id": 1,
# "name": "A green door",
# "price": 12.50,
# "tags": ["home", "green"]
#}
#'''
#
#json_obj = json.loads(json_example)
#json_obj
# -
# ## Broken tweets and irrelevant messages
#
# The data of this assignment may contain broken tweets (invalid JSON strings). So make sure that your code is robust for such cases.
#
# In addition, some lines in the input file might not be tweets, but messages that the Twitter server sent to the developer (such as [limit notices](https://dev.twitter.com/streaming/overview/messages-types#limit_notices)). Your program should also ignore these messages.
#
# *Hint:* [Catch the ValueError](http://stackoverflow.com/questions/11294535/verify-if-a-string-is-json-in-python)
#
#
# (1) Parse raw JSON tweets to obtain valid JSON objects. From all valid tweets, construct a pair RDD of `(user_id, text)`, where `user_id` is the `id_str` data field of the `user` dictionary (read [Tweets](#Tweets) section above), `text` is the `text` data field.
# +
import json
def safe_parse(raw_json):
    """Return True iff *raw_json* parses as JSON and looks like a usable tweet.

    A usable tweet must carry both a 'text' field and a 'user' object,
    because the downstream `keypair` reads obj['user']['id_str'] and
    obj['text'].  Broken JSON lines and server messages (e.g. limit
    notices) return False.
    """
    try:
        json_object = json.loads(raw_json)
    except ValueError:  # parenthesis-free form works in both Python 2 and 3
        return False
    # Non-object JSON (numbers, strings, arrays) cannot be a tweet.
    if not isinstance(json_object, dict):
        return False
    # BUGFIX: the original test `'text' not in obj and 'id_str' not in obj`
    # let a message containing only one of the fields through, after which
    # keypair crashed with a KeyError.  Require both fields keypair uses.
    return 'text' in json_object and 'user' in json_object
#pass
def keypair(raw_json_string):
    """Map a valid tweet JSON string to a (user_id, utf8_text) pair."""
    tweet = json.loads(raw_json_string)
    user_id = tweet['user']['id_str']
    utf8_text = tweet['text'].encode('utf-8')
    return (user_id, utf8_text)
validtext=text.filter(safe_parse).map(keypair).cache()
# your code here
# -
# (2) Count the number of different users in all valid tweets (hint: [the `distinct()` method](https://spark.apache.org/docs/latest/programming-guide.html#transformations)).
#
# It should print
# ```
# The number of unique users is: 2083
# ```
def print_users_count(count):
    """Print the number of distinct tweet authors (Python 2 print statement)."""
    print 'The number of unique users is:', count
# your code here
# Count distinct user ids among the valid tweets (expected: 2083 on the
# sample input).
print_users_count(validtext.map(lambda k : k[0]).distinct().count())
#print_users_count(textdistinct.count())
# # Part 2: Number of posts from each user partition
# Load the Pickle file `../../Data/users-partition.pickle`, you will get a dictionary which represents a partition over 452,743 Twitter users, `{user_id: partition_id}`. The users are partitioned into 7 groups. For example, if the dictionary is loaded into a variable named `partition`, the partition ID of the user `59458445` is `partition["59458445"]`. These users are partitioned into 7 groups. The partition ID is an integer between 0-6.
#
# Note that the user partition we provide doesn't cover all users appear in the input data.
# (1) Load the pickle file.
# your code here
import pickle
# Load the {user_id: partition_id} dict (7 groups, ids 0-6).  A context
# manager closes the file handle promptly instead of leaking it until GC.
with open("../Data/users-partition.pickle", 'rb') as partition_file:
    partition = pickle.load(partition_file)
# (2) Count the number of posts from each user partition
#
# Count the number of posts from group 0, 1, ..., 6, plus the number of posts from users who are not in any partition. Assign users who are not in any partition to the group 7.
#
# Put the results of this step into a pair RDD `(group_id, count)` that is sorted by key.
# your code here
def usermapping(user_text):
    """Map a (user_id, text) pair to (group_id, 1) for per-group counting.

    Users missing from the `partition` dict are assigned to group 7.
    Rewritten without Python-2-only tuple-parameter syntax so it also
    parses under Python 3.
    """
    u, _text = user_text
    if u in partition:
        return (partition[u], 1)
    return (7, 1)
# Posts per group, sorted ascending by group id.
# Fix: the original called sortByKey('false'); any non-empty string is
# truthy, so it sorted ascending anyway -- rely on the explicit default
# (ascending=True) to say what is meant.
sortedkeyrdd=validtext.map(usermapping).reduceByKey(lambda a,b:a+b).sortByKey()
# (3) Print the post count using the `print_post_count` function we provided.
#
# It should print
#
# ```
# Group 0 posted 81 tweets
# Group 1 posted 199 tweets
# Group 2 posted 45 tweets
# Group 3 posted 313 tweets
# Group 4 posted 86 tweets
# Group 5 posted 221 tweets
# Group 6 posted 400 tweets
# Group 7 posted 798 tweets
# ```
def print_post_count(counts):
for group_id, count in counts:
print 'Group %d posted %d tweets' % (group_id, count)
# Collect the small (8-row) per-group counts to the driver and print them.
print_post_count(sortedkeyrdd.collect())
# your code here
# # Part 3: Tokens that are relatively popular in each user partition
# In this step, we are going to find tokens that are relatively popular in each user partition.
#
# We define the number of mentions of a token $t$ in a specific user partition $k$ as the number of users from the user partition $k$ that ever mentioned the token $t$ in their tweets. Note that even if some users might mention a token $t$ multiple times or in multiple tweets, a user will contribute at most 1 to the counter of the token $t$.
#
# Please make sure that the number of mentions of a token is equal to the number of users who mentioned this token but NOT the number of tweets that mentioned this token.
#
# Let $N_t^k$ be the number of mentions of the token $t$ in the user partition $k$. Let $N_t^{all} = \sum_{i=0}^7 N_t^{i}$ be the number of total mentions of the token $t$.
#
# We define the relative popularity of a token $t$ in a user partition $k$ as the log ratio between $N_t^k$ and $N_t^{all}$, i.e.
#
# \begin{equation}
# p_t^k = \log_2 \frac{N_t^k}{N_t^{all}}.
# \end{equation}
#
#
# You can compute the relative popularity by calling the function `get_rel_popularity`.
# (0) Load the tweet tokenizer.
# +
# # %load happyfuntokenizing.py
# #!/usr/bin/env python
"""
This code implements a basic, Twitter-aware tokenizer.
A tokenizer is a function that splits a string of text into words. In
Python terms, we map string and unicode objects into lists of unicode
objects.
There is not a single right way to do tokenizing. The best method
depends on the application. This tokenizer is designed to be flexible
and this easy to adapt to new domains and tasks. The basic logic is
this:
1. The tuple regex_strings defines a list of regular expression
strings.
2. The regex_strings strings are put, in order, into a compiled
regular expression object called word_re.
3. The tokenization is done by word_re.findall(s), where s is the
user-supplied string, inside the tokenize() method of the class
Tokenizer.
4. When instantiating Tokenizer objects, there is a single option:
preserve_case. By default, it is set to True. If it is set to
False, then the tokenizer will downcase everything except for
emoticons.
The __main__ method illustrates by tokenizing a few examples.
I've also included a Tokenizer method tokenize_random_tweet(). If the
twitter library is installed (http://code.google.com/p/python-twitter/)
and Twitter is cooperating, then it should tokenize a random
English-language tweet.
<NAME>:
I modified the regex strings to extract URLs in tweets.
"""
__author__ = "<NAME>"
######################################################################
import re
# htmlentitydefs is Python-2-only (renamed to html.entities in Python 3).
import htmlentitydefs
######################################################################
# The following strings are components in the regular expression
# that is used for tokenizing. It's important that phone_number
# appears first in the final regex (since it can contain whitespace).
# It also could matter that tags comes after emoticons, due to the
# possibility of having text like
#
# <:| and some text >:)
#
# Most importantly, the final element should always be last, since it
# does a last ditch whitespace-based tokenization of whatever is left.
# This particular element is used in a couple ways, so we define it
# with a name:
# The emoticon pattern is used both inside the big tokenizer regex and
# on its own (to protect emoticon case when downcasing), so it gets a
# name:
emoticon_string = r"""
    (?:
      [<>]?
      [:;=8]                     # eyes
      [\-o\*\']?                 # optional nose
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      |
      [\)\]\(\[dDpP/\:\}\{@\|\\] # mouth
      [\-o\*\']?                 # optional nose
      [:;=8]                     # eyes
      [<>]?
    )"""
# The components of the tokenizer, tried in order (so phone numbers win
# over bare digit runs, emoticons over punctuation, etc.):
regex_strings = (
    # Phone numbers:
    r"""
    (?:
      (?:            # (international)
        \+?[01]
        [\-\s.]*
      )?
      (?:            # (area code)
        [\(]?
        \d{3}
        [\-\s.\)]*
      )?
      \d{3}          # exchange
      [\-\s.]*
      \d{4}          # base
    )"""
    ,
    # URLs (added to the upstream tokenizer at index 1):
    r"""http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+"""
    ,
    # Emoticons:
    emoticon_string
    ,
    # HTML tags:
    r"""<[^>]+>"""
    ,
    # Twitter username:
    r"""(?:@[\w_]+)"""
    ,
    # Twitter hashtags:
    r"""(?:\#+[\w_]+[\w\'_\-]*[\w_]+)"""
    ,
    # Remaining word types:
    r"""
    (?:[a-z][a-z'\-_]+[a-z])       # Words with apostrophes or dashes.
    |
    (?:[+\-]?\d+[,/.:-]\d+[+\-]?)  # Numbers, including fractions, decimals.
    |
    (?:[\w_]+)                     # Words without apostrophes or dashes.
    |
    (?:\.(?:\s*\.){1,})            # Ellipsis dots.
    |
    (?:\S)                         # Everything else that isn't whitespace.
    """
    )
######################################################################
# This is the core tokenizing regex:
word_re = re.compile(r"""(%s)""" % "|".join(regex_strings), re.VERBOSE | re.I | re.UNICODE)
# The emoticon string gets its own regex so that we can preserve case
# for emoticons as needed.
# BUG FIX: the original compiled regex_strings[1], which is the URL
# pattern (URLs were inserted at index 1 of the upstream tuple); the
# emoticon pattern lives at index 2.  With index 1 the tokenizer
# downcased emoticons like :D and preserved case on URLs instead.
emoticon_re = re.compile(regex_strings[2], re.VERBOSE | re.I | re.UNICODE)
# These are for regularizing HTML entities to Unicode:
html_entity_digit_re = re.compile(r"&#\d+;")
html_entity_alpha_re = re.compile(r"&\w+;")
# NOTE(review): the upstream tokenizer defines this sentinel as the
# five-character ampersand entity; this copy replaces every bare '&'
# with ' and ' in __html2unicode -- confirm that is intended.
amp = "&"
######################################################################
class Tokenizer:
    """Twitter-aware tokenizer (Python 2 only: uses unicode/unichr and
    print statements).

    Tokens are found with the module-level ``word_re``; when
    ``preserve_case`` is False, every token that does not match
    ``emoticon_re`` is lowercased.
    """
    def __init__(self, preserve_case=False):
        # When False, tokenize() downcases non-emoticon tokens.
        self.preserve_case = preserve_case
    def tokenize(self, s):
        """
        Argument: s -- any string or unicode object
        Value: a tokenized list of strings; concatenating this list
        returns the original string if preserve_case=False
        """
        # Try to ensure unicode:
        try:
            s = unicode(s)
        except UnicodeDecodeError:
            # Escape undecodable bytes and retry ('string_escape' is a
            # Python-2-only codec).
            s = str(s).encode('string_escape')
            s = unicode(s)
        # Fix HTML character entities:
        s = self.__html2unicode(s)
        # Tokenize:
        words = word_re.findall(s)
        # Possibly alter the case, but avoid changing emoticons like :D into :d:
        if not self.preserve_case:
            words = map((lambda x : x if emoticon_re.search(x) else x.lower()), words)
        return words
    def tokenize_random_tweet(self):
        """
        If the twitter library is installed and a twitter connection
        can be established, then tokenize a random tweet.
        """
        try:
            import twitter
        except ImportError:
            # NOTE(review): this branch only prints; execution falls
            # through, and the twitter.Api() call below would raise
            # NameError when the library is missing.
            print "Apologies. The random tweet functionality requires the Python twitter library: http://code.google.com/p/python-twitter/"
        from random import shuffle
        api = twitter.Api()
        tweets = api.GetPublicTimeline()
        if tweets:
            for tweet in tweets:
                if tweet.user.lang == 'en':
                    return self.tokenize(tweet.text)
        else:
            raise Exception("Apologies. I couldn't get Twitter to give me a public English-language tweet. Perhaps try again")
    def __html2unicode(self, s):
        """
        Internal method that seeks to replace all the HTML entities in
        s with their corresponding unicode characters.
        """
        # First the digits (numeric entities, e.g. ampersand-#38;):
        ents = set(html_entity_digit_re.findall(s))
        if len(ents) > 0:
            for ent in ents:
                entnum = ent[2:-1]
                try:
                    entnum = int(entnum)
                    s = s.replace(ent, unichr(entnum))
                except:
                    pass
        # Now the alpha versions (named entities), skipping `amp` which
        # gets special treatment below:
        ents = set(html_entity_alpha_re.findall(s))
        ents = filter((lambda x : x != amp), ents)
        for ent in ents:
            entname = ent[1:-1]
            try:
                s = s.replace(ent, unichr(htmlentitydefs.name2codepoint[entname]))
            except:
                pass
        # Spell out the ampersand so it survives tokenization as a word.
        s = s.replace(amp, " and ")
        return s
# +
from math import log
# Shared tokenizer instance: downcases every token except emoticons.
tok = Tokenizer(preserve_case=False)
def get_rel_popularity(c_k, c_all):
    """Return log2(c_k / c_all): relative popularity of a token in a group.

    c_k   : mentions of the token in one user partition
    c_all : total mentions of the token across all partitions
    """
    ratio = 1.0 * c_k / c_all
    return log(ratio) / log(2)
def print_tokens(tokens, gid = None):
group_name = "overall"
if gid is not None:
group_name = "group %d" % gid
print '=' * 5 + ' ' + group_name + ' ' + '=' * 5
for t, n in tokens:
print "%s\t%.4f" % (t, n)
print
# -
# (1) Tokenize the tweets using the tokenizer we provided above named `tok`. Count the number of mentions for each tokens regardless of specific user group.
#
# Call `print_count` function to show how many different tokens we have.
#
# It should print
# ```
# Number of elements: 8949
# ```
# +
#from collections import defaultdict
#a=defaultdict(int)
#for u,t in validtext.collect():
# p=tok.tokenize(t)
# for l in p:
# a[l]+=1
#print len(a)
# +
# your code here
def usermapping2(u):
    """Return the partition/group id for user `u`; unknown users get group 7."""
    return partition.get(u, 7)
#v1=
# Per user: tokenize each tweet, union the token sets (so a user counts
# each token at most once), map the user id to its group id, then
# flatten to (group_id, token) pairs.
# NOTE: the lambdas below use Python-2-only tuple-parameter unpacking.
v1=validtext.mapValues(lambda t : set(tok.tokenize(t))).reduceByKey(lambda t,t1 : t.union(t1)).map(lambda (u,t) : (usermapping2(u),list(t))).flatMapValues(lambda t : t).cache()
print_count(v1.map(lambda (u,t): t ).distinct())
#combineByKey((lambda t : tok.tokenize(t)),(lambda acc, value: acc.(value)),(lambda acc1, acc2: acc1.extend(acc2) )).flatMap(lambda (u,t) : t).distinct().count()
#.flatMapValues(lambda t : list(t))
#.flatMapValues(lambda t : list(t)).map(lambda (u,t) : (t,usermapping2(u))).cache()
#print_count(v1.map(lambda (t,u): t ).distinct())
# -
# (2) Tokens that are mentioned by too few users are usually not very interesting. So we want to only keep tokens that are mentioned by at least 100 users. Please filter out tokens that don't meet this requirement.
#
# Call `print_count` function to show how many different tokens we have after the filtering.
#
# Call `print_tokens` function to show top 20 most frequent tokens.
#
# It should print
# ```
# Number of elements: 44
# ===== overall =====
# : 1388.0000
# rt 1237.0000
# . 826.0000
# … 673.0000
# the 623.0000
# trump 582.0000
# to 499.0000
# , 489.0000
# a 404.0000
# is 376.0000
# in 297.0000
# of 292.0000
# and 288.0000
# for 281.0000
# ! 269.0000
# ? 210.0000
# on 195.0000
# i 192.0000
# you 191.0000
# this 190.0000
# ```
# +
# your code here
#od3=validtext.filter(lambda (u,t) : u in partition)
#od4=validtext.filter(lambda (u,t) : u not in partition)
#od1=validtext.flatMapValues(lambda t: list(set(tok.tokenize(t)))).map(lambda (u,t) : (t,u)).groupByKey().mapValues(lambda x: set([a for a in x])).cache()
#ordered_tokens=od1.mapValues(lambda x: len(x)).filter(lambda (t,ul) : ul>=100)
#print_count(ordered_tokens)
#print_tokens(ordered_tokens.takeOrdered(20,key = lambda x: -x[1]))
# Count distinct users per token ((group, token) pairs are already
# deduplicated per user above) and keep tokens mentioned by >= 100 users.
# NOTE(review): the lambda parameter names are misleading -- v1 elements
# are (group_id, token), so `(u,1)` below is really (token, 1).
v2=v1.map(lambda (t,u) : (u,1)).reduceByKey(lambda a,b : a+b).filter(lambda (t,ul) : ul>=100).cache()
print_count(v2)
print_tokens(v2.takeOrdered(20,key = lambda x: -x[1]))
# -
# (3) For all tokens that are mentioned by at least 100 users, compute their relative popularity in each user group. Then print the top 10 tokens with highest relative popularity in each user group. In case two tokens have same relative popularity, break the tie by printing the alphabetically smaller one.
#
# **Hint:** Let the relative popularity of a token $t$ be $p$. The order of the items will be satisfied by sorting them using (-p, t) as the key.
#
# It should print
# ```
# ===== group 0 =====
# ... -3.5648
# at -3.5983
# hillary -4.0875
# i -4.1255
# bernie -4.1699
# not -4.2479
# https -4.2695
# he -4.2801
# in -4.3074
# are -4.3646
#
# ===== group 1 =====
# #demdebate -2.4391
# - -2.6202
# & -2.7472
# amp -2.7472
# clinton -2.7570
# ; -2.7980
# sanders -2.8838
# ? -2.9069
# in -2.9664
# if -3.0138
#
# ===== group 2 =====
# are -4.6865
# and -4.7105
# bernie -4.7549
# at -4.7682
# sanders -4.9542
# that -5.0224
# in -5.0444
# donald -5.0618
# a -5.0732
# #demdebate -5.1396
#
# ===== group 3 =====
# #demdebate -1.3847
# bernie -1.8480
# sanders -2.1887
# of -2.2356
# that -2.3785
# the -2.4376
# … -2.4403
# clinton -2.4467
# hillary -2.4594
# be -2.5465
#
# ===== group 4 =====
# hillary -3.7395
# sanders -3.9542
# of -4.0199
# clinton -4.0790
# at -4.1832
# in -4.2143
# a -4.2659
# on -4.2854
# . -4.3681
# the -4.4251
#
# ===== group 5 =====
# cruz -2.3861
# he -2.6280
# are -2.7796
# will -2.7829
# the -2.8568
# is -2.8822
# for -2.9250
# that -2.9349
# of -2.9804
# this -2.9849
#
# ===== group 6 =====
# @realdonaldtrump -1.1520
# cruz -1.4532
# https -1.5222
# ! -1.5479
# not -1.8904
# … -1.9269
# will -2.0124
# it -2.0345
# this -2.1104
# to -2.1685
#
# ===== group 7 =====
# donald -0.6422
# ... -0.7922
# sanders -1.0282
# trump -1.1296
# bernie -1.2106
# - -1.2253
# you -1.2376
# clinton -1.2511
# if -1.2880
# i -1.2996
# ```
# +
# your code here
def popfn(token_count):
    """Turn a (token, group_count) pair into ((-rel_pop, token), rel_pop).

    The composite key sorts by descending relative popularity, breaking
    ties on the alphabetically smaller token, as the output requires.
    Total per-token counts are read from the module-level `ordt` map.
    """
    token, group_count = token_count
    rel_pop = get_rel_popularity(group_count, ordt[token])
    return ((-rel_pop, token), rel_pop)
#def tokenprint(par_text,it):
# p1=par_text.join(ordered_tokens).map(popfn).sortByKey().map(lambda (u,v) : (u[1],v)).take(10)
# print_tokens(p1,it)
# return p1
# Total mention counts per token, pulled to the driver as a dict.
ordt=v2.collectAsMap()
#[r[0] for r in v2.collect()]
# Keep only (group, token) pairs whose token survived the >=100 filter.
v3=v1.filter(lambda (p,t) : t in ordt).cache()
# Running best relative popularity (bm/cm/dm) and best group (bg/cg/dg)
# for the Sanders-, Cruz- and Trump-related name tokens; used as the
# candidate-support guesses in the final cell.
bm=-100
cm=-100
dm=-100
bg=7
cg=7
dg=7
# For each group 0..7: count token mentions within the group, score with
# popfn, sort, and print the top 10 tokens.
for it in range(0,8):
    p1=v3.filter(lambda (p,t) : p==it).map(lambda (p,t) : (t,1)).reduceByKey(lambda a,b : a+b).map(popfn).sortByKey().map(lambda (u,v) : (u[1],v)).take(10)
    print_tokens(p1,it)
    # Track which named group (excluding the catch-all group 7) scores
    # highest for each candidate's name tokens.
    if it!=7:
        for t,m in p1:
            if "bernie" in t and m>bm:
                bm=m
                bg=it
            if "sanders" in t and m>bm:
                bm=m
                bg=it
            if "ted" in t and m>cm:
                cm=m
                cg=it
            if "cruz" in t and m>cm:
                cm=m
                cg=it
            if "donald" in t and m>dm:
                dm=m
                dg=it
            if "trump" in t and m>dm:
                dm=m
                dg=it
#od2=od1.filter(lambda (t,u) : t in ordt).mapValues(lambda x : filter(lambda a : a in partition,x)).cache()
#for it in range(0,7):
#    pt=tokenprint(od2.mapValues(lambda x:len(set(filter(lambda l : partition[l]==it,x)))).filter(lambda (u,v) : v>0),it)
'''for t,m in pt:
    if t=="bernie" and m>bm:
        bm=m
        bg=it
    if t=="sanders" and m>bm:
        bm=m
        bg=it
    if t=="ted" and m>cm:
        cm=m
        cg=it
    if t=="cruz" and m>cm:
        cm=m
        cg=it
    if t=="donald" and m>dm:
        dm=m
        dg=it
    if t=="trump" and m>dm:
        dm=m
        dg=it
    if t=="@realdonaldtrump" and m>dm:
        dm=m
        dg=it'''
#placeholder=tokenprint(od1.mapValues(lambda x:len(set(filter(lambda l : l not in partition,x)))),7)
# -
# (4) (optional, not for grading) The users partition is generated by a machine learning algorithm that tries to group the users by their political preferences. Three of the user groups are showing supports to <NAME>, <NAME>, and <NAME>.
#
# If your program looks okay on the local test data, you can try it on the larger input by submitting your program to the homework server. Observe the output of your program to larger input files, can you guess the partition IDs of the three groups mentioned above based on your output?
# +
# Change the values of the following three items to your guesses
# Guesses derived from the loop above: bg/cg/dg hold the group id with
# the highest relative popularity for each candidate's name tokens.
users_support = [
    (bg, "<NAME>"),
    (cg, "<NAME>"),
    (dg, "<NAME>")
]
for gid, candidate in users_support:
    print "Users from group %d are most likely to support %s." % (gid, candidate)
#t1=timeit.default_timer()
#print t1-t2
|
Code/twitter_sentiment_analysis.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sequence Classification task with Vespa
# > python API for stateless model evaluation
# Vespa has [recently implemented](https://blog.vespa.ai/stateless-model-evaluation/) accelerated model evaluation using ONNX Runtime in the stateless cluster. This opens up new usage areas for Vespa, such as serving model predictions.
# ## Define the model server
# Define the task and the model to use. The [SequenceClassification](https://pyvespa.readthedocs.io/en/latest/reference-api.html#sequenceclassification) task takes a text input and return an array of floats that depends on the model used to solve the task. The `model` argument can be the id of the model as defined by the huggingface model hub.
# +
from vespa.ml import SequenceClassification
# Text-classification task backed by a Hugging Face hub model id.
task = SequenceClassification(
    model_id="bert_tiny",
    model="google/bert_uncased_L-2_H-128_A-2"
)
# -
# A `ModelServer` is a simplified application package focused on stateless model evaluation. It can take as many tasks as we want.
# +
from vespa.package import ModelServer
# Stateless model-evaluation application package; can host several tasks.
model_server = ModelServer(
    name="bert_model_server",
    tasks=[task],
)
# -
# ## Deploy the model server
# We can either host our model server on Vespa Cloud or deploy it locally using a Docker container.
# ### Host it on VespaCloud
# Check [this short guide](https://pyvespa.readthedocs.io/en/latest/deploy-vespa-cloud.html) for detailed information about how to setup your Vespa Cloud account and where to find the environment variables defined below.
# + nbsphinx="hidden"
import os
# Vespa Cloud credentials/config for the CI environment; the user key is
# materialized from an environment variable into key.pem under WORK_DIR.
os.environ["TENANT_NAME"] = "vespa-team"
os.environ["APPLICATION_NAME"] = "pyvespa-integration"
with open(os.path.join(os.getenv("WORK_DIR"), "key.pem"), "w") as f:
    f.write(os.getenv("VESPA_CLOUD_USER_KEY").replace(r"\n", "\n"))
os.environ["USER_KEY"] = os.path.join(os.getenv("WORK_DIR"), "key.pem")
os.environ["INSTANCE_NAME"] = "test"
os.environ["DISK_FOLDER"] = os.path.join(os.getenv("WORK_DIR"), "sample_application")
# +
from vespa.deployment import VespaCloud
# Deploy the model server to Vespa Cloud using the env vars set above.
vespa_cloud = VespaCloud(
    tenant=os.getenv("TENANT_NAME"),
    application=os.getenv("APPLICATION_NAME"),
    key_location=os.getenv("USER_KEY"),
    application_package=model_server,
)
app = vespa_cloud.deploy(
    instance=os.getenv("INSTANCE_NAME"), disk_folder=os.getenv("DISK_FOLDER")
)
# -
# ### Deploy locally
# Similarly, we can deploy the model server locally in a Docker container.
# +
from vespa.deployment import VespaDocker
# Alternative: deploy the same model server to a local Docker container.
vespa_docker = VespaDocker(disk_folder=os.getenv("DISK_FOLDER"), port=8081)
app = vespa_docker.deploy(application_package=model_server)
# -
# ## Get model information
# Get models available:
# List all models served by the deployed application:
app.get_model_endpoint()
# Get information about a specific model:
app.get_model_endpoint(model_id="bert_tiny")
# ## Get predictions
# Get a prediction:
app.predict(x="this is a test", model_id="bert_tiny")
|
docs/sphinx/source/use_cases/sequence-classification-task-with-vespa-cloud.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Stock Market Timing / Buy Sell Signals
#
# This notebook we introduce buy / sell signals generated by Relative Strength Index (or [RSI](https://www.investopedia.com/terms/r/rsi.asp)).
# !pip install git+https://github.com/yiqiao-yin/YinPortfolioManagement.git
from YinCapital_forecast.modules import RSI_Timer
# Backtest RSI-based buy/sell signals for FB over 2018-01 .. 2021-08.
# NOTE(review): the exact semantics of pick_RSI and the three thresholds
# are defined by the RSI_Timer package -- see its documentation.
tmp1 = RSI_Timer(
    start_date = '2018-01-01',
    end_date = '2021-08-31',
    tickers = 'FB',
    pick_RSI = 2,
    rsi_threshold_1 = 10,
    rsi_threshold_2 = 30,
    rsi_threshold_3 = 100,
    buy_threshold = 40,
    sell_threshold = 90 )
# Ends here.
|
docs/python_MM_RSITimer.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>IND THE CODE 2020
#
# ## DESAFIO 2: PARTE 2
# ### Introdução
# Na parte 1 deste desafio, você realizou o pré-processamento e o treinamento de um modelo a partir de um conjunto de dados base fornecido. Nesta segunda etapa você irá integrar todas as transformações e eventos de treinamento criados anteriormente em uma Pipeline completa para *deploy* no **Watson Machine Learning**!
# ### Preparação do Notebook
# Primeiro realizaremos a instalação do scikit-learn e a importação das mesmas bibliotecas utilizadas anteriormente
# !pip install scikit-learn==0.20.0 --upgrade
import json
import requests
import pandas as pd
import numpy as np
import xgboost as xgb
from sklearn.impute import SimpleImputer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold, cross_validate
# É necessário inserir o conjunto de dados base novamente como um dataframe pandas, seguindo as instruções
#
# 
#
# Após a seleção da opção **"Insert to code"**, a célula abaixo será preenchida com o código necessário para importação e leitura dos dados no arquivo .csv como um DataFrame Pandas.
# +
import types
import pandas as pd
from botocore.client import Config
import ibm_boto3
# Shim: provides an __iter__ so pandas accepts the botocore
# StreamingBody as a file-like object (bound onto `body` below).
def __iter__(self): return 0
# @hidden_cell
# The following code accesses a file in your IBM Cloud Object Storage. It includes your credentials.
# You might want to remove those credentials before you share the notebook.
client___removido__ = ibm_boto3.client(service_name='s3',
    ibm_api_key_id='__removido__',
    ibm_auth_endpoint="https://iam.cloud.ibm.com/oidc/token",
    config=Config(signature_version='oauth'),
    endpoint_url='https://s3-api.us-geo.objectstorage.service.networklayer.com')
# Fetch the challenge dataset from Cloud Object Storage.
body = client___removido__.get_object(Bucket='desafio2uninassau-lzgvx1spvveat6',Key='dataset_desafio_2.csv')['Body']
# add missing __iter__ method, so pandas accepts body as file-like object
if not hasattr(body, "__iter__"): body.__iter__ = types.MethodType( __iter__, body )
df_data_1 = pd.read_csv(body)
df_data_1.head()
# -
# ### Construção da Pipeline completa para encapsulamento no WML
# #### Preparando transformações personalizadas para carregamento no WML
# Na etapa anterior, foi mostrado como criar uma transformação personalizada, através da declaração de uma classe Python com os métodos ``fit`` e ``transform``.
#
# - Código da transformação personalizada DropColumns():
#
# from sklearn.base import BaseEstimator, TransformerMixin
# # All sklearn Transforms must have the `transform` and `fit` methods
# class DropColumns(BaseEstimator, TransformerMixin):
# def __init__(self, columns):
# self.columns = columns
# def fit(self, X, y=None):
# return self
# def transform(self, X):
# # Primeiro realizamos a cópia do dataframe 'X' de entrada
# data = X.copy()
# # Retornamos um novo dataframe sem as colunas indesejadas
# return data.drop(labels=self.columns, axis='columns')
#
# Para integrar esses tipos de transformações personalizadas nas Pipelines do Watson Machine Learning, é necessário primeiramente empacotar seu código personalizado como uma biblioteca Python. Isso pode ser feito facilmente com o uso da ferramenta *setuptools*.
#
# No seguinte repositório git: https://github.com/vnderlev/sklearn_transforms temos todos os arquivos necessários para a criação de um pacote Python, nomeado **my_custom_sklearn_transforms**.
# Esse pacote possui a seguinte estrutura de arquivos:
#
# /my_custom_sklearn_transforms.egg-info
# dependency_links.txt
# not-zip-safe
# PKG-INFO
# SOURCES.txt
# top_level.txt
# /my_custom_sklearn_transforms
# __init__.py
# sklearn_transformers.py
# PKG-INFO
# README.md
# setup.cfg
# setup.py
#
# O arquivo principal, que irá conter o código das nossas transformadas personalizadas, é o arquivo **/my_custom_sklearn_transforms/sklearn_transformers.py**. Se você acessá-lo no repositório, irá notar que ele contém exatamente o mesmo código declarado na primeira etapa (a classe DropColumns).
#
# Caso você tenha declarado transformações próprias (além da DropColumn fornecida), você deverá adicionar todas as classes dessas transformadas criadas por você nesse mesmo arquivo. Para tal, você deve realizar o fork desse repositório (isso pode ser feito na própria interface Web do Github, clicando no botão conforme a imagem abaixo), e adicionar suas classes personalizadas no arquivo **sklearn_transformers.py**.
#
# 
#
# Se você somente fez o uso da transformação fornecida (DropColumns), pode ignorar essa etapa de fork, e seguir utilizando o pacote base fornecido! :)
#
# Após a preparação do seu pacote Python com as suas transformadas personalizadas, substitua o link do repositório git na célula abaixo e execute-a. Caso você não tenha preparado nenhuma nova transformada, execute a célula com o link do repositório já fornecido.
#
# <hr>
#
# **OBSERVAÇÃO**
#
# Caso a execução da célula abaixo retorne um erro de que o repositório já existe, execute:
#
# **!rm -r -f sklearn_transforms**
# substitua o link abaixo pelo link do seu repositório git (se for o caso)
# !git clone https://github.com/vnderlev/sklearn_transforms.git
# !cd sklearn_transforms
# !ls -ltr
# Para subir o código no WML, precisamos enviar um arquivo .zip com todo o código fonte, então iremos zipar o diretório clonado em seguida:
# !zip -r sklearn_transforms.zip sklearn_transforms
# Com o arquivo zip do nosso pacote carregado no Kernel deste notebook, podemos utilizar a ferramenta pip para instalá-lo, conforme a célula abaixo:
# !pip install sklearn_transforms.zip
# Podemos agora realizar a importação do nosso pacote personalizado em nosso notabook!
#
# Iremos importar a transformação DropColumns. Se você possui outras transformações personalizadas, não se esqueça de importá-las!
from my_custom_sklearn_transforms.sklearn_transformers import DropColumns
# #### Declarando a Pipeline
#
# Após a importação das transformações personalizadas como um pacote Python, podemos partir para a declaração da nossa Pipeline.
#
# O processo é bem semelhante ao realizado na primeira etapa, porém com algumas diferenças importantes, então preste bem atenção!
#
# A Pipeline exemplo possui três estágios:
#
# - remover a coluna "NOME"
# - imputar "zeros" em todos os valores faltantes
# - inserir os dados pré-processados como entrada em um modelo treinado
#
# Relembrando, a entrada desta Pipeline será o conjunto cru de dados fornecido exceto a coluna "LABELS" (variável-alvo a ser determinada pelo modelo).
#
# Teremos então 17 valores de entrada **na PIPELINE** (no modelo serão 16 entradas, pois a coluna NAME será removida no primeiro estágio após a transformação DropColumn).
#
# MATRICULA - número de quatro algarismos único para cada estudante
# NOME - nome completo do estudante
# FALTAS_DE - número de faltas na disciplina de ``Direito Empresarial``
# FALTAS_EM - número de faltas na disciplina de ``Empreendedorismo``
# FALTAS_MF - número de faltas na disciplina de ``Matemática Financeira``
# MEDIA_DE - média simples das notas do aluno na disciplina de ``Direito Empresarial`` (0-10)
# MEDIA_EM - média simples das notas do aluno na disciplina de ``Empreendedorismo`` (0-10)
# MEDIA_MF - média simples das notas do aluno na disciplina de ``Matemática Financeira`` (0-10)
# HRS_ESTUDO_DE - horas de estudo particular na disciplina de ``Direito Empresarial``
# HRS_ESTUDO_EM - horas de estudo particular na disciplina de ``Empreendedorismo``
# HRS_ESTUDO_MF - horas de estudo particular na disciplina de ``Matemática Financeira``
# REPROVACOES_DE - número de reprovações na disciplina de ``Direito Empresarial``
# REPROVACOES_EM - número de reprovações na disciplina de ``Empreendedorismo``
# REPROVACOES_MF - número de reprovações na disciplina de ``Matemática Financeira``
# LIVROS_TEXTO - quantidade de livros e textos acessados pelo aluno no sistema da universidade
# AULAS_AO_VIVO - horas de aulas ao vivo presenciadas pelo aluno (total em todas as disciplinas)
# EXERCICIOS - número de exercícios realizados pelo estudante (total em todas as disciplinas) no sistema da universidade
#
# A saída da Pipeline será um valor estimado para a coluna "LABELS".
# +
# Criação de uma Transform personalizada ``DropColumns``
# Instantiate the custom ``DropColumns`` transform.
# NOTE(review): the markdown above says only "NOME" is removed, but
# MATRICULA is dropped here as well -- confirm which is intended.
rm_columns = DropColumns(
    columns=["NOME", "MATRICULA"]
)
# +
# Criação de um objeto ``SimpleImputer``
# Create a ``SimpleImputer`` for missing values; the strategy itself is
# tuned later by the grid search ('imputer__strategy').
si = SimpleImputer(
    missing_values=np.nan, # missing values are ``np.nan`` (pandas default)
    # strategy='constant', # replace each missing value with a constant
    # fill_value=0, # the constant used for filling would be int64=0
    # verbose=0,
    # copy=True
)
# +
# Definição das colunas que serão features (nota-se que a coluna NOME está presente)
# Columns used as PIPELINE inputs (note NOME/MATRICULA are present here;
# DropColumns removes them before the model sees the data).
# NOTE(review): this column list does not match the 17-column schema in
# the markdown above (FALTAS_*/MEDIA_*/HRS_ESTUDO_*) -- verify against
# the actual dataset.
features = [
    "MATRICULA", "NOME", 'REPROVACOES_DE', 'REPROVACOES_EM', "REPROVACOES_MF", "REPROVACOES_GO",
    "NOTA_DE", "NOTA_EM", "NOTA_MF", "NOTA_GO",
    "INGLES", "H_AULA_PRES", "TAREFAS_ONLINE", "FALTAS",
]
# Target variable
target = ["PERFIL"]
# Prepare the arguments for the ``scikit-learn`` methods
X = df_data_1[features]
y = df_data_1[target]
# -
# **ATENÇÃO!!**
#
# A célula acima, embora muito parecida com a definição de features na primeira etapa deste desafio, possui uma grande diferença!
#
# Nela está presente a coluna "NOME" como uma feature! Isso ocorre pois neste caso essas são as entradas da *PIPELINE*, e não do modelo.
# Separação dos dados em um conjunto de treino e um conjunto de teste
# 80/20 train/test split, stratified on the target profile.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=337, stratify=y)
# Na célula abaixo é realizada a declaração de um objeto **Pipeline** do scikit-learn, onde é declarado o parâmetro *steps*, que nada mais é do que uma lista com as etapas da nossa pipeline:
#
# 'remove_cols' - transformação personalizada DropColumns
# 'imputer' - transformação embutida do scikit-learn para imputação de valores faltantes
# 'dtc' - um classificador via árvore de decisão
#
# Note que passamos como passos as transformadas instanciadas anteriormente, sob nome `rm_columns` e `si`.
# Criação da nossa pipeline para armazenamento no Watson Machine Learning:
# Pipeline stored in Watson Machine Learning:
# remove_cols -> imputer -> decision-tree classifier.
my_pipeline = Pipeline(
    steps=[
        ('remove_cols', rm_columns),
        ('imputer', si),
        ('dtc', DecisionTreeClassifier(random_state=337)),
    ]
)
from sklearn.model_selection import GridSearchCV
# Hyper-parameter grid searched with 10-fold CV over the whole pipeline;
# note it also tunes the imputation strategy.
param_grid={
    'dtc__min_samples_split': [4, 9, 18, 36, 73, 147, 294],
    'dtc__max_depth': range(7, 14, 2),
    'dtc__min_samples_leaf': [5, 11, 55, 111],
    'dtc__criterion': ['gini','entropy'],
    'imputer__strategy': ['constant', 'mean', 'median']
}
my_pipeline_gs = GridSearchCV(my_pipeline, param_grid, cv=10, n_jobs=-1, verbose=1)
# Em seguida iremos executar o método `fit()` da Pipeline, realizando o pré-processamento e o treinamento do modelo de uma só vez.
# Inicialização da Pipeline (pré-processamento e realização do treinamento do modelo)
# Fit the full pipeline (pre-processing + decision tree) via grid search.
my_pipeline_gs.fit(X_train, y_train)
# +
# display(my_pipeline_gs.best_params_)
# display(my_pipeline_gs.best_score_)
# -
# Keep the best pipeline found by the grid search as the final model.
my_pipeline = my_pipeline_gs.best_estimator_
# Agora que temos uma pipeline completa, com etapas de pré-processamento configuradas e também um modelo por árvore de decisão já treinado, podemos realizar a integração com o Watson Machine Learning!
# ### Encapsulando uma Pipeline personalizada no Watson Machine Learning
# #### Estabelecendo conexão entre o cliente Python do WML e a sua instância do serviço na nuvem
# Biblioteca Python com implementação de um cliente HTTP para a API do WML
from watson_machine_learning_client import WatsonMachineLearningAPIClient
# As próximas células irão realizar o deploy da pipeline declarada neste notebook no WML. Só prossiga se você já está satisfeito com seu modelo e acha que já é a hora de fazer o deploy da sua solução.
#
# Cole as credenciais de sua instância do Watson Machine Learning na variável na célula abaixo.
#
# É importante que a variável que contém os valores tenha o nome de ``wml_credentials`` para que as próximas células deste notebook executem corretamente.
# Watson Machine Learning service credentials (secret values redacted).
# NOTE(review): never commit real API keys to version control.
wml_credentials = {
  "apikey": "__removido__",
  "iam_apikey_description": "Auto-generated for key __removido__",
  "iam_apikey_name": "Service credentials-1",
  "iam_role_crn": "crn:v1:bluemix:public:iam::::serviceRole:Writer",
  "iam_serviceid_crn": "crn:v1:bluemix:public:iam-identity::__removido__::serviceid:__removido__",
  "instance_id": "__removido__",
  "url": "https://us-south.ml.cloud.ibm.com"
}
# +
# Instantiate a Watson Machine Learning client from the credentials above
clientWML = WatsonMachineLearningAPIClient(wml_credentials)
# +
# Fetch details of your Watson Machine Learning instance
instance_details = clientWML.service_instance.get_details()
# `json` is presumably imported in an earlier cell -- TODO confirm
print(json.dumps(instance_details, indent=4))
# -
# **ATENÇÃO!!**
#
# Fique atento para os limites de consumo de sua instância do Watson Machine Learning!
#
# Caso você expire a camada grátis, não será possível avaliar seu modelo (pois é necessária a realização de algumas chamadas de API que consomem predições!)
# #### Listando todos os artefatos armazenados no seu WML
# Para listar todos os artefatos armazenados em seu Watson Machine Learning, você pode usar a seguinte função:
#
# clientWML.repository.list()
# +
# Listando todos os artefatos atualmente armazenados na sua instância do WML
clientWML.repository.list()
# -
# No plano LITE do Watson Machine Learning só é permitido o deploy de um único modelo por vez. Se for o caso de você já possuir um modelo online na sua instância, você pode apagá-lo utilizando o método clientWML.repository.delete():
#
# artifact_guid = "__removido__"
# clientWML.repository.delete(artifact_guid)
# #### Criando uma nova definição de pacote Python personalizado no WML
# O primeiro passo para realizar seu deploy é armazenar o código das transformações personalizadas criadas por você.
#
# Para essa etapa precisamos apenas do arquivo .zip do pacote criado (que já possuimos carregado no Kernel!)
# +
# Metadata for the Python package that contains our custom Transforms
pkg_meta = {
    clientWML.runtimes.LibraryMetaNames.NAME: "my_custom_sklearn_transform_1",
    clientWML.runtimes.LibraryMetaNames.DESCRIPTION: "A custom sklearn transform",
    clientWML.runtimes.LibraryMetaNames.FILEPATH: "sklearn_transforms.zip", # the .zip package created earlier
    clientWML.runtimes.LibraryMetaNames.VERSION: "1.0",
    clientWML.runtimes.LibraryMetaNames.PLATFORM: { "name": "python", "versions": ["3.6"] }
}
# Upload the library and keep its UID for the runtime definition below.
custom_package_details = clientWML.runtimes.store_library( pkg_meta )
custom_package_uid = clientWML.runtimes.get_library_uid( custom_package_details )
print("\n Lista de artefatos de runtime armazenados no WML:")
clientWML.repository.list()
# -
# #### Criando uma nova definição de runtime Python personalizado no WML
#
# O segundo passo é armazenar uma definição de runtime Python para utilizar a nossa biblioteca personalizada.
#
# Isso pode ser feito da seguinte forma:
# +
# Runtime definition: Python 3.6 plus our custom-transform library.
runtime_meta = {
    clientWML.runtimes.ConfigurationMetaNames.NAME: "my_custom_wml_runtime_1",
    clientWML.runtimes.ConfigurationMetaNames.DESCRIPTION: "A Python runtime with custom sklearn Transforms",
    clientWML.runtimes.ConfigurationMetaNames.PLATFORM: {
        "name": "python",
        "version": "3.6"
    },
    clientWML.runtimes.ConfigurationMetaNames.LIBRARIES_UIDS: [ custom_package_uid ]
}
# Store the runtime and keep its UID for the model definition below.
runtime_details = clientWML.runtimes.store( runtime_meta )
custom_runtime_uid = clientWML.runtimes.get_uid( runtime_details )
print("\n Detalhes do runtime armazenado:")
print(json.dumps(runtime_details, indent=4))
# -
# List all runtimes stored in your WML instance:
clientWML.runtimes.list()
# #### Criando uma nova definição de Pipeline personalizada no WML
#
# Finalmente iremos criar uma definição (metadados) para a nossa Pipeline ser hospedada no WML.
#
# Definimos como parâmetros um nome para o artefato e o ID do runtime criado anteriormente.
# Metadata (name, description, runtime UID) for the Pipeline artifact in WML.
model_meta = {
    clientWML.repository.ModelMetaNames.NAME: 'desafio-2-mbtc2020-pipeline-1',
    clientWML.repository.ModelMetaNames.DESCRIPTION: "my pipeline for submission",
    clientWML.repository.ModelMetaNames.RUNTIME_UID: custom_runtime_uid
}
# Next, call the method that stores the new definition:
# +
# Store the Pipeline definition in WML
stored_model_details = clientWML.repository.store_model(
    model=my_pipeline, # `my_pipeline` was created earlier and holds our trained Pipeline :)
    meta_props=model_meta, # Metadata defined in the previous cell
    training_data=None # Do not change this parameter
)
print("\n Lista de artefatos armazenados no WML:")
clientWML.repository.list()
# Details of the model hosted on Watson Machine Learning
print("\n Metadados do modelo armazenado:")
print(json.dumps(stored_model_details, indent=4))
# -
# #### Deploying your model for immediate consumption by other applications
# +
# The deployment is finally performed via the ``deployments.create()`` method
model_deployment_details = clientWML.deployments.create(
    artifact_uid=stored_model_details["metadata"]["guid"], # Do not change this parameter
    name="desafio-2-mbtc2020-deployment-1",
    description="Solução do desafio 2 - MBTC",
    asynchronous=False, # Do not change this parameter
    deployment_type='online', # Do not change this parameter
    deployment_format='Core ML', # Do not change this parameter
    meta_props=model_meta # Do not change this parameter
)
# -
# #### Testando um modelo hospedado no Watson Machine Learning
# +
# Retrieve the scoring endpoint URL of the deployment created above
model_endpoint_url = clientWML.deployments.get_scoring_url(model_deployment_details)
print("A URL de chamada da sua API é: {}".format(model_endpoint_url))
# +
# Details of the deployment just performed
deployment_details = clientWML.deployments.get_details(
    deployment_uid=model_deployment_details["metadata"]["guid"] # this is your deployment ID
)
print("Metadados do deployment realizado: \n")
print(json.dumps(deployment_details, indent=4))
# +
# Sample scoring payload: 'fields' must match the feature columns the model
# was trained with; 'values' holds one row per record to classify.
scoring_payload = {
    'fields': [
        "MATRICULA", "NOME", 'REPROVACOES_DE', 'REPROVACOES_EM', "REPROVACOES_MF", "REPROVACOES_GO",
        "NOTA_DE", "NOTA_EM", "NOTA_MF", "NOTA_GO",
        "INGLES", "H_AULA_PRES", "TAREFAS_ONLINE", "FALTAS",
    ],
    'values': [
        [
            513949,"<NAME>",1,1,1,1,4.3,9.0,9.1,4.9,0,3,4,3,
        ]
    ]
}
print("\n Payload de dados a ser classificada:")
print(json.dumps(scoring_payload, indent=4))
# +
# Call the scoring endpoint with the payload above
result = clientWML.deployments.score(
    model_endpoint_url,
    scoring_payload
)
print("\n Resultados:")
print(json.dumps(result, indent=4))
# -
# <hr>
#
# ## Parabéns!
#
# Se tudo foi executado sem erros, você já tem um classificador baseado em machine learning encapsulado como uma API REST!
#
# Para testar a sua solução integrada com um assistente virtual e realizar a submissão, acesse a página:
#
# https://uninassau.maratona.dev
#
# Você irá precisar da endpoint url do seu modelo e das credenciais do WML :)
|
uninassau/solução/parte-2.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda root]
# language: python
# name: conda-root-py
# ---
# +
# %load_ext autoreload
# %autoreload 2
from sccf.SCCF import SCCF
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
# Load the first frame of the sequence and display it.
frame_path = './datasets/surfer/0001.jpg'
frame = mpimg.imread(frame_path)
plt.figure()
plt.imshow(frame)
# -
# +
from image_utils import rgb2gray, pre_process, linear_mapping, window_func_2d, random_warp
n = 0.0 # Online learning rate parameter
l = 0.001 # gamma^2/v^2, where v controls the prior variance of the filter weight -- original comment was truncated after "gamma"; TODO confirm exact meaning
num_training = 500
rotation = False
# Initial target bounding box: center (x, y) and size (width, height) in pixels.
center_x = 300
center_y = 200
width = 100
height = 200
# Show the cropped target region that the tracker will be trained on.
plt.figure()
plt.imshow(frame[int(center_y - height/2):int(center_y + height/2),
                 int(center_x - width/2):int(center_x + width/2)])
plt.figure()
# Build the SCCF tracker and learn the correlation filter on the first frame.
tracker = SCCF(center_x, center_y, width, height, n, l)
target_2D, target_2D_norm = tracker.initialize(frame)
#tracker.set_filter(target_2D_norm)
tracker.train_filter(target_2D, target_2D_norm, num_training, rotation)
# +
# #%reload_ext autoreload
# Sanity check: run the tracker on an unmodified copy of the training frame
# (the response peak should be at the original target location).
new_frame = frame.copy()
plt.figure()
plt.imshow(new_frame)
new_target_region = new_frame[int(center_y - height/2):int(center_y + height/2),
                              int(center_x - width/2):int(center_x + width/2)]
plt.figure()
plt.imshow(new_target_region)
tracker.test(new_frame)
# +
# Second check: shift the frame 50 px to the left (np.roll along axis 1) and
# verify the tracker's response moves accordingly.
new_frame = np.roll(frame.copy(), -50, axis=1)
plt.figure()
plt.imshow(new_frame)
new_target_region = new_frame[int(center_y - height/2):int(center_y + height/2),
                              int(center_x - width/2):int(center_x + width/2)]
plt.figure()
plt.imshow(new_target_region)
tracker.test(new_frame)
|
Playground_SCCF.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # GEOG489 SP22 Midterm
# ---
# ### Question 1:
# Select a package that has a wrong definition/explanation.
#
# **a. `tqdm` is a comprehensive library for creating static, animated, and interactive visualizations in Python.**
#
# b. `Pandas` is a Python package providing fast, flexible, and expressive data structures designed to make working with “relational” or “labeled” data both easy and intuitive.
#
# c. `rasterio` reads and writes gridded raster datasets such as satellite imagery and terrain models with formats (e.g., GeoTIFF; *.tiff).
#
# d. `GeoPandas` is an open-source project to add support for geographic data to pandas objects.
#
# e. `NumPy` is a Python library that provides a multidimensional array object and supports scientific computing in Python.
#
# ---
# ---
# ### Question 2:
# Select the wrong information about Python from the following.
#
# **a. It requires a commercial license, so it often costs a moderate amount of money.**
#
# b. It is an intuitive language, so it is pretty easy to learn.
#
# c. It is a high-level language, so it can do a lot of tasks with a little amount of code.
#
# d. It is multiplatform so that it can run on various operating systems such as Mac, Windows, or Linux.
#
# e. It has robust support among object-oriented programming languages.
#
# ---
# ---
# ### Question 3:
# Suppose that you have a shapefile `county.shp` in the same directory with your Jupyter Notebook. Which GeoPandas function should you use to load it on your Jupyter notebook?
#
# ```python
# import geopandas as gpd
# ```
# **a. gpd.read_file('county.shp')**
#
# b. gpd.read_csv('county.shp')
#
# c. gpd.load_file('county.shp')
#
# d. gpd.read_json('county.shp')
#
# e. gpd.read_excel('county.shp')
#
# ---
# ---
# ### Question 4:
# Suppose you want to loop through the contents of a Pandas DataFrame `df` by its rows. Which of the following code should you use?
#
# a. df.iteritems()
#
# **b. df.iterrows()**
#
# c. df.loopitems()
#
# d. df.looprows()
#
# e. df.shape()
#
# ---
# ---
# ### Question 5:
# What is the data type of the following variable?
# ```python
# var = '<NAME>'
# ```
# a. Boolean
#
# b. Dictionary
#
# c. Integer
#
# **d. String**
#
# e. List
#
# ---
# ---
#
# ### Question 6:
# What is the data type of the following variable?
# ```python
# var = {'Key1': 'Variable1', 'Key2': 'Variable2'}
# ```
# a. List
#
# b. Tuple
#
# **c. Dictionary**
#
# d. Set
#
# e. String
#
# ---
# ### Question 7:
# What will be the data type of the result (i.e., type(result))?
# ```python
# num1 = 4
# num2 = 0.5
# num3 = 8
# result = num1 * num2 + num3
# ```
# a. Boolean
#
# b. Tuple
#
# c. Integer
#
# d. String
#
# **e. Float**
# ---
#
# ### Question 8:
# What will be the output of the following code?
# ```python
# print(bool(3))
# ```
# a. 'False'
#
# b. 0
#
# c. 'True'
#
# d. False
#
# **e. True**
#
# ---
# ---
# ### Question 9:
# What will be the result of the following code?
# ```python
# True == 'True'
# ```
# **a. False**
#
# b. No
#
# c. Yes
#
# d. True
#
# e. 0
#
# f. 1
#
# ---
# ---
#
# ### Question 10:
#
# Connect the proper geometry type with the classes in the `shapely` package.
#
# shapely.geometry.MultiPolygon() --- ***Collection of Polygons*** <br>
#
# shapely.geometry.Polygon() --- ***Polygon*** <br>
#
# shapely.geometry.MultiPoint() --- ***Collection of Points*** <br>
#
# shapely.geometry.MultiLineString() --- ***Collection of Lines*** <br>
#
# shapely.geometry.LineString() --- ***Line*** <br>
#
# shapely.geometry.Point() --- ***Point*** <br>
#
# ---
# ---
# ### Question 11:
# What will be the result of print(foo[-1]) based on the list `foo` below?
# ```python
# foo = [1., 2., 3, 'four', 'five', [6., 7., 8], 'nine']
# print(foo[-1])
# ```
# ---
# Negative index -1 selects the last element of the list.
foo = [1., 2., 3, 'four', 'five', [6., 7., 8], 'nine']
print(foo[-1])  # -> 'nine'
# ---
# ### Question 12:
# Based on the list `bar` below, what will be the result of `print(bar[::2])`?
#
# ```python
# bar = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
# print(bar[::2])
# ```
# ---
# Slice with step 2 takes every other element, starting at index 0.
bar = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
print(bar[::2])  # -> [0, 2, 4, 6, 8]
# ---
# ### Question 13:
# Based on the dictionary `dict_car` below, how can you slice `dict_car` to obtain the value “Toyota”?
# ```python
# dict_car = {
# "brand" : "Toyota",
# "model" : "Sienna",
# "year" : 2022
# }
# ```
# ---
# +
dict_car = {
    "brand" : "Toyota",
    "model" : "Sienna",
    "year" : 2022
}
# Dictionary values are retrieved by key.
print(dict_car["brand"])  # -> 'Toyota'
# -
# ---
# ### Question 14:
# Suppose you have a Pandas DataFrame `df`, and you obtain the result below. What does it mean?
# ```python
# print(df.shape)
# >>> (10, 5)
# ```
# ---
# +
import pandas as pd
# .shape returns (rows, columns): (10, 5) means 10 rows and 5 columns.
df = pd.DataFrame(index=[i for i in range(10)], columns=[f'col_{j}' for j in range(5)])
print(df.shape)
df
# -
# ---
# ### Question 15:
# What will be the output of the following print function?
# ```python
# name = '<NAME>'
# course = 'GEOG489'
# print(f'This is {name}. I enjoy teaching {course}.')
# ```
# ---
# The f-string interpolates both variables into the printed message.
name = '<NAME>'
course = 'GEOG489'
print(f'This is {name}. I enjoy teaching {course}.')
# ---
# ### Question 16:
# Suppose you are converting a Pandas DataFrame to a GeoPandas GeoDataFrame. Fill in the missing information for the `geometry` attribute based on the provided DataFrame `capitals` below. Copy and Paste the entire code in the cell and fill in your answer at [YOUR ANSWER HERE].
# ```python
# # Your answer here
# capitals_gdf = gpd.GeoDataFrame(capitals, geometry= [YOUR ANSWER HERE])
# ```
# ---
# DataFrame of capitals with plain Latitude/Longitude columns (no geometry yet).
capitals = pd.DataFrame({'City': ['Buenos Aires', 'Brasilia', 'Santiago', 'Bogota', 'Caracas'],
                         'Country': ['Argentina', 'Brazil', 'Chile', 'Colombia', 'Venezuela'],
                         'Latitude': [-34.58, -15.78, -33.45, 4.60, 10.48],
                         'Longitude': [-58.66, -47.91, -70.66, -74.08, -66.86]}
                        )
capitals
# +
import geopandas as gpd
# points_from_xy expects x=longitude, y=latitude to build Point geometries.
capitals_gdf = gpd.GeoDataFrame(capitals, geometry=gpd.points_from_xy(capitals.Longitude, capitals.Latitude))
capitals_gdf
# -
# ---
# ### Question 17:
# Suppose you are making a figure with 6 maps (i.e., 2 columns and 3 rows). Complete the following code of making multiple Axes with a Figure. Copy and Paste the entire code in the cell and fill in your answer at [YOUR ANSWER HERE].
#
# ```python
# import matplotlib.pyplot as plt
#
# # Your answer here
# fig, axes = plt.subplots(nrows=[YOUR ANSWER HERE], ncols=[YOUR ANSWER HERE])
# plt.show()
# ```
# ---
# +
import matplotlib.pyplot as plt
# Your answer here
# 3 rows x 2 columns = 6 Axes in one Figure.
fig, axes = plt.subplots(nrows=3, ncols=2)
plt.show()
# -
# ---
# ### Question 18:
# Suppose you conduct a spatial join between two GeoDataFrame (`fire` with points and `states` with polygon) and count the number of fires in each state.Number the following five tasks in a reasonable manner.
#
# ---
# Cell: Load data
states = gpd.read_file('./midterm_data/states.json')
fire = gpd.read_file('./midterm_data/fires_usgs.shp')
# Cell: Match Coordinate Reference System (CRS)
# (both layers must share a CRS before a spatial join is valid)
fire = fire.to_crs(epsg=4326)
states = states.to_crs(epsg=4326)
# Cell: Conduct spatial join (pairs each fire point with its containing state)
sjoin_result = gpd.sjoin(states, fire)
# Cell: Calculate fire counts per state
counts_per_state = sjoin_result.groupby('name').size()
counts_per_state = counts_per_state.to_frame(name='fire_count')
# Cell: Merge fire counts to the original `state` GeoDataFrame
states = states.merge(counts_per_state, left_on='name', right_on='name')
# ---
# ### Question 19:
# Suppose you want to visualize a Choropleth map showing the number of fires in each state with the GeoDataFrame `states` below. Modify the following code to satisfy all the requirements provided below.
#
# * Requirements
# * Color map: Reds
# * Show Legend
# * Classification Scheme: FisherJenks
# * Number of Classes: 5
# <br><br>
# * Code Skeleton
# ```python
# # Your answer here
# states.plot(column=[YOUR ANSWER HERE],
# cmap=[YOUR ANSWER HERE],
# legend=[YOUR ANSWER HERE],
# scheme=[YOUR ANSWER HERE],
# k=[YOUR ANSWER HERE]
# )
# ```
# ---
states.head()
# Choropleth of fire counts: Reds color map, legend shown,
# FisherJenks classification with k=5 classes.
states.plot(column='fire_count',
            cmap='Reds',
            legend=True,
            scheme='FisherJenks',
            k=5
           )
# ---
# ### Question 20:
# Suppose you have a GeoDataFrame like the one below.
#
# <img src="./midterm_data/q20.jpg" width=500 />
#
#
# 1) With what kind of code can you set the index of the GeoDataFrame with the column name `postal`?
#
# 2) What is the purpose of changing the index of a GeoDataFrame/DataFrame?
#
# ---
#
# **Answer**:
# 1) states = states.set_index('postal') <br>
# 2) Setting up an index often uses meaningful strings or numbers instead of random numbers so that it helps to locate a specific row in the future analysis.
# Keep the relevant columns, then index rows by the two-letter postal code.
states = states[['name', 'postal', 'region', 'fire_count', 'geometry']]
states.head()
states = states.set_index('postal')
states.head()
# ---
# ### Question 21:
# Describe the difference between Pandas DataFrame and GeoPandas GeoDataFrame in the context of their `.plot()` function.
#
# **Answer**: <br>
# The main difference between the two types of dataframes is the existence of `geometry` column. The column helps GeoDataFrame to create a map when it uses `.plot()` function, whereas DataFrame only creates numerical graphs since it does not have that column.
#
# ---
# DataFrame.plot() draws a numeric chart; GeoDataFrame.plot() draws a map
# from its `geometry` column.
capitals.plot()
capitals_gdf.plot()
# ---
#
# ### Question 22:
# Interpret the following list comprehension.
#
# ```python
# foo = range(10)
# print(sum([val for val in foo if val%3 == 0]))
# ```
# 1) Write the expected result (i.e., number)
#
# 2) Explain why the result will be obtained.
#
# ---
# Only multiples of 3 in range(10) pass the filter: 0+3+6+9 = 18.
foo = range(10)
print(sum([val for val in foo if val%3 == 0]))
# **Answer 22.2**:
# 1) The list comprehension iterates over every value in `foo`, which is 0, 1, 2, 3, 4, 5, 6, 7, 8, 9.<br>
# 2) Among them, the `if` statement selects only the values that have no remainder when divided by 3, which gives 0, 3, 6, 9. <br>
# 3) The `sum()` function adds 0, 3, 6, 9, which makes 18.
# ---
# ### Question 23:
#
# Suppose a variable `df` is a Pandas DataFrame. Describe the difference between `df.iloc[]` and `df.loc[]`.
#
# ---
# **Answer**:
# * df.iloc[] is primarily integer position based (from 0 to length-1 of the axis), but may also be used with a boolean array.
# * df.loc[] is primarily label based, but may also be used with a boolean array.
#
#
states.head()
# iloc: integer-position indexing (3rd row, 1st column).
states.iloc[2, 0]
# loc: label-based indexing (row 'AZ', column 'name').
states.loc['AZ', 'name']
# ---
# ### Question 24:
#
# Interpret the following code. Replace the text [YOUR ANSWER HERE] with your own interpretation and write up the purpose of each chunk.
# Suppose the variable `hc` (i.e., healthcare resources) is a GeoPandas GeoDataFrame having point geometry, and the variable `cbg` (i.e., census block groups) is a GeoPandas GeoDataFrame having polygon geometry, as shown below.
# Copy and Paste the entire code in the cell and fill in your answer at [YOUR ANSWER HERE].
#
# ```python
# dist = 8000
#
# # [YOUR ANSWER HERE]
# hc_buffer = hc.geometry.buffer(dist)
#
# # [YOUR ANSWER HERE]
# within_cbg = []
# for idx, buf in hc_buffer.iteritems():
# temp_gdf = cbg.loc[cbg.geometry.within(buf)]
# within_cbg.extend(temp_gdf['GEOID'].to_list())
#
# # [YOUR ANSWER HERE]
# y_buffer = cbg.loc[cbg['GEOID'].isin(set(within_cbg))]
# n_buffer = cbg.loc[~cbg['GEOID'].isin(set(within_cbg))]
# ```
# ---
# +
# Import data
hc = gpd.read_file('./midterm_data/healthcare.shp') # Healthcare resources (point geometry)
cbg = gpd.read_file('./midterm_data/census_block_group.shp') # Census block groups (polygon geometry)
# Change to a local projected CRS so buffer distances are in meters
hc = hc.to_crs(epsg=26971)
cbg = cbg.to_crs(epsg=26971)
# +
dist = 8000
# Create an 8 km buffer around each healthcare resource
hc_buffer = hc.geometry.buffer(dist)
hc_buffer.plot()
# +
# Collect GEOIDs of Census Block Groups that fall entirely within any buffer
within_cbg = []
for idx, buf in hc_buffer.iteritems():
    temp_gdf = cbg.loc[cbg.geometry.within(buf)]
    within_cbg.extend(temp_gdf['GEOID'].to_list())
print(set(within_cbg))
# +
# Slice the original Census Block Group GeoDataFrame into accessible (inside a
# buffer) and non-accessible (outside every buffer) subsets.
y_buffer = cbg.loc[cbg['GEOID'].isin(set(within_cbg))]
n_buffer = cbg.loc[~cbg['GEOID'].isin(set(within_cbg))]
y_buffer
# +
# Plot results: buffers and facilities in blue on top of the colored block groups
import matplotlib.pyplot as plt
fig, ax = plt.subplots(figsize=(7, 10))
hc_buffer.boundary.plot(ax=ax, color='blue', lw=0.5, zorder=2)
y_buffer.plot(ax=ax, color='#ef8a62', zorder=1)
n_buffer.plot(ax=ax, color='#bababa', zorder=1)
hc.plot(ax=ax, markersize=50, color='blue', zorder=2)
cbg.boundary.plot(ax=ax, linestyle='dotted', lw=0.5, color='black', zorder=1)
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
# -
# ---
# ### Question 25:
# Suppose you have four bands (2, 3, 4, 5) of Landsat8 Satellite image with the name below.
# * The file name of the blue band: Landsat8_band2.TIF
# * The file name of the green band: Landsat8_band3.TIF
# * The file name of the red band: Landsat8_band4.TIF
# * The file name of the near infra-red (NIR) band: Landsat8_band5.TIF
#
# Complete the following code so that the variable `arr_bands` is ready to be fed to the `earthpy.plot.plot_rgb()` function for displaying True and False color images.
# Copy and Paste the entire code in the cell and fill in your answer at [YOUR ANSWER HERE].
#
# ```python
# import rasterio as rio
#
# l_bands = [] # Empty list for storing Numpy array
#
# for i in [2, 3, 4, 5]: # Blue, Green, Red, Near-infrared Bands
# temp_path = [YOUR ANSWER HERE] # define image path; utilize f-string approach for changing file name
# temp_band = [YOUR ANSWER HERE] # Import image into rasterio package
# l_bands.append(temp_band.read(1)) # Convert image to Numpy array and append it to the empty list
#
# arr_bands = [YOUR ANSWER HERE] # Now convert the list to a 3D array.
# ```
# ---
# +
import rasterio as rio
import numpy as np
l_bands = [] # Empty list for storing Numpy array
for i in [2, 3, 4, 5]: # Blue, Green, Red, Near-infrared Bands
    temp_path = f'./midterm_data/Landsat8_band{i}.TIF' # define image path; utilize f-string approach for changing file name
    temp_band = rio.open(temp_path) # Import image into rasterio package
    l_bands.append(temp_band.read(1)) # Convert image to Numpy array and append it to the empty list
arr_bands = np.stack(l_bands) # Now convert the list to a 3D array (bands, rows, cols).
# -
import earthpy.plot as ep
# True-color composite: rgb=(2,1,0) maps Red/Green/Blue from arr_bands' band order.
ep.plot_rgb(arr_bands,
            rgb=(2,1,0), # Indices of the three bands to be plotted.
            str_clip=0.5, # (Optional) The percentage of clip to apply to the stretch.
            figsize =(10,10), # The x and y integer dimensions of the output plot.
            stretch=True # (Optional) If set to True, a linear stretch will be applied.
           )
plt.show()
|
Exam/GEOG489 SP22 Midterm.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
def forced_choice_permutation_accuracy(data1, data2, model, n_permutations=10000, metric='correlation'):
    '''Compute one-tailed p-value for forced-choice accuracy using a permutation test.

    For each subject, the model's pattern similarity to Condition1 is compared to
    its similarity to Condition2; accuracy is the fraction of subjects for whom
    Condition1 wins. The null distribution is built by randomly flipping the
    condition labels within each subject.

    Args:
        data1: (Brain_Data) Brain_Data instance of Condition1 (must be same subject order across conditions)
        data2: (Brain_Data) Brain_Data instance of Condition2 (must be same subject order across conditions)
        model: (Brain_Data) Brain_Data instance of model to test on each condition
        n_permutations: (int) Number of permutations to run, default 10,000
        metric: (str) similarity metric passed to Brain_Data.similarity, default 'correlation'
    Returns:
        dict: Dictionary with accuracy, one-tailed p-value, null distribution,
              and the per-subject pattern similarities.
    Raises:
        ValueError: if the two conditions have different numbers of subjects.
    '''
    # Bug fix: the original compared len(data1) != len(data1), which is always
    # False, so mismatched condition sizes were never caught.
    if len(data1) != len(data2):
        raise ValueError('Conditions have a different number of subjects.')
    pexp = pd.DataFrame({'Condition1': data1.similarity(model, method=metric),
                         'Condition2': data2.similarity(model, method=metric)})
    # Observed forced-choice accuracy across subjects.
    accuracy = np.mean(pexp['Condition1'] > pexp['Condition2'])
    null = []
    for _ in range(n_permutations):
        # Randomly swap the two condition labels within each subject.
        null_iteration = []
        for i in range(len(pexp)):
            choice = np.random.choice([0, 1])
            null_iteration.append(pexp.iloc[i, choice] > pexp.iloc[i, 1 - choice])
        null.append(np.mean(null_iteration))
    # One-tailed p: proportion of null accuracies >= the observed accuracy.
    p = 1 - np.mean(accuracy > null)
    return {'accuracy': accuracy, 'p': p, 'null': null, 'pattern_similarity': pexp}
|
Codes/fMRI_analyses/Forced_choice_permutation_accuracy.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow2_p36
# language: python
# name: conda_tensorflow2_p36
# ---
# # Cifar10: TF Custom training: walkthrough
#
#
# # 0. 환경 셋업
# +
# # ! pip install tensorflow-gpu==2.4.1
# +
import tensorflow as tf
import numpy as np
import os
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
print(tf.__version__)
# -
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
    # Restrict TensorFlow to a single GPU.
    # NOTE(review): the original (Korean) comment said "use only the *first* GPU",
    # but gpus[7] selects the eighth device -- confirm which index is intended.
    try:
        tf.config.experimental.set_visible_devices(gpus[7], 'GPU')
    except RuntimeError as e:
        # Visible devices must be set at program startup, before GPUs are initialized.
        print(e)
# # 1. 데이터 준비
# +
# CIFAR-10 image geometry and dataset-size constants.
HEIGHT = 32
WIDTH = 32
DEPTH = 3
NUM_CLASSES = 10
NUM_DATA_BATCHES = 5
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 10000 * NUM_DATA_BATCHES
INPUT_TENSOR_NAME = 'inputs_input' # needs to match the name of the first layer + "_input"
def get_filenames(channel_name, channel):
    """Return the TFRecord path list for a given data channel.

    Args:
        channel_name: one of 'train', 'validation', or 'eval'.
        channel: directory containing the channel's TFRecord file.

    Returns:
        A single-element list: '<channel>/<channel_name>.tfrecords'.

    Raises:
        ValueError: for any other channel name.
    """
    if channel_name not in ('train', 'validation', 'eval'):
        raise ValueError('Invalid data subset "%s"' % channel_name)
    return [os.path.join(channel, channel_name + '.tfrecords')]
def _input(epochs, batch_size, channel, channel_name):
    """Build a tf.data pipeline of parsed, shuffled, batched CIFAR-10 records.

    Args:
        epochs: unused -- the dataset is always repeated once (see note below).
        batch_size: records per batch; the final partial batch is dropped.
        channel: directory containing '<channel_name>.tfrecords'.
        channel_name: 'train', 'validation', or 'eval'.

    Returns:
        A tf.data.Dataset yielding (image, label) batches.
    """
    print(f"\nChannel Name: {channel_name}\n")
    filenames = get_filenames(channel_name, channel)
    dataset = tf.data.TFRecordDataset(filenames)
    #dataset = dataset.interleave(tf.data.TFRecordDataset, cycle_length=3)
    # NOTE(review): counting records by full iteration reads the entire file
    # once per call -- expensive for large datasets.
    ds_size = sum(1 for _ in dataset)
    # print("# of batches loading TFRecord : {0}".format(tf.data.experimental.cardinality(dataset).numpy()))
    print("# of batches loading TFRecord : {0}".format(ds_size))
    # Parse records.
    dataset = dataset.map(_dataset_parser, num_parallel_calls=10)
    # NOTE(review): `epochs` is ignored; repeat(1) keeps a single pass -- TODO confirm intended.
    dataset = dataset.repeat(1)
    # Potentially shuffle records.
    if channel_name == 'train':
        # Ensure that the capacity is sufficiently large to provide good random
        # shuffling. Here the whole dataset fits in the shuffle buffer.
        # buffer_size = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN * 0.4) + 3 * batch_size
        buffer_size = ds_size
        dataset = dataset.shuffle(buffer_size=buffer_size)
        print("buffer_size: ", buffer_size)
    # Batch it up.
    dataset = dataset.batch(batch_size, drop_remainder=True)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
def _train_preprocess_fn(image):
    """Apply training-time augmentation to one [height, width, depth] image."""
    # Pad by four pixels on every side, then take a random HEIGHT x WIDTH crop.
    padded = tf.image.resize_with_crop_or_pad(image, HEIGHT + 8, WIDTH + 8)
    cropped = tf.image.random_crop(padded, [HEIGHT, WIDTH, DEPTH])
    # Randomly mirror the image horizontally.
    return tf.image.random_flip_left_right(cropped)
def _dataset_parser(value):
    """Parse a CIFAR-10 record from value into an (image, label) pair."""
    featdef = {
        'image': tf.io.FixedLenFeature([], tf.string),
        'label': tf.io.FixedLenFeature([], tf.int64),
    }
    example = tf.io.parse_single_example(value, featdef)
    # Raw bytes -> flat uint8 vector of length DEPTH * HEIGHT * WIDTH.
    image = tf.io.decode_raw(example['image'], tf.uint8)
    image.set_shape([DEPTH * HEIGHT * WIDTH])
    # Reshape from [depth * height * width] to [depth, height, width],
    # then transpose to channels-last [height, width, depth] and cast to float.
    image = tf.cast(
        tf.transpose(tf.reshape(image, [DEPTH, HEIGHT, WIDTH]), [1, 2, 0]),
        tf.float32)
    label = tf.cast(example['label'], tf.int32)
    # NOTE(review): training augmentation is applied to every channel here,
    # including validation/eval -- confirm this is intended.
    image = _train_preprocess_fn(image)
    return image, label
    # return image, tf.one_hot(label, NUM_CLASSES)
def save_model(model, output):
    """Export a trained model in TF SavedModel format under <output>/1/.

    The '/1/' suffix is the version directory convention expected by
    TensorFlow Serving.

    Args:
        model: trained tf.keras model (or tf.Module) to export.
        output: base output directory.
    """
    # Bug fix: `logging` was never imported in this notebook, so the call below
    # raised NameError. Import locally to keep the notebook's top cells unchanged.
    import logging
    tf.saved_model.save(model, output+'/1/')
    logging.info("Model successfully saved at: {}".format(output))
    return
# -
# Directories holding the pre-generated TFRecord files for each channel.
train_dir = '../../data/cifar10/train'
validation_dir = '../../data/cifar10/validation'
eval_dir = '../../data/cifar10/eval'
# +
# Build the same train dataset three times to demonstrate that each call
# produces an independently shuffled pipeline.
train_dataset = _input(5, 8, train_dir, 'train')
train_batch_size = sum(1 for _ in train_dataset)
print("# of batches in train: ", train_batch_size)
train_dataset2 = _input(5, 8, train_dir, 'train')
train_batch_size = sum(1 for _ in train_dataset2)
print("# of batches in train: ", train_batch_size)
train_dataset3 = _input(5, 8, train_dir, 'train')
train_batch_size = sum(1 for _ in train_dataset3)
print("# of batches in train: ", train_batch_size)
# +
# Print the first batch of labels from each pipeline -- the label orders should
# differ because of shuffling.
batch_num = 1
for images, labels in train_dataset.take(batch_num):
    labels = labels.numpy()
    print(labels)
#    print(labels.numpy().mean())
    break
batch_num = 1
for images, labels in train_dataset2.take(batch_num):
    labels = labels.numpy()
    print(labels)
#    print(labels.numpy().mean())
    break
batch_num = 1
for images, labels in train_dataset3.take(batch_num):
    labels = labels.numpy()
    print(labels)
#    print(labels.numpy().mean())
    break
# +
# Final datasets used for training (batch 256) and evaluation (one big batch).
train_dataset = _input(5, 256, train_dir, 'train')
train_batch_size = sum(1 for _ in train_dataset)
print("# of batches in train: ", train_batch_size)
validation_dataset = _input(5, 10000, validation_dir, 'validation')
validation_batch_size = sum(1 for _ in validation_dataset)
print("# of batches in validation: ", validation_batch_size)
eval_dataset = _input(5, 10000, eval_dir, 'eval')
eval_batch_size = sum(1 for _ in eval_dataset)
print("# of batches in eval: ", eval_batch_size)
# -
# # 2. 모델 정의
# Simple CNN: two conv layers -> max-pool -> dropout -> dense softmax head
# over the 10 CIFAR-10 classes.
model = tf.keras.Sequential(
    [
        tf.keras.layers.Conv2D(32, [3, 3], activation="relu"),
        tf.keras.layers.Conv2D(64, [3, 3], activation="relu"),
        tf.keras.layers.MaxPooling2D(pool_size=(2, 2)),
        tf.keras.layers.Dropout(0.25),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.Dropout(0.5),
        tf.keras.layers.Dense(10, activation="softmax"),
    ]
)
# # 3. 모델 생성
# ## Gradient 생성 함수 정의
# +
# Sparse loss: labels are integer class IDs (not one-hot). Adam with defaults.
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam()
@tf.function
def train_step(images, labels):
    """One optimization step: forward pass, loss, backprop, weight update."""
    with tf.GradientTape() as tape:
        predictions = model(images)
        loss = loss_object(labels, predictions)
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    return loss
# +
# Global evaluation metrics, accumulated across batches.
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
#test_accuracy = tf.keras.metrics.CategoricalCrossentropy(name='test_accuracy')
@tf.function
def test_step(images, labels):
    """Accumulate loss/accuracy for one batch into the global metrics."""
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    # NOTE(review): inside @tf.function a Python print runs only at trace time.
    print("t_loss: ", t_loss)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
# +
@tf.function
def test_step2(test_loss,test_accuracy, images, labels):
    """Same as test_step, but the metric objects are passed in explicitly."""
    predictions = model(images)
    t_loss = loss_object(labels, predictions)
    print("t_loss: ", t_loss)
    test_loss(t_loss)
    test_accuracy(labels, predictions)
# -
# # 4. 모델 훈련
# +
print_interval = 200
EPOCHS = 2
for epoch in range(EPOCHS):
    # --- Training pass ---
    for batch, (images, labels) in enumerate(train_dataset):
        loss_value = train_step(images, labels)
        if batch % print_interval == 0:
            print("Step #%d\tLoss: %.6f" % (batch, loss_value))
    # Reset the metrics at the start of the next epoch
    test_loss.reset_states()
    test_accuracy.reset_states()
    # --- Validation pass ---
    for test_images, test_labels in validation_dataset:
#         test_step(test_images, test_labels)
        test_step2(test_loss, test_accuracy, test_images, test_labels)
    template = 'Epoch {}, Test Loss: {}, Test Accuracy: {}'
    print(template.format(epoch+1,
                          test_loss.result(),
                          test_accuracy.result()*100))
print('Training Finished.')
# # 5. Inference
# +
# Reset the metrics before evaluating on the eval split.
test_loss.reset_states()
test_accuracy.reset_states()
n_batch = 10  # unused here; only referenced by the commented-out loop below
#for batch_id, (test_images, test_labels) in enumerate(validation_dataset.take(n_batch)):
for batch_id, (test_images, test_labels) in enumerate(eval_dataset):
    print(batch_id)
    test_step(test_images, test_labels)
    print(
        # f'Epoch {epoch + 1}, '
        f'Test Loss: {test_loss.result()}, '
        f'Test Accuracy: {test_accuracy.result() * 100}'
    )
    break  # evaluate a single batch only
# -
|
code/phase0/working/TF-WalkThrough/Z.REF-CIFAR10-Training.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:root]
# language: python
# name: conda-root-py
# ---
# # Nystrom Tica aka Spectral-OASIS tutorial
# Nystrom Tica is based on the Nyström matrix operation theory, which can approximately reconstruct the time-lagged covariance matrix of all input features while using only a subset of features as input. Given an initial input feature set, Spectral-oASIS samples a subset of these features that best reconstructs the leading eigenfunctions of the time lagged covariance matrix obtained from MD simulations
#
# The end result of Nystrom tica is the same as Sparse Tica but nystrom tica uses different math to get there
#
# Nystrom tica is implemented in PyEmma but it is missing from the documentation
#
# See the original paper here:
#
# <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>.; <NAME>
# Rapid Calculation of Molecular Kinetics Using Compressed Sensing
# J. Chem. Theory Comput. 2018, 14 (5), 2771−2783
# https://pubs.acs.org/doi/10.1021/acs.jctc.8b00089?ref=PDF
#
# Xuhui Huang spoke very highly of Nystrom tica. See how his lab uses it in their MSM workflow here:
#
# <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>
# Markov State Models to Study the Functional Dynamics of Proteins in the Wake of Machine Learning
# JACS Au 2021
# https://doi.org/10.1021/jacsau.1c00254
# For this tutorial I will use the D. E. Shaw main protease trajectory as a test system. All pairwise alpha-carbon distances would be ~48,000 distances — far too many to fit into memory.
#
# I will first load a set of all phi and psi dihedrals (1220 total) and see if I can reduce the number of distances while still keeping a high implied timescale
#
# Second, I will see how many distances I can throw at Nystrom tica before it crashes due to memory problems
#
# For more details on the test system see:
# https://github.com/jgpattis/Desres-sars-cov-2-apo-mpro
import pyemma.coordinates as coor
import numpy as np
import pandas as pd
import time
import matplotlib.pyplot as plt
# %matplotlib inline
# +
# load distance data I had already calculated and saved
# (one HDF5 feature file per protein chain; presumably coor.load returns a
#  list of per-trajectory arrays, so "+" concatenates the two lists — confirm)
data_path = '/media/tun37047/HD/tun37047/covid19/desres-protease/monomer_global_apo_msm'
data_a = coor.load(f'{data_path}/feature_data_02/backbone_chain_0.h5')
data_b = coor.load(f'{data_path}/feature_data_02/backbone_chain_1.h5')
data = data_a + data_b
# -
# The benefit of Nystrom tica is that you let tica do the hard work of deciding which features are important
#
# I would recommend more general featurizations as input. Depending on your system size, things like all pairwise heavy-atom distances, all pairwise alpha-carbon and beta-carbon distances, or all pairwise alpha-carbon distances
#
# for examples of these featurizations see:
# https://github.com/vvoelz/msm-best-practices/wiki/03-Featurization
# or the PyEmma tutorial
# Here I am workin with 200 trajectories
len(data)
# Each trajectory is 1 microsecond long (1000 frames) with
# 1 ns between frames and 1220 features(sin and cos products of backbone phi and psi dihedral angles)
data[0].shape
# Nystrom tica is not in the PyEmma documentation
# so to look at the inputs and outputs look at the PyEmma github:
# https://github.com/markovmodel/PyEMMA/blob/devel/pyemma/coordinates/transform/nystroem_tica.py#L51
#
# or alternativly print the doc string
# The docstring is long so use prnt to format it nice
print(coor.tica_nystroem.__doc__)
# These options are very similar to regular tica except:
#
# max_columns: How many features to use. I would recomend making an informed decision by plotting different options vs implied timescale (shown below)
#
# nsel: How many columns to scan through at once. The origonal paper uses 20. The lower nsel is the better the tica approximation should be but the longer the calculation takes. As seen below larger numbers here work well
# Benchmark three nsel settings (10 / 20 / 30) at fixed max_columns=200, lag=15.
start = time.time()
Ntica = coor.tica_nystroem(max_columns=200, data=data, lag=15, nsel=10)
end = time.time()
print(f'This took {np.round(end-start)} seconds')
start = time.time()
Ntica_20 = coor.tica_nystroem(max_columns=200, data=data, lag=15, nsel=20)
end = time.time()
print(f'This took {np.round(end-start)} seconds')
start = time.time()
Ntica_30 = coor.tica_nystroem(max_columns=200, data=data, lag=15, nsel=30)
end = time.time()
print(f'This took {np.round(end-start)} seconds')
# Compare the top five implied timescales across the three nsel settings.
fig, ax = plt.subplots()
nsel_list = [10, 20, 30]
tica_trials = [Ntica, Ntica_20, Ntica_30]
for i in range(5):  # top 5 timescales
    temp = [k.timescales[i] for k in tica_trials]
    ax.plot(nsel_list, temp, label=f'ts {i + 1}')
# FIX: labels were set but the legend was never drawn (the analogous plot
# later in this notebook calls ax.legend()).
ax.legend()
ax.set_xlabel('nsel option', fontsize=16)
ax.set_ylabel('Timescale (ns)', fontsize=16)
# in the above, implied timescales go up with increasing nsel, which is the opposite of what I was expecting, and I believe the opposite of what it should be.
#
# I believe this means that would could keep increasing nsel for a nice speedup of the calculation without loss of accuracy
#
# I will learn later that larger values of max_columns also slows down the calculation
# Larger numbers of nsel affects the resolution of max_columns
# Report how many columns each nsel trial actually retained.
print(f'Features in Ntica, Ntica_20, Ntica_30: {len(Ntica.column_indices)} {len(Ntica_20.column_indices)} {len(Ntica_30.column_indices)}')
# Ok now lets look at the tica components
Ntica_30.get_output()
# So Nystrom tica is refusing to give me the tica components it calculated. I tried lots of different options and was unable to figure out what was going wrong. This is really bad but I will show a work around later
#
# Next lets make a plot of implied timescales vs max_columns to make an informed decision on how sparce we can go
# +
# I would recommend exponential scaling for the column list
column_list = [10, 25, 50, 75, 100, 150, 200, 250, 300, 400, 500, 700, 900]
out_list = []           # timescale arrays, one per max_columns trial
true_column_list = []   # actual number of columns each trial retained
start = time.time()
for i in column_list:
    temp_ntica = coor.tica_nystroem(max_columns=i, data=data, lag=15, nsel=30)
    out_list.append(temp_ntica.timescales)
    true_column_list.append(len(temp_ntica.column_indices))
end = time.time()
print(f'This took {np.round(end-start)} seconds')
# -
# Regular (dense) tICA as the reference point: all 1220 features.
reg_tica = coor.tica(data, lag=15)
out_list.append(reg_tica.timescales)
column_list.append(1220) # full number of features
true_column_list.append(1220)
# The true number of columns is not what I input for max_columns
# instead it is around max_columns
true_column_list
# Implied timescales vs. the actual number of retained columns, with the
# dense-tICA result (1220 columns) as the rightmost point.
fig, ax = plt.subplots()
for i in range(5): # plot the top 5 timescales
    temp = [out_list[k][i] for k in range(len(out_list))]
    ax.plot(true_column_list, temp, label=f'ts {i +1}')
ax.legend()
ax.set_xlabel('Cloumns (features) in tica output', fontsize=16)
ax.set_ylabel('Timescale (ns)', fontsize=16)
# In the above plot, implied timescales increase with increasing numbers of features. From this we can make a decision on how much 'information' we are willing to lose to make tica simpler and easier to interpret. These two things are a tradeoff, one for the other.
#
# In this plot I personally see two good choices:
#
# 721 features: (aka the free lunch) at 721 features there is a very tiny decrease in timescales from regular (full) tica. With this option we would reduce the number of features by 43% while loosing very little kinetic information (we still describe the motions in the main protease very well). This is what the theory paper suggests a optimal, that you look for the asymptote of the curve
#
# 421 features: (aka the compromize) at 421 features there is a signifigent decrease in timescae of the first three tICs, but it's not quite as dramatic as lower than 400 features. This is a happy medium where we are not describing the motions quite as well as we could be but that allows for a 67% reduction in features
#
# Lower that 421 features in my opinion is a bad option as too much kenitic information is lost
#
# See the two publications I cite at the top of this tutorial for more examples
#
# Your results may vary, but this plot helps you make an informed decision
# ok now lets figure out how to actually get tica components
# run nystrom tica with the optimum output of 700 features
start = time.time()
Ntica_700 = coor.tica_nystroem(max_columns=700, data=data, lag=15, nsel=30)
end = time.time()
# FIX: the timing line was accidentally duplicated; print it once.
print(f'This took {np.round(end-start)} seconds')
# notice that moving from a max_columns of 200 to 700 slowed down the calculation from 22 seconds to 82 seconds
# this object contains a column_indices attribute which gives
# the index of the columns retained
Ntica_700.column_indices.shape
# interesting that it isn't exactly 700??
# Maybe I should be plotting against column_indices
# but anyway, lets now filter my original dataset
Ntica_700.column_indices
# go through your list of trajectories and filter each one by our new set of features
filtered_data = [i[:,Ntica_700.column_indices] for i in data]
data[0].shape
# the first dimension is frames
# the second dimension is features
# check to make sure you filtered along the correct dimension
filtered_data[0].shape
# +
# I have not loaded my PyEmma featurizer object at the beginning because I did this earlier and saved it
# but you could filter your feature description so you know what these features are
pdb = f'{data_path}/DESRES_protease_chainid.pdb'
''' Bachbone Phi and Psi torsion angles '''
featurizer = coor.featurizer(pdb)
featurizer.add_backbone_torsions(cossin=True, selstr=f'chainid == 0')
featurizer.describe()[:20]
# -
# Map the retained column indices back to human-readable feature names.
new_discriptions = [featurizer.describe()[i] for i in Ntica_700.column_indices]
# now we know which features were included
new_discriptions[:20]
# +
# now lets put the filtered dataset into regular tica
# NOTE(review): commute_map=True presumably scales ICs for commute distances
# rather than kinetic distances — see the pyemma.coordinates.tica docs.
regular_tica_700 = coor.tica(filtered_data, lag=15, kinetic_map=False, commute_map=True, var_cutoff=0.85)
# -
tica_output = regular_tica_700.get_output()
# Success
# you can now plot this and cluster this
tica_output
# +
# let's make a nice plot because we can
from pyemma.plots import plot_density
cat_output = np.concatenate(tica_output)  # stack all trajectories for the density plot
fig, ax, cb = plot_density(cat_output[:,0], cat_output[:,1])
ax.set_xlabel('IC 1', fontsize=16)
ax.set_ylabel('IC 2', fontsize=16)
# Everything done so far could also be done with sparse tica (avialable in MSMBuilder)
#
# One downside of Tica and Sparse Tica is that they require N_features^3 memory. Sparse Tica makes the full covariance matrix then kicks features out to find a sparse solution. Nystrom tica builds the covariance matrix a little bit at a time and should in theory be able to handle more features.
#
# Regular tica will crash on this computer (with 128 GB of RAM) at ~9000 features. Let's see how high Nystrom tica can go
# # Warning!! The next section may crash your computer, save anything important, and procede at your own risk ... or better yet just look at my results and don't do this
# +
# Let's use pairwise CA distances
traj_num = [f'{i:04d}' for i in range(100)]  # trajectory file suffixes 0000..0099
traj_path = '/../DESRES-Trajectory_sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA/sarscov2-10880334-no-water-no-ion-glueCA-'
traj_list = [ data_path + traj_path + str(i) + '.dcd' for i in traj_num]
feat = coor.featurizer(pdb)
# all CA-CA pairs of chain 0, skipping the 3 nearest sequence neighbours
feat.add_distances(feat.pairs(feat.select('name == CA and chainid == 0'), excluded_neighbors=3))
traj = coor.load(traj_list, feat, stride=5)
traj_cat = np.concatenate(traj)
# -
# working with source also helps minimize RAM usage
# NOTE: I later find out that source does not improve anything
# since Nystrom tica needs all data in memory.
traj_source = coor.source(traj_list, feat, stride=5)
# 46,000 distances !!!
traj[0].shape
# Let's start with 15,000
data_15 = [i[:,:15000] for i in traj]  # keep only the first 15000 feature columns
# Let's see what happens
start = time.time()
Ntica_big = coor.tica_nystroem(max_columns=700, data=data_15, lag=15, nsel=100)
end = time.time()
print(f'This took {np.round(end-start)} seconds')
print(f'This took {np.round((end-start)/60)} minutes')
# Let's go for broke
# use source instead of load to save memory
start = time.time()
Ntica__really_big = coor.tica_nystroem(max_columns=2000, data=traj_source, lag=15, nsel=400)
end = time.time()
print(f'This took {np.round(end-start)} seconds')
# Ok so the above ran for 24 hours without crashing, so I think it would work; I just didn't want to wait for it.
#
# Also note that PyEmma threw a warning that Nystrom Tica needs to load all data into memory, so using source didn't help save memory
len(Ntica_big.column_indices)
# # Conclusion
# Nystrom tica was able to greatly reduce the number of features in the Main Protease system while still describing the slow motions basically as well as regular tica
#
# making a plot of implied timescales vs the number of column indices (with len(Ntica.column_indices) helps you make an informed decision of how sparse you can go
#
# It was also shown that Nystrom tica can be fed extremely large numbers of features and still work
#
# larger numbers for nsel and smaller numbers for max_columns greatly speed up the calculation
#
# The fact that Nystrom tica has still not been added to the PyEmma manual and that the get_output() method is broken is very concerning. It is possible the PyEmma developers are not actively maintaining the code
|
Nystrom_tica_tutorial.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/Weizhuo-Zhang/DeepLearning.ai-Summary/blob/master/1_Machine_Learning_landscape.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="gE4lU6ZteRHD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 411} outputId="4b0e9a85-042e-462d-90ec-1a70f7b1b0f6"
# !wget https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/lifesat/oecd_bli_2015.csv
# !wget https://raw.githubusercontent.com/ageron/handson-ml/master/datasets/lifesat/gdp_per_capita.csv
# + id="-coY2t6Ggii-" colab_type="code" colab={}
def prepare_country_stats(oecd_bli, gdp_per_capita):
    """Join OECD Better-Life data with GDP per capita into one DataFrame.

    Keeps only the "TOT" inequality rows, pivots indicators into columns,
    merges with GDP per capita on country, drops a fixed set of rows, and
    returns the ``GDP per capita`` / ``Life satisfaction`` columns.

    Note: mutates ``gdp_per_capita`` in place (rename + set_index), which
    matches the original notebook's behavior. The hard-coded index lists
    assume the book's 36-row dataset.
    """
    oecd_bli = oecd_bli[oecd_bli["INEQUALITY"]=="TOT"]
    oecd_bli = oecd_bli.pivot(index="Country", columns="Indicator", values="Value")
    gdp_per_capita.rename(columns={"2015": "GDP per capita"}, inplace=True)
    gdp_per_capita.set_index("Country", inplace=True)
    # FIX: merge against the *parameter*; the original referenced the
    # misspelled global ``gdp_per_capica``, silently ignoring the argument.
    full_country_stats = pd.merge(left=oecd_bli, right=gdp_per_capita,
                                  left_index=True, right_index=True)
    full_country_stats.sort_values(by="GDP per capita", inplace=True)
    # Rows the book deliberately holds out.
    remove_indices = [0,1,6,8,33,34,35]
    keep_indices = list(set(range(36)) - set(remove_indices))
    return full_country_stats[["GDP per capita", 'Life satisfaction']].iloc[keep_indices]
# + id="XWUlNwO5a23r" colab_type="code" colab={}
# load library
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import sklearn
# + id="S7ljl9olcLB5" colab_type="code" colab={}
# load dataset
oecd_bli = pd.read_csv("oecd_bli_2015.csv", thousands=',')
gdp_per_capica = pd.read_csv("gdp_per_capita.csv", thousands=',',delimiter='\t',
encoding='latin1',na_values='n/a')
# + id="ZwVuEKV_cVyP" colab_type="code" colab={}
# prepare dataset
country_stats = prepare_country_stats(oecd_bli, gdp_per_capica)
X = np.c_[country_stats["GDP per capita"]]
Y = np.c_[country_stats["Life satisfaction"]]
# + id="y1wzoP04gUFH" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 283} outputId="ad8c8931-02b6-466c-c342-3f8c69f0e142"
# Visulisation data
country_stats.plot(kind='scatter', x="GDP per capita", y="Life satisfaction")
plt.show()
# + id="jlQukSlNmNp4" colab_type="code" colab={}
|
1_Machine_Learning_landscape.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="SKOD9lvq5kdM"
# # PYTORCH CNN Classifier
#
# To run this notebook on an another benchmark, use
#
# ```
# papermill utils/torch_cnn_character.ipynb torch_cnn_character_experiments/[DATASET NAME].ipynb -p DATASET [DATASET NAME]
# ```
# + id="E8prt3AL5kdO" tags=["parameters"]
DATASET = 'no_dataset'
VERSION = 0
BATCH_SIZE = 32
EPOCHS = 5
# + colab={"base_uri": "https://localhost:8080/"} id="rohBItfR5kdP" outputId="7da7aafe-0858-48be-bf7d-f698784d88df"
print(DATASET, VERSION, BATCH_SIZE, EPOCHS)
# + [markdown] id="Y-YQrVyb5kdQ"
# ## Config
# + id="FIunuyGk5kdR"
import torch
from torch.utils.data import DataLoader
from torchtext.data.utils import get_tokenizer
from genomic_benchmarks.dataset_getters.pytorch_datasets import get_dataset
from genomic_benchmarks.models.torch import CNN
from genomic_benchmarks.dataset_getters.utils import coll_factory, LetterTokenizer, build_vocab, check_seq_lengths, check_config, VARIABLE_LENGTH_DATASETS
# + id="UgXiF6Zz5kdR"
USE_PADDING = DATASET in VARIABLE_LENGTH_DATASETS
config = {
"dataset": DATASET,
"dataset_version": VERSION,
"epochs": EPOCHS,
"batch_size": BATCH_SIZE,
"use_padding": USE_PADDING,
"force_download": False,
"run_on_gpu": True,
"number_of_classes": 2,
"embedding_dim": 100,
}
check_config(config)
# + [markdown] id="AUWF-avz5kdS"
# ## Choose the dataset
# + colab={"base_uri": "https://localhost:8080/"} id="1oaryDJr5kdS" outputId="6a425df9-4092-4601-9cd9-f61d524b6278"
train_dset = get_dataset(config["dataset"], 'train')
# + [markdown] id="B0lqmy935kdS"
# ## Tokenizer and vocab
# + colab={"base_uri": "https://localhost:8080/"} id="NY_zualp5kdT" outputId="9034d65f-2f9a-4c83-a314-13d3b808fc65"
tokenizer = get_tokenizer(LetterTokenizer())
vocabulary = build_vocab(train_dset, tokenizer, use_padding=config["use_padding"])
print("vocab len:" ,vocabulary.__len__())
print(vocabulary.get_stoi())
# + [markdown] id="3hGQpSJA5kdT"
# ## Dataloader and batch preparation
# + colab={"base_uri": "https://localhost:8080/"} id="gXUgPH6q5kdT" outputId="a1eb6ba2-9bcd-416d-8662-e3516121da96"
# Run on GPU or CPU
device = 'cuda' if config["run_on_gpu"] and torch.cuda.is_available() else 'cpu'
print('Using {} device'.format(device))
max_seq_len, nn_input_len = check_seq_lengths(dataset=train_dset, config=config)
# Data Loader
if(config["use_padding"]):
collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = nn_input_len)
else:
collate = coll_factory(vocabulary, tokenizer, device, pad_to_length = None)
train_loader = DataLoader(train_dset, batch_size=config["batch_size"], shuffle=True, collate_fn=collate)
# + [markdown] id="sTTomnnP5kdU"
# ## Model
# + id="ZNuXGIdD5kdU"
model = CNN(
number_of_classes=config["number_of_classes"],
vocab_size=vocabulary.__len__(),
embedding_dim=config["embedding_dim"],
input_len=nn_input_len
).to(device)
# + [markdown] id="1pqIm92h5kdV"
# ## Training
# + colab={"base_uri": "https://localhost:8080/"} id="uvO_-RFm5kdV" outputId="7799e5f9-78a7-4ef2-95e8-f06751912eda"
model.train(train_loader, epochs=config["epochs"])
# + [markdown] id="bR2tamrc5kdV"
# ## Testing
# + id="MtBiFLNe7b5U"
def test(self, dataloader, positive_label = 1):
    """Evaluate a binary classifier and print/return its metrics.

    Args:
        self: the model — a callable returning per-sample scores in [0, 1]
            with a ``loss(pred, y)`` method. (The parameter is named ``self``
            because the notebook uses this as a method-style helper, calling
            ``test(model, loader)``.)
        dataloader: iterable of (X, y) batches exposing a ``dataset`` attribute.
        positive_label: which label (0 or 1) counts as positive for
            precision/recall.

    Returns:
        (accuracy, f1_score) tuple.
    """
    size = dataloader.dataset.__len__()
    num_batches = len(dataloader)
    test_loss, correct = 0, 0
    tp, p, fp = 0, 0, 0
    with torch.no_grad():
        for X, y in dataloader:
            pred = self(X)
            test_loss += self.loss(pred, y).item()
            correct += (torch.round(pred) == y).sum().item()
            p += (y == positive_label).sum().item()
            # "Soft" true/false positives: accumulate predicted probability
            # mass rather than thresholded counts.
            if(positive_label == 1):
                tp += (y * pred).sum(dim=0).item()
                fp += ((1 - y) * pred).sum(dim=0).item()
            else:
                tp += ((1 - y) * (1 - pred)).sum(dim=0).item()
                fp += (y * (1 - pred)).sum(dim=0).item()
    print("p ", p, "; tp ", tp, "; fp ", fp)
    # FIX: guard the divisions — an empty positive class or all-zero
    # predictions previously raised ZeroDivisionError.
    recall = tp / p if p else 0.0
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    print("recall ", recall, "; precision ", precision)
    f1_score = 2 * precision * recall / (precision + recall) if (precision + recall) else 0.0
    print("num_batches", num_batches)
    print("correct", correct)
    print("size", size)
    test_loss /= num_batches
    accuracy = correct / size
    print(f"Test metrics: \n Accuracy: {accuracy:>6f}, F1 score: {f1_score:>6f}, Avg loss: {test_loss:>6f} \n")
    return accuracy, f1_score
# + colab={"base_uri": "https://localhost:8080/"} id="wbnciyRK5kdW" outputId="d2592f9d-f074-465f-95f8-df3b759ae068"
# Evaluate on the held-out test split, reusing the training collate function.
test_dset = get_dataset(config["dataset"], 'test')
test_loader = DataLoader(test_dset, batch_size=config["batch_size"], shuffle=True, collate_fn=collate)
acc, f1 = test(model, test_loader)
acc, f1
|
experiments/tokenization_comparison/utils/torch_cnn_character.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:eu-west-1:470317259841:image/datascience-1.0
# ---
# # Direct Marketing with Amazon SageMaker Autopilot
# ---
#
# ---
# ## Contents
#
# 1. [Introduction](#Introduction)
# 1. [Prerequisites](#Prerequisites)
# 1. [Downloading the dataset](#Downloading)
# 1. [Upload the dataset to Amazon S3](#Uploading)
# 1. [Setting up the SageMaker Autopilot Job](#Settingup)
# 1. [Launching the SageMaker Autopilot Job](#Launching)
# 1. [Tracking Sagemaker Autopilot Job Progress](#Tracking)
# 1. [Results](#Results)
# 1. [Cleanup](#Cleanup)
# ## Introduction
#
# Amazon SageMaker Autopilot is an automated machine learning (commonly referred to as AutoML) solution for tabular datasets. You can use SageMaker Autopilot in different ways: on autopilot (hence the name) or with human guidance, without code through SageMaker Studio, or using the AWS SDKs. This notebook, as a first glimpse, will use the AWS SDKs to simply create and deploy a machine learning model.
#
# A typical introductory task in machine learning (the "Hello World" equivalent) is one that uses a dataset to predict whether a customer will enroll for a term deposit at a bank, after one or more phone calls. For more information about the task and the dataset used, see [Bank Marketing Data Set](https://archive.ics.uci.edu/ml/datasets/bank+marketing).
#
# Direct marketing, through mail, email, phone, etc., is a common tactic to acquire customers. Because resources and a customer's attention are limited, the goal is to only target the subset of prospects who are likely to engage with a specific offer. Predicting those potential customers based on readily available information like demographics, past interactions, and environmental factors is a common machine learning problem. You can imagine that this task would readily translate to marketing lead prioritization in your own organization.
#
# This notebook demonstrates how you can use Autopilot on this dataset to get the most accurate ML pipeline through exploring a number of potential options, or "candidates". Each candidate generated by Autopilot consists of two steps. The first step performs automated feature engineering on the dataset and the second step trains and tunes an algorithm to produce a model. When you deploy this model, it follows similar steps. Feature engineering followed by inference, to decide whether the lead is worth pursuing or not. The notebook contains instructions on how to train the model as well as to deploy the model to perform batch predictions on a set of leads. Where it is possible, use the Amazon SageMaker Python SDK, a high level SDK, to simplify the way you interact with Amazon SageMaker.
#
# Other examples demonstrate how to customize models in various ways. For instance, models deployed to devices typically have memory constraints that need to be satisfied as well as accuracy. Other use cases have real-time deployment requirements and latency constraints. For now, keep it simple.
# ## Prerequisites
#
# Before you start the tasks in this tutorial, do the following:
#
# - The Amazon Simple Storage Service (Amazon S3) bucket and prefix that you want to use for training and model data. This should be within the same Region as Amazon SageMaker training. The code below will create, or if it exists, use, the default bucket.
# - The IAM role to give Autopilot access to your data. See the Amazon SageMaker documentation for more information on IAM roles: https://docs.aws.amazon.com/sagemaker/latest/dg/security-iam.html
# +
# cell 01
# Session/role setup: everything below talks to SageMaker in this region.
import sagemaker
import boto3
from sagemaker import get_execution_role
region = boto3.Session().region_name
session = sagemaker.Session()
bucket = session.default_bucket()  # default SageMaker bucket for this account/region
prefix = 'sagemaker/autopilot-dm'  # S3 key prefix for all job data and artifacts
role = get_execution_role()
sm = boto3.Session().client(service_name='sagemaker',region_name=region)
# -
# ## Downloading the dataset<a name="Downloading"></a>
# Download the [direct marketing dataset](!wget -N https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip) from the sample data s3 bucket.
#
# \[Moro et al., 2014\] <NAME>, <NAME> and <NAME>. A Data-Driven Approach to Predict the Success of Bank Telemarketing. Decision Support Systems, Elsevier, 62:22-31, June 2014
# +
# cell 02
# Notebook shell magics: fetch and unzip the bank-marketing dataset.
# !wget -N https://sagemaker-sample-data-us-west-2.s3-us-west-2.amazonaws.com/autopilot/direct_marketing/bank-additional.zip
# !conda install -y -c conda-forge unzip
# !unzip -o bank-additional.zip
local_data_path = './bank-additional/bank-additional-full.csv'
# + [markdown] toc-hr-collapsed=true
# ## Upload the dataset to Amazon S3<a name="Uploading"></a>
#
# Before you run Autopilot on the dataset, first perform a check of the dataset to make sure that it has no obvious errors. The Autopilot process can take long time, and it's generally a good practice to inspect the dataset before you start a job. This particular dataset is small, so you can inspect it in the notebook instance itself. If you have a larger dataset that will not fit in a notebook instance memory, inspect the dataset offline using a big data analytics tool like Apache Spark. [Deequ](https://github.com/awslabs/deequ) is a library built on top of Apache Spark that can be helpful for performing checks on large datasets. Autopilot is capable of handling datasets up to 5 GB.
#
# -
# Read the data into a Pandas data frame and take a look.
# +
# cell 03
# Quick sanity inspection of the CSV before launching the (long) Autopilot job.
import pandas as pd
data = pd.read_csv(local_data_path)
pd.set_option('display.max_columns', 500) # Make sure we can see all of the columns
pd.set_option('display.max_rows', 10) # Keep the output on one page
data
# -
# Note that there are 20 features to help predict the target column 'y'.
#
# Amazon SageMaker Autopilot takes care of preprocessing your data for you. You do not need to perform conventional data preprocessing techniques such as handling missing values, converting categorical features to numeric features, scaling data, and handling more complicated data types.
#
# Moreover, splitting the dataset into training and validation splits is not necessary. Autopilot takes care of this for you. You may, however, want to split out a test set. That's next, although you use it for batch inference at the end instead of testing the model.
#
# ### Reserve some data for calling batch inference on the model
#
# Divide the data into training and testing splits. The training split is used by SageMaker Autopilot. The testing split is reserved to perform inference using the suggested model.
#
# +
# cell 04
# 80/20 split: the 80% goes to Autopilot, the 20% is held out for batch inference.
train_data = data.sample(frac=0.8,random_state=200)
test_data = data.drop(train_data.index)
test_data_no_target = test_data.drop(columns=['y'])  # inference input has no label
# -
# ### Upload the dataset to Amazon S3
# Copy the file to Amazon Simple Storage Service (Amazon S3) in a .csv format for Amazon SageMaker training to use.
# +
# cell 05
train_file = 'train_data.csv';
train_data.to_csv(train_file, index=False, header=True)
train_data_s3_path = session.upload_data(path=train_file, key_prefix=prefix + "/train")
print('Train data uploaded to: ' + train_data_s3_path)
# NOTE: the test file is written without a header (presumably for batch
# transform input — confirm against the inference step).
test_file = 'test_data.csv';
test_data_no_target.to_csv(test_file, index=False, header=False)
test_data_s3_path = session.upload_data(path=test_file, key_prefix=prefix + "/test")
print('Test data uploaded to: ' + test_data_s3_path)
# -
# ## Setting up the SageMaker Autopilot Job<a name="Settingup"></a>
#
# After uploading the dataset to Amazon S3, you can invoke Autopilot to find the best ML pipeline to train a model on this dataset.
#
# The required inputs for invoking a Autopilot job are:
# * Amazon S3 location for input dataset and for all output artifacts
# * Name of the column of the dataset you want to predict (`y` in this case)
# * An IAM role
#
# Currently Autopilot supports only tabular datasets in CSV format. Either all files should have a header row, or the first file of the dataset, when sorted in alphabetical/lexical order, is expected to have a header row.
# +
# cell 06
# Autopilot job inputs: where the training CSV lives, where to write
# artifacts, a candidate cap to bound runtime, and the objective metric.
input_data_config = [{
    'DataSource': {
        'S3DataSource': {
            'S3DataType': 'S3Prefix',
            'S3Uri': 's3://{}/{}/train'.format(bucket,prefix)
        }
    },
    'TargetAttributeName': 'y'  # the label column to predict
    }
]
output_data_config = {
    'S3OutputPath': 's3://{}/{}/output'.format(bucket,prefix)
}
autoMLJobConfig={
    'CompletionCriteria': {
        'MaxCandidates': 5  # keep the demo short; default jobs explore many more
    }
}
autoMLJobObjective = {
    "MetricName": "Accuracy"
}
# -
# You can also specify the type of problem you want to solve with your dataset (`Regression, MulticlassClassification, BinaryClassification`). In case you are not sure, SageMaker Autopilot will infer the problem type based on statistics of the target column (the column you want to predict).
#
# You have the option to limit the running time of a SageMaker Autopilot job by providing either the maximum number of pipeline evaluations or candidates (one pipeline evaluation is called a `Candidate` because it generates a candidate model) or providing the total time allocated for the overall Autopilot job. Under default settings, this job takes about four hours to run. This varies between runs because of the nature of the exploratory process Autopilot uses to find optimal training parameters.
# ## Launching the SageMaker Autopilot Job<a name="Launching"></a>
#
# You can now launch the Autopilot job by calling the `create_auto_ml_job` API. https://docs.aws.amazon.com/cli/latest/reference/sagemaker/create-auto-ml-job.html
# +
# cell 07
from time import gmtime, strftime, sleep

# Give the job a unique, timestamped name, then launch it.
timestamp_suffix = strftime('%d-%H-%M-%S', gmtime())
auto_ml_job_name = 'automl-banking-' + timestamp_suffix
print(f'AutoMLJobName: {auto_ml_job_name}')
sm.create_auto_ml_job(
    AutoMLJobName=auto_ml_job_name,
    InputDataConfig=input_data_config,
    OutputDataConfig=output_data_config,
    AutoMLJobConfig=autoMLJobConfig,
    AutoMLJobObjective=autoMLJobObjective,
    ProblemType="BinaryClassification",
    RoleArn=role,
)
# -
# ## Tracking SageMaker Autopilot job progress<a name="Tracking"></a>
# A SageMaker Autopilot job consists of the following high-level steps:
# * Analyzing Data, where the dataset is analyzed and Autopilot comes up with a list of ML pipelines that should be tried out on the dataset. The dataset is also split into train and validation sets.
# * Feature Engineering, where Autopilot performs feature transformation on individual features of the dataset as well as at an aggregate level.
# * Model Tuning, where the top performing pipeline is selected along with the optimal hyperparameters for the training algorithm (the last stage of the pipeline).
# +
# cell 08
# Poll the Autopilot job every 30 s until it reaches a terminal state.
print ('JobStatus - Secondary Status')
print('------------------------------')
describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)
print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus'])
job_run_status = describe_response['AutoMLJobStatus']
while job_run_status not in ('Failed', 'Completed', 'Stopped'):
    describe_response = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)
    job_run_status = describe_response['AutoMLJobStatus']
    print (describe_response['AutoMLJobStatus'] + " - " + describe_response['AutoMLJobSecondaryStatus'])
    # NOTE(review): this sleeps once more even after the terminal status has
    # been printed; harmless, but the loop could break before sleeping.
    sleep(30)
# + [markdown] toc-hr-collapsed=true
# ## Results
#
# Now use the describe_auto_ml_job API to look up the best candidate selected by the SageMaker Autopilot job.
# -
# cell 09
# Look up and report the best candidate selected by the Autopilot job.
best_candidate = sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['BestCandidate']
best_candidate_name = best_candidate['CandidateName']
objective_metric = best_candidate['FinalAutoMLJobObjectiveMetric']
print(best_candidate)
print('\n')
print(f"CandidateName: {best_candidate_name}")
print(f"FinalAutoMLJobObjectiveMetricName: {objective_metric['MetricName']}")
print(f"FinalAutoMLJobObjectiveMetricValue: {objective_metric['Value']}")
# + [markdown] toc-hr-collapsed=false
# ### Perform batch inference using the best candidate
#
# Now that you have successfully completed the SageMaker Autopilot job on the dataset, create a model from any of the candidates by using [Inference Pipelines](https://docs.aws.amazon.com/sagemaker/latest/dg/inference-pipelines.html).
# +
# cell 10
# Register the best candidate's inference pipeline containers as a
# deployable SageMaker Model object.
model_name = 'automl-banking-model-' + timestamp_suffix
model = sm.create_model(Containers=best_candidate['InferenceContainers'],
                        ModelName=model_name,
                        ExecutionRoleArn=role)
print('Model ARN corresponding to the best candidate is : {}'.format(model['ModelArn']))
# -
# You can use batch inference by using Amazon SageMaker batch transform. The same model can also be deployed to perform online inference using Amazon SageMaker hosting.
# +
# cell 11
# Configure a batch transform job that scores the held-out test data
# with the model created from the best candidate above.
transform_job_name = 'automl-banking-transform-' + timestamp_suffix
# Input: one CSV record per line, read from the test split in S3.
transform_input = {
        'DataSource': {
            'S3DataSource': {
                'S3DataType': 'S3Prefix',
                'S3Uri': test_data_s3_path
            }
        },
        'ContentType': 'text/csv',
        'CompressionType': 'None',
        'SplitType': 'Line'
    }
transform_output = {
        'S3OutputPath': 's3://{}/{}/inference-results'.format(bucket,prefix),
    }
transform_resources = {
        'InstanceType': 'ml.m5.4xlarge',
        'InstanceCount': 1
    }
sm.create_transform_job(TransformJobName = transform_job_name,
                        ModelName = model_name,
                        TransformInput = transform_input,
                        TransformOutput = transform_output,
                        TransformResources = transform_resources
                        )
# -
# Watch the transform job for completion.
# +
# cell 12
# Poll the batch transform job every 30 s until it reaches a terminal state.
print ('JobStatus')
print('----------')
describe_response = sm.describe_transform_job(TransformJobName = transform_job_name)
job_run_status = describe_response['TransformJobStatus']
print (job_run_status)
while job_run_status not in ('Failed', 'Completed', 'Stopped'):
    describe_response = sm.describe_transform_job(TransformJobName = transform_job_name)
    job_run_status = describe_response['TransformJobStatus']
    print (job_run_status)
    sleep(30)
# -
# Now let's view the results of the transform job:
# +
# cell 13
# Download the transform output from S3 and load it for inspection.
s3_output_key = '{}/inference-results/test_data.csv.out'.format(prefix);  # trailing ';' is a harmless no-op in Python
local_inference_results_path = 'inference_results.csv'
s3 = boto3.resource('s3')
inference_results_bucket = s3.Bucket(session.default_bucket())
inference_results_bucket.download_file(s3_output_key, local_inference_results_path);
# NOTE(review): assumes the predictions file uses ';' separators like the
# input dataset — confirm against the actual transform output format.
data = pd.read_csv(local_inference_results_path, sep=';')
pd.set_option('display.max_rows', 10) # Keep the output on one page
data
# -
# ### View other candidates explored by SageMaker Autopilot
# You can view all the candidates (pipeline evaluations with different hyperparameter combinations) that were explored by SageMaker Autopilot and sort them by their final performance metric.
# cell 14
# List every candidate pipeline the job evaluated, sorted by the final
# objective metric, and print "<rank> <candidate name> <metric value>".
candidates = sm.list_candidates_for_auto_ml_job(AutoMLJobName=auto_ml_job_name, SortBy='FinalObjectiveMetricValue')['Candidates']
# enumerate() replaces the original manual index counter; output is identical.
for index, candidate in enumerate(candidates, start=1):
    print(f"{index} {candidate['CandidateName']} {candidate['FinalAutoMLJobObjectiveMetric']['Value']}")
# ### Candidate Generation Notebook
#
# Sagemaker AutoPilot also auto-generates a Candidate Definitions notebook. This notebook can be used to interactively step through the various steps taken by the Sagemaker Autopilot to arrive at the best candidate. This notebook can also be used to override various runtime parameters like parallelism, hardware used, algorithms explored, feature extraction scripts and more.
#
# The notebook can be downloaded from the following Amazon S3 location:
# cell 15
# S3 URI of the auto-generated candidate-definitions notebook (notebook display).
sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['CandidateDefinitionNotebookLocation']
# ### Data Exploration Notebook
# Sagemaker Autopilot also auto-generates a Data Exploration notebook, which can be downloaded from the following Amazon S3 location:
# cell 16
# S3 URI of the auto-generated data-exploration notebook (notebook display).
sm.describe_auto_ml_job(AutoMLJobName=auto_ml_job_name)['AutoMLJobArtifacts']['DataExplorationNotebookLocation']
# ## Cleanup
#
# The Autopilot job creates many underlying artifacts such as dataset splits, preprocessing scripts, or preprocessed data, etc. This code, when un-commented, deletes them. This operation deletes all the generated models and the auto-generated notebooks as well.
# +
# cell 17
#s3 = boto3.resource('s3')
#bucket = s3.Bucket(bucket)
#job_outputs_prefix = '{}/output/{}'.format(prefix,auto_ml_job_name)
#bucket.objects.filter(Prefix=job_outputs_prefix).delete()
|
sagemaker_autopilot_direct_marketing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Linear Regression
#
# This notebook demonstrates a generic training pipeline to train a machine learning model. For this, we use a simple linear regression model to regress house prices.
#
# 
#
# ## 1. Forward and Backward Pass
# The aim of this exercise is to implement a linear model with forward and backward pass to regress the housing prices based on the dataset HousingPrices. Have a look at the ```house-prices-data-exploration.ipynb``` from last week's exercise to get a nice overview of the dataset. The notebook also explains the dataloading and pre-processing steps that we will use in this exercise for dataloading.
# $ $ A Linear Regression model is defined as follows:
# Let $\mathbf{X} \in \mathbb{R}^{N\times D}$ denote our data with $N$ samples and $D$ feature dimensions. Our targets, the housing prices, are given by $\mathbf{y} \in \mathbb{R}^{N\times 1}$. We want to estimate them with a linear model that predicts the price at which a house was sold based on a set of features, i.e., a model of the form
#
# $$ \hat{y_{i}} = \mathbf{x}_i \cdot \mathbf{w} + b, $$
#
# $ $ where $\mathbf{w}\in \mathbb{R}^{D \times 1}$ is the weight of our linear model, $b$ the bias, and the index $i$ denotes one sample. If we extend our samples with a column of 1s $(\mathbf{X} \in \mathbb{R}^{N\times (D+1)})$, we can absorb the bias into the weight $\mathbf{w} \in \mathbb{R}^{(D+1) \times 1}$ (note the +1 in the feature dimension), such that we only have a matrix multiplication resulting in
#
# $$ \mathbf{y} = \mathbf{X} \mathbf{w} $$
#
# $ $ over all $N$ samples.
# Here you can see an example of a 1-D linear regression.
#
# <img src="https://upload.wikimedia.org/wikipedia/commons/thumb/3/3a/Linear_regression.svg/1200px-Linear_regression.svg.png" width="800">
# +
from exercise_code.data.csv_dataset import CSVDataset
from exercise_code.data.csv_dataset import FeatureSelectorAndNormalizationTransform
from exercise_code.data.dataloader import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
import seaborn as sns
pd.options.mode.chained_assignment = None # default='warn'
# %matplotlib inline
# %load_ext autoreload
# %autoreload 2
# -
# ### Load your data
# In the following, we apply preprocessing steps from ```house-prices-data-exploration.ipynb```. In machine learning, it is always important that any preprocessing step we apply on the training data is also applied on the validation and test data.
#
# The features are at very different scales and variances. Therefore, we normalize the feature ranges with the minimum and maximum value of each numeric column. For filling in missing numeric values (if any), we need the mean value. We should pre-compute these values on the training set and feed them to the transform that is used on all dataset splits.
# $ $ For means of visualization, we only consider number of features $D=1$.
# +
# Paths and download location for the housing dataset; the dataset root
# lives one level above this notebook's directory.
target_column = "SalePrice"
i2dl_exercises_path = os.path.dirname(os.path.abspath(os.getcwd()))
root_path = os.path.join(i2dl_exercises_path, "datasets", 'housing')
download_url = 'https://cdn3.vision.in.tum.de/~dl4cv/housing_train.zip'
# Always make sure this line was run at least once before trying to
# access the data manually, as the data is downloaded in the
# constructor of CSVDataset.
train_dataset = CSVDataset(target_column=target_column, root=root_path, download_url=download_url, mode="train")
# -
# Compute min, max and mean for each feature column for the transforms.
# +
df = train_dataset.df
target_column = 'SalePrice'
# Select the feature column(s) to keep plus the target column.
selected_columns = ['GrLivArea', target_column]
# Per-column min/max/mean are computed on the *training* split only, so the
# same statistics normalize train, val, and test consistently.
mn, mx, mean = df.min(), df.max(), df.mean()
column_stats = {}
for column in selected_columns:
    crt_col_stats = {'min' : mn[column],
                     'max' : mx[column],
                     'mean': mean[column]}
    column_stats[column] = crt_col_stats
transform = FeatureSelectorAndNormalizationTransform(column_stats, target_column)
def rescale(data, key = "SalePrice", column_stats = column_stats):
    """Map min-max-normalized values in *data* back to the original scale of column *key*."""
    stats = column_stats[key]
    return data * (stats["max"] - stats["min"]) + stats["min"]
# -
# Now, we perform the same transformation on the training, validation, and test data.
# +
# Always make sure this line was run at least once before trying to
# access the data manually, as the data is downloaded in the
# constructor of CSVDataset.
# The same train-fitted transform is applied to all three splits.
train_dataset = CSVDataset(mode="train", target_column=target_column, root=root_path, download_url=download_url, transform=transform)
val_dataset = CSVDataset(mode="val", target_column=target_column, root=root_path, download_url=download_url, transform=transform)
test_dataset = CSVDataset(mode="test", target_column=target_column, root=root_path, download_url=download_url, transform=transform)
print("Number of training samples:", len(train_dataset))
print("Number of validation samples:", len(val_dataset))
print("Number of test samples:", len(test_dataset))
# -
# $ $ We load the data into one matrix of shape $(N, D)$, similar for the targets.
# +
# load training data into a matrix of shape (N, D), same for targets resulting in the shape (N, 1)
def _stack_split(dataset):
    """Collect a dataset's samples into an (N, D) feature matrix and an (N, 1) target matrix."""
    features = np.stack([dataset[i]['features'] for i in range(len(dataset))], axis=0)
    targets = np.stack([dataset[i]['target'] for i in range(len(dataset))], axis=0)
    return features, targets

# load training data into a matrix of shape (N, D), same for targets resulting in the shape (N, 1)
# (helper replaces the original triplicated copy-paste for the three splits)
X_train, y_train = _stack_split(train_dataset)
print("train data shape:", X_train.shape)
print("train targets shape:", y_train.shape)
# load validation data
X_val, y_val = _stack_split(val_dataset)
print("val data shape:", X_val.shape)
print("val targets shape:", y_val.shape)
# load test data
X_test, y_test = _stack_split(test_dataset)
print("test data shape:", X_test.shape)
print("test targets shape:", y_test.shape)
# -
# ### Set up a linear model
# $ $ We define a linear model in ```exercise_code/networks/linear_model.py```.
# Your task is now to implement the forward pass in method ```forward()``` and the backward pass in ```backward()``` in the Network class ```LinearModel```.
# +
from exercise_code.networks.linear_model import LinearModel

# Build a 1-feature linear model and initialize it with a hand-picked
# weight (slope 0.8, bias 0) to demonstrate a reasonable prior fit.
model = LinearModel(num_features=1)
# choose weights for initialization
weights = np.array([[0.8, 0]]).transpose()
model.initialize_weights(weights)
# forward pass
y_out, _ = model(X_train)
# -
# Scratch cell: replicate the bias-absorption trick from the text — append a
# column of ones to X so the bias can live inside the weight vector.
X = X_train
batch_size, _ = X.shape
X = np.concatenate((X, np.ones((batch_size, 1))), axis=1)
X
# Scratch cell: an element-wise product averaged over axis 0 keeps a leading
# singleton dimension when keepdims is set.
a = np.arange(1, 7).reshape(3, 2)
b = np.array([10, 30, 50])[:, None] + np.array([0, 1])
np.mean(a * b, 0, keepdims=True)
# We visualize the result of the forward pass in the following. Note that we choose the weights for the initialization of our model. As you can see, by choosing a good prior, you can already get good results.
# plot the predictions (still in normalized [0, 1] units)
plt.scatter(X_train, y_train)
plt.plot(X_train, np.squeeze(y_out), color='r')
plt.xlabel('GrLivArea');
plt.ylabel('SalePrice');
# $ $ Since we normalized our dataset, the predictions are still in the range $[0, 1]$. Let's scale them back to the original range with min, max and mean from above:
# +
# compute predictions by rescaling the predicted values back to the
# original units
X_rescaled = rescale(X_train, key = "GrLivArea")
y_rescaled = rescale(y_out, key = "SalePrice")
# plot the predictions against the raw (un-normalized) data
plt.scatter(df['GrLivArea'], df['SalePrice'])
plt.plot(X_rescaled, y_rescaled, color='r')
plt.xlabel('GrLivArea');
plt.ylabel('SalePrice');
# -
# In the aforementioned visualisation we already initialised the weights of the model with a good guess. But what do we do if we do not know the model weights yet? For linear regression we can solve the problem analytically. However, this is not possible for more complex models such as neural networks. Therefore, we have to compute a solution numerically.
#
#
# ## 2. Gradient Descent
#
# In order to train our model we will discuss the following steps in this exercise:
#
# - Implement a **loss function** for your model
# - **Compute the gradient** of your loss function
# - **Check your implementation** with numerical gradient
# - **Optimize** the loss function with **gradient descent**
# ### Loss Function
#
# In order to apply and train your model you first have to define a loss or objective function with respect to which you want to optimize your model. For the task of regression, we usually consider $ L_{1}$,
# $$ L_{1} = |y - \hat y|, $$
# and mean squared error (MSE),
# MSE loss function is:
# $$ MSE = (y - \hat y)^2, $$
# $ $ where $y$ is the output of your model, and $\hat y$ is the ground truth of the data.
# Now it is time to implement your loss function in `exercise_code/networks/loss.py` and write the forward and backward pass of $ L_{1}$ and MSE as `forward` and `backward` function. The backward pass of the loss is needed to later optimize your weights of the model.
#
#
# Once you have implemented you loss function you can import your functions.
# +
from exercise_code.networks.loss import L1, MSE

# Instantiate both loss functions so they can be compared below.
l1_loss = L1()
mse_loss = MSE()
# -
# ### Forward and Backward Check
#
# Once you have finished implementation of L1 and MSE loss class, you can run the following code to check whether your forward result and backward gradient are correct. You should expect your relative error to be lower than 1e-8. (Note that gradients at 0 of $ L_{1}$ loss is also 0! )
#
# Here we will use a numeric gradient check to debug the backward pass:
#
# $$ \frac {df(x)}{dx} = \frac{f(x+h) - f(x-h)}{2h} $$
#
# where $h$ is a very small number, in practice approximately 1e-5 or so.
#
from exercise_code.tests.loss_tests import *
# Numeric-gradient checks of the loss implementations; the text above
# expects relative errors below 1e-8.
print (L1Test(l1_loss)())
print (MSETest(mse_loss)())
# When the tests were successful, you can continue with implementing gradient descent and your optimizer.
#
#
# ### Optimizer and Gradient Descent
#
# Previously, we have successfully dealt with the loss function, which is a mathematical tool for predicting the prediction effect.
#
# During the training process, we will adjust the parameters (weights) of the model to try to minimize the loss function and optimize the predictions of our model.
#
# This is where the optimizer comes in. They update the model with respect to the output of the loss function, thereby linking the loss function and model parameters together. In short, the optimizer shapes and models the most accurate form by updating weights. The loss function is a guide to the terrain and can tell the optimizer when to move in the right or wrong direction.
#
# Any discussion about optimizers needs to begin with the most popular one, and it is called Gradient Descent. This algorithm is used across all types of Machine Learning (and other math problems) to optimize. It is fast, robust, and flexible. Here is how it works:
#
# 1. Calculate the gradient of the loss function with respect to each individual weight;
# 2. Update each weight based on its gradient;
# 3. Repeat steps 1 and 2 iteratively until convergence.
#
# Gradient descent follows the following mathematical form:
#
# $$ W = W - \alpha \frac {dL}{dW}, $$
# $ $ where $W$ is weight of your model, $\alpha$ is the learning rate you need to set before training (we will discuss that more in the comming lectures), $ \frac {dL}{dW}$ is the gradients of your loss function with respect to the weight.
# ### Implement a Naive Optimizer using Gradient Descent
#
# Here we will use gradient descent method to update our loss function to see how it changes when updating our weights in the model. Open the file `exercise_code/networks/optimizer.py` and implement the gradients descent step into the `step()` function.
#
# +
from exercise_code.networks.optimizer import *

# (removed the no-op self-assignments `X_train = X_train` / `X_val = X_val`)
# Build a fresh linear model with randomly initialized weights to show how
# bad the untrained fit is before gradient descent.
model = LinearModel(num_features=1)
model.initialize_weights()

print(np.shape(X_train))

# forward pass
y_out, _ = model(X_train)

# plot the prediction
plt.scatter(X_train, y_train)
plt.plot(X_train, y_out, color='r')
plt.show()
# -
# As you can see, the predictions of the model are really bad when we randomly initialise the weights. Let's see how this changes when we apply gradient descent to the weights.
# +
# Hyperparameter setting: specify the loss function and use the optimizer
# implemented in the previous step.
loss_func = MSE()
learning_rate = 5e-1
loss_history = []
opt = Optimizer(model,learning_rate)
steps = 100
# Full batch Gradient Descent
for i in range(steps):
    # Enable your model to store the gradient.
    model.train()
    # Compute the output and gradients with respect to the weights of your model for the input dataset.
    model_forward,model_backward = model(X_train)
    # Compute the loss and gradients with respect to the output of the model.
    loss, loss_grad = loss_func(model_forward, y_train)
    # Chain rule: dL/dW = dL/dy * dy/dW.
    grad = loss_grad * model_backward
    # Compute the average gradient over your batch.
    grad = np.mean(grad, 0, keepdims = True)
    # After obtaining the gradients of the loss with respect to the weights,
    # the optimizer performs one gradient descent step.
    opt.step(grad.T)
    # Average over the loss of the entire dataset and store it.
    average_loss = np.mean(loss)
    loss_history.append(average_loss)
    print("Epoch ",i,"--- Average Loss: ", average_loss)
# Plot the loss history to see how it goes after several steps of
# gradient descent.
plt.plot(loss_history)
plt.xlabel('iteration')
plt.ylabel('training loss')
plt.title('Training Loss history')
plt.show()
# forward pass with the trained weights
y_out, _ = model(X_train)
# plot the prediction (should now track the data much better)
plt.scatter(X_train, y_train)
plt.plot(X_train, y_out, color='r')
plt.show()
# -
# We can see that our loss decreases and the fit of the linear model improves.
# # 3. Solver
#
# Now that you've learned how to build your own neural regressors and classifiers, let's put everything together.
# This part of the exercise will require you to complete the missing code in the 'Solver' class, and use that to train your models end to end.
#
# The purpose of a solver is to mainly to provide an abstraction for all the gritty details behind training your parameters, such as logging your progress, optimizing your model, and handling your data.
#
# In order to explore the full generality of the solver, we will address two common tasks in machine learning, namely classification and regression.
# ### Implement a Solver
#
# Open the file `exercise_code/solver.py` and finalize the `_step()`function.
#
# Note that we will initialize our solver with given training and validation set and perform loss update by calling `_step()`, here you may use the `backward()` or `__call__()` function in your loss and model to calculate the gradients of the loss with respect to the weight, and finally perform gradient descent with the help of `step()` function in your optimizer.
# +
from exercise_code import solver

# Select the number of features, you want your task to train on.
# Feel free to play with the sizes.
num_features = 1

# Use a simple linear model to perform linear regression
# on real-valued labels.
model = LinearModel(num_features=num_features)
model.initialize_weights()

# Build the actual dataset dict consumed by the Solver.
# Notice how we use an 80-20 train validation split.
# You're welcome to experiment with other splits.
# (removed the no-op self-assignment `X_val = X_val`)
y_out, _ = model(X_val)
data = {'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val}

# We are going to use the MSE loss for training, but L1
# is also an appropriate loss function for this task.
l1_loss = L1()
mse_loss = MSE()

learning_rate = 1e-3
epochs = 25000

# Fix: these "BEFORE" losses are computed on the *validation* split (y_val),
# so the printed label now says so instead of "test set".
print("L1 loss on validation set BEFORE training: {:,.0f}".format(l1_loss(rescale(y_out), rescale(y_val))[0].mean() ))
print("MSE loss on validation set BEFORE training: {:,.0f}".format(mse_loss(rescale(y_out), rescale(y_val))[0].mean() ))

# Visualize the (untrained) fit when there is exactly one feature.
if np.shape(X_test)[1]==1:
    plt.scatter(X_val, y_val, label = "Ground Truth")
    inds = X_val.argsort(0).flatten()
    plt.plot(X_val[inds], y_out[inds], color='r', label = "Prediction")
    plt.legend()
    plt.show()

# Setup for the actual solver that's going to do the job of training
# the model on the given data. Set 'verbose=True' to see real time
# progress of the training. The instance is NOT named `solver` so the
# `exercise_code.solver` module imported above is not shadowed.
regression_solver = solver.Solver(model, data, mse_loss,
                                  learning_rate, verbose=True, print_every= 1000)

# Train the model, and look at the results.
regression_solver.train(epochs)
plt.plot(regression_solver.val_loss_history, label = "Validation Loss")
plt.plot(regression_solver.train_loss_history, label = "Train Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()

# Test final performance on the held-out test set.
y_out, _ = model(X_test)

print("L1 loss on test set AFTER training: {:,.0f}".format(l1_loss(rescale(y_out), rescale(y_test))[0].mean() ))
print("MSE loss on test set AFTER training: {:,.0f}".format(mse_loss(rescale(y_out), rescale(y_test))[0].mean() ))

if np.shape(X_test)[1]==1:
    plt.scatter(X_test, y_test, label = "Ground Truth")
    inds = X_test.argsort(0).flatten()
    plt.plot(X_test[inds], y_out[inds], color='r', label = "Prediction")
    plt.legend()
    plt.show()
# -
# You can now play around with the different loss functions and use more than one feature to do the regression. Does the result improve? Note that if you want to add more features you need to build your own testing set with corresponding dimensions of the features, since here our testing set has only one feature.
# Once this notebook is running and you have understood everything what is in there you can go to next notebook `2_logistic_regression.ipynb`.
|
exercise_04/1_linear_regression.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Slicing
#
# Objects in scipp can be sliced in two ways. The general way to do this is by [positional indexing](#Positional-indexing) using indices as in numpy.
# A second approach is to use [label-based indexing](#Label-based-indexing) which uses actual coordinate values for selection.
# ## Positional indexing
#
# ### Overview
#
# Data in a [variable](../generated/scipp.Variable.rst#scipp.Variable), [data array](../generated/scipp.DataArray.rst#scipp.DataArray), or [dataset](../generated/scipp.Dataset.rst#scipp.Dataset) can be indexed in a similar manner to NumPy and xarray.
# The dimension to be sliced is specified using a dimension label and, in contrast to NumPy, positional dimension lookup is not available.
# Positional indexing with an integer or an integer range is made via `__getitem__` and `__setitem__` with a dimension label as first argument.
# This is available for variables, data arrays, and datasets.
# In all cases a *view* is returned, i.e., just like when slicing a [numpy.ndarray](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray) no copy is performed.
#
# ### Variables
#
# Consider the following variable:
# +
import numpy as np
import scipp as sc

# A 3-D variable (dims z, y, x) with random values and variances, used by
# the slicing examples below.
var = sc.array(
    dims=['z', 'y', 'x'],
    values=np.random.rand(2, 3, 4),
    variances=np.random.rand(2, 3, 4))
sc.show(var)
# -
# As when slicing a `numpy.ndarray`, the dimension `'x'` is removed since no range is specified:
s = var['x', 1]  # no range -> dimension 'x' is dropped
sc.show(s)
print(s.dims, s.shape)
# When a range is specified, the dimension is kept, even if it has extent 1:
# +
s = var['x', 1:3]
sc.show(s)
print(s.dims, s.shape)
s = var['x', 1:2]  # range of length 1 keeps the 'x' dimension
sc.show(s)
print(s.dims, s.shape)
# -
# Slicing can be chained arbitrarily:
# + tags=[]
s = var['x', 1:4]['y', 2]['x', 1]
sc.show(s)
print(s.dims, s.shape)
# -
# The `copy()` method turns a view obtained from a slice into an independent object:
s = var['x', 1:2].copy()
s += 1000  # modifies only the copy; `var` below is unchanged
var
# ### Data arrays
#
# Slicing for data arrays works in the same way, but some additional rules apply.
# Consider:
# 2-D data with 1-D coords, a 1-D mask, and 1-D attrs, so the slicing rules
# for lower-dimensional meta data can be demonstrated below.
a = sc.DataArray(
    data=sc.array(dims=['y', 'x'], values=np.random.rand(2, 3)),
    coords={
        'x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
        'y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)},
    masks={
        'mask': sc.array(dims=['x'], values=[True, False, False])},
    attrs={
        'aux_x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
        'aux_y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)})
sc.show(a)
a
# As when slicing a variable, the sliced dimension is removed when slicing without range, and kept when slicing with range.
#
# When slicing a data array the following additional rule applies:
#
# - Meta data (coords, masks, attrs) that *do not depend on the slice dimension* are marked as *readonly*
# - Slicing **without range**:
# - The *coordinates* for the sliced dimension are *removed* and inserted as *attributes* instead.
# - Slicing **with a range**:
# - The *coordinates* for the sliced dimension are *kept*.
#
# The rationale behind this mechanism is as follows.
# Meta data is often of a lower dimensionality than data, such as in this example where coords, masks, and attrs are 1-D whereas data is 2-D.
# Elements of meta data entries are thus shared by many data elements, and we must be careful to not apply operations to subsets of data while unintentionally modifying meta data for other unrelated data elements:
# Mutating a coord through a slice is only allowed for the sliced dimension:
a['x', 0:1].coords['x'] *= 2 # ok, modifies only coord value "private" to this x-slice
try:
    a['x', 0:1].coords['y'] *= 2 # not ok, would modify coord value "shared" by all x-slices
except sc.VariableError as e:
    print(f'\'y\' is shared with other \'x\'-slices and should not be modified by the slice, so we get an error:\n{e}')
# In practice, a much more dangerous issue this mechanism protects from is unintentional changes to masks.
# Consider
val = a['x', 1]['y', 1].copy()  # 0-D value, detached from `a`
val
# If we now assign this scalar `val` to a slice at `y=0`, using `=` we need to update the mask.
# However, the mask in this example depends only on `x` so it also applies to the slice `y=1`.
# If we allowed updating the mask, the following would *unmask data for all* `y`:
try:
    a['y', 0] = val
except sc.DimensionError as e:
    print(e)
# Since we cannot update the mask in a consistent manner the entire operation fails.
# Data is not modified.
# The same mechanism is applied for binary arithmetic operations such as `+=` where the masks would be updated using a logical OR operation.
#
# The purpose for turning coords into attrs when slicing *without* a range is to support useful operations such as:
# + tags=[]
a - a['x', 1] # compute difference compared to data at x=1
# -
# If `a['x', 0]` had an `x` coordinate this would fail due to a coord mismatch.
# If coord checking is required, use a range-slice such as `a['x', 1:2]`. Compare the two cases shown in the following and make sure to inspect the `dims` and `shape` of all variables (data and coordinates) of the resulting slice views (note the tooltip shown when moving the mouse over the name also contains this information):
sc.show(a['y', 1:2]) # Range of length 1
a['y', 1:2]
sc.show(a['y', 1]) # No range
a['y', 1]
# ### Datasets
#
# Slicing for datasets works just like for data arrays.
# In addition to changing certain coords into attrs and marking certain meta data entries as read-only, slicing a dataset also marks lower-dimensional *data entries* readonly.
# Consider a dataset `d`:
# Dataset with items of mixed dimensionality, including a 0-D scalar, to
# demonstrate the read-only protection of lower-dimensional entries.
d = sc.Dataset(
    {'a': sc.array(dims=['y', 'x'], values=np.random.rand(2, 3)),
     'b': sc.array(dims=['x', 'y'], values=np.random.rand(3, 2)),
     'c': sc.array(dims=['y'], values=np.random.rand(2)),
     '0d-data': sc.scalar(1.0)},
    coords={
        'x': sc.array(dims=['x'], values=np.arange(3.0), unit=sc.units.m),
        'y': sc.array(dims=['y'], values=np.arange(2.0), unit=sc.units.m)})
sc.show(d)
# and a slice of `d`:
sc.show(d['y', 0])
# By marking lower-dimensional entries in the slice as read-only we prevent unintentional multiple modifications of the same scalar:
try:
    d['y', 0] += 1 # would add 1 to `0d-data`
    d['y', 1] += 2 # would add 2 to `0d-data`
except sc.VariableError as e:
    print(e)
# This is an important aspect and it is worthwhile to take some time and think through the mechanism.
#
# Slicing a data item of a dataset should not bring any surprises.
# Essentially this behaves like slicing a data array:
sc.show(d['a']['x', 1:2])
# Slicing and item access can be done in arbitrary order with identical results:
d['x', 1:2]['a'] == d['a']['x', 1:2]
d['x', 1:2]['a'].coords['x'] == d.coords['x']['x', 1:2]
# ## Label-based indexing
#
# ### Overview
#
# Data in a [dataset](../generated/scipp.Dataset.rst#scipp.Dataset) or [data array](../generated/scipp.DataArray.rst#scipp.DataArray) can be selected by the coordinate value.
# This is similar to pandas [pandas.DataFrame.loc](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.loc.html).
# Scipp leverages its ubiquitous support for physical units to provide label-based indexing in an intuitive manner, using the same syntax as [positional indexing](#Positional-indexing).
# For example:
#
# - `array['x', 0:3]` selects positionally, i.e., returns the first three element along `'x'`.
# - `array['x', 1.2*sc.units.m:1.3*sc.units.m]` selects by label, i.e., returns the elements along `'x'` falling between `1.2 m` and `1.3 m`.
#
# That is, label-based indexing is made via `__getitem__` and `__setitem__` with a dimension label as first argument and a scalar [variable](../generated/scipp.Variable.rst#scipp.Variable) or a Python `slice()` as created by the colon operator `:` from two scalar variables.
# In all cases a *view* is returned, i.e., just like when slicing a [numpy.ndarray](https://docs.scipy.org/doc/numpy/reference/generated/numpy.ndarray.html#numpy.ndarray) no copy is performed.
#
# Consider:
# Data array with a dimensionful 'x' coordinate and an integer 'year'
# coordinate, used to demonstrate label-based indexing.
da = sc.DataArray(
    data=sc.array(dims=['year','x'], values=np.random.random((3, 7))),
    coords={
        'x': sc.array(dims=['x'], values=np.linspace(0.1, 0.9, num=7), unit=sc.units.m),
        'year': sc.array(dims=['year'], values=[2020,2023,2027])})
sc.show(da)
da
# We can select a slice of `da` based on the `'year'` labels:
year = sc.scalar(2023)  # a scalar variable, NOT a plain Python int
da['year', year]
# In this case `2023` is the second element of the coordinate so this is equivalent to positionally slicing `da['year', 1]` and [the usual rules](#Positional-indexing) regarding dropping dimensions and converting dimension coordinates to attributes apply:
assert sc.identical(da['year', year], da['year', 1])
# <div class="alert alert-warning">
#
# **Warning**
#
# It is **essential** to not mix up integers and scalar scipp variables containing an integer.
# As in above example, positional indexing yields different slices than label-based indexing.
#
# </div>
#
# <div class="alert alert-info">
#
# **Note**
#
# Here, we created `year` using `sc.scalar`.
# Alternatively, we could use `year = 2023 * sc.units.dimensionless` which is useful for dimensionful coordinates like `'x'` in this case, see below.
#
# </div>
#
# For floating-point-valued coordinates selecting a single point would require an exact match, which is typically not feasible in practice.
# Scipp does *not* do fuzzy matching in this case, instead an `IndexError` is raised:
# + tags=[]
# Exact-match lookup on a float-valued coordinate fails with IndexError
# (scipp does no fuzzy matching), motivating interval-based selection below.
x = 0.23 * sc.units.m # No x coordinate value at this point. Equivalent of sc.scalar(0.23, unit=sc.units.m)
try:
    da['x', x]
except IndexError as e:
    print(str(e))
# + [markdown] tags=[]
# For such coordinates we may thus use an *interval* to select a *range* of values using the `:` operator:
# -
x_left = 0.1 * sc.units.m
x_right = 0.4 * sc.units.m
da['x', x_left:x_right]
# The selection includes the bounds on the "left" but excludes the bounds on the "right", i.e., we select the half-open interval $x \in [x_{\text{left}},x_{\text{right}})$, closed on the left and open on the right.
#
# The half-open interval implies that we can select consecutive intervals without including any data point in both intervals:
x_mid = 0.2 * sc.units.m
sc.to_html(da['x', x_left:x_mid])  # [0.1 m, 0.2 m)
sc.to_html(da['x', x_mid:x_right]) # [0.2 m, 0.4 m) — no overlap with the above
# Just like when slicing positionally one of the bounds can be omitted, to include either everything from the start, or everything until the end:
da['x', :x_right]
# Coordinates used for label-based indexing must be monotonically ordered.
# While it is natural to think of slicing in terms of ascending coordinates, the slicing mechanism also works for descending coordinates.
#
# ### Bin-edge coordinates
#
# Bin-edge coordinates are handled slightly differently from standard coordinates in label-based indexing.
# Consider:
# Data array with a bin-edge coordinate: 'x' has 8 values for 7 data points.
da = sc.DataArray(
    data = sc.array(dims=['x'], values=np.random.random(7)),
    coords={
        'x': sc.array(dims=['x'], values=np.linspace(1.0, 2.0, num=8), unit=sc.units.m)})
da
# Here `'x'` is a bin-edge coordinate, i.e., its length exceeds the array dimensions by one.
# Label-based slicing with a single coord value finds and returns the bin that contains the given coord value:
x = 1.5 * sc.units.m
da['x', x]  # returns the single bin whose edges bracket 1.5 m
# If an interval is provided when slicing with a bin-edge coordinate, the range of bins *containing* the interval bounds (*including* the left as well as the right bin) is selected:
x_left = 1.3 * sc.units.m
x_right = 1.7 * sc.units.m
da['x', x_left:x_right]
# ### Limitations
#
# Label-based indexing is *not* supported for:
#
# - Multi-dimensional coordinates.
# - Non-monotonic coordinates.
#
# The first is a fundamental limitation since a slice cannot be defined in such a case.
# The latter will likely be supported in the future to some extent.
|
docs/user-guide/slicing.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Psych81.09
# language: python
# name: psych81.09
# ---
# # Post Grad Income Data by State:
# ## Investigating whether or not different properties of colleges change based on location of the school
# ### <NAME> & <NAME>
# ### Importing the data
# +
import pandas as pd
import numpy as np
import plotly_express as px
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.figure_factory as ff
# -
# Load the raw institution-level dataset (presumably a U.S. College Scorecard
# extract — one row per institution; confirm against the data source).
college_income_stats = pd.read_csv('Most-Recent-Cohorts-All-Data-Elements.csv')
college_income_stats.head()
# ### Cleaning the data
college_income_stats.columns
#Chose to only look at income information 10 years after graduation.
# Keep only identifiers, location, demographic percentages, and the
# mean/median earnings 10 years after entry (MN/MD_EARN_WNE_P10).
x = ['UNITID','INSTNM','STABBR','ZIP','LATITUDE','LONGITUDE','PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10']
college_income_stats[x]
clean_data = college_income_stats[x].set_index(['UNITID','INSTNM'])
# NOTE(review): filling missing values with 0 treats "unknown" as "zero
# earnings/percentage", which can bias the aggregate statistics below.
clean_data2 = clean_data.fillna(0)
clean_data2.head()
# ### Descriptive statistics
#First, we converted the data into numeric data
# Chained assignment: the coerced columns are written back into clean_data2
# AND bound to numeric_data (same object). errors='coerce' turns any
# non-numeric entry into NaN.
numeric_data = clean_data2[['PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10']] = clean_data2[['PCT_WHITE','PCT_BLACK','PCT_ASIAN','PCT_HISPANIC','MN_EARN_WNE_P10','MD_EARN_WNE_P10']].apply(pd.to_numeric, errors='coerce')
numeric_data.head()
#Then, we calculated the max, min, mean and median of each numeric column
numeric_data.max()
numeric_data.min()
numeric_data.median()
numeric_data.mean()
# ### Data Visualization
# +
# Attach the state abbreviation back onto the numeric columns so we can
# aggregate per state.
x1 = clean_data2['STABBR']
final_data = numeric_data.copy()
final_data['STATE']= x1.tolist()
final_data.head()
# -
# Per-state means of every numeric column.
# NOTE(review): passing np.mean to aggregate is deprecated in newer pandas;
# .aggregate('mean') is the forward-compatible spelling.
by_state=final_data.groupby('STATE').aggregate(np.mean)
by_state.head(10)
# +
#Stacked bar plot of average state demographic composition
state = by_state.index
White = by_state['PCT_WHITE']
Black = by_state['PCT_BLACK']
Asian = by_state['PCT_ASIAN']
Hispanic = by_state['PCT_HISPANIC']
ind = [x for x, _ in enumerate(state)]  # x positions 0..n-1 for the bars
stackedbarplot = plt.figure(figsize=(18,8))
# Each series is offset ('bottom') by the sum of the series drawn above it,
# producing the stacked effect: Hispanic at the base, White on top.
plt.bar(ind, White, width=0.5, label='White', color='Red', bottom=Black+Asian+Hispanic)
plt.bar(ind, Black, width=0.5, label='Black', color='Blue', bottom=Asian+Hispanic)
plt.bar(ind, Asian, width=0.5, label='Asian', color='Yellow', bottom=Hispanic)
plt.bar(ind, Hispanic, width=0.5, label='Hispanic', color='Green')
plt.xticks(ind, state)
plt.ylabel("Demographics")
plt.xlabel("State")
plt.legend(loc="upper right")
plt.title("Institution Demographics by State")
plt.show()
# +
#Bar plot of average median income by state
state = by_state.index
median = by_state['MD_EARN_WNE_P10']
state_pos = [i for i, _ in enumerate(state)]  # x positions 0..n-1
barplot = plt.figure(figsize=(18,8))
plt.bar(state_pos, median,width=0.5, color='Purple')
plt.xlabel("State")
plt.ylabel("Average Median Income")
plt.title("Average Median Income by State")
plt.xticks(state_pos, state)
plt.show()
# -
#States with 10 highest average median incomes
by_state_sorted = by_state.sort_values(['MD_EARN_WNE_P10'], ascending=False)
by_state_sorted.head(10)
#Examining the relationship between demographic percentages and median income
wpop=by_state['PCT_WHITE']
bpop=by_state['PCT_BLACK']
asianpop=by_state['PCT_ASIAN']
hispanicpop=by_state['PCT_HISPANIC']
x=by_state['MD_EARN_WNE_P10']
plt.figure(figsize=(18,8))
# Fix: each scatter series needs a label, otherwise plt.legend() below has
# no labelled artists and renders an empty legend (matplotlib warns
# "No handles with labels found to put in legend").
plt.scatter(x, wpop, color='red', label='White')
plt.scatter(x, bpop, color='blue', label='Black')
plt.scatter(x, asianpop,color='yellow', label='Asian')
plt.scatter(x, hispanicpop, color='green', label='Hispanic')
plt.xlabel('Avg Median Income')
plt.ylabel('Percent')
plt.title('Demographics vs. Avg Median Income')
plt.legend(loc="upper right")
plt.show()
# ### Comparing top 10 earning states and top 10 in each demographic category
# Sort states by each demographic percentage and compare the top 10 with the
# top-10 income states found above.
by_state_sorted2= by_state.sort_values(['PCT_WHITE'], ascending=False)
by_state_sorted2.head(10)
# States that fall in the top 10 for both median income and percent white population include:
# MN, VT, IA, NE
#
by_state_sorted3= by_state.sort_values(['PCT_BLACK'], ascending=False)
by_state_sorted3.head(10)
# States that fall in the top 10 for both median income and percent black population include: DC
by_state_sorted4= by_state.sort_values(['PCT_ASIAN'], ascending=False)
by_state_sorted4.head(10)
# States that fall in the top 10 for both median income and percent asian population include: NY
by_state_sorted5= by_state.sort_values(['PCT_HISPANIC'], ascending=False)
by_state_sorted5.head(10)
# States that fall in the top 10 for both median income and percent hispanic population include: N/A
# Re-attach coordinates and state so the data can be plotted on a map.
map_data = numeric_data.copy()
map_data['LATITUDE']=clean_data2['LATITUDE']
map_data['LONGITUDE']=clean_data2['LONGITUDE']
map_data['STATE']=clean_data2['STABBR']
map_data.head()
|
data-stories/college-income-data/Zweifach_Sizemore_ Minihack#1.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + slideshow={"slide_type": "-"}
# Created by Dr. <NAME> to teach Robotics online during covid 19 outbreak
import sympy as sy
import numpy as np
sy.init_printing()
# + slideshow={"slide_type": "-"}
# Link parameters (link lengths a_i and twist angles alpha_i — presumably
# Denavit-Hartenberg convention; confirm against the course notes)
# NOTE(review): a1..a3 are reassigned later in the notebook (Jacobian
# columns), after the transforms have already been built.
a1 = 1
a2 = 0
a3 = sy.sqrt(2)
alpha1 = 0
alpha2 = sy.pi/4
alpha3 = 0
# + slideshow={"slide_type": "-"}
# Joint parameters: joint angles theta_i are symbolic, offsets d_i numeric.
theta1 = sy.Symbol(r'\theta_1')
theta2 = sy.Symbol(r'\theta_2')
theta3 = sy.Symbol(r'\theta_3')
theta4 = sy.Symbol(r'\theta_4')
d1 = 0
d2 = 0
d3 = sy.sqrt(2)
d4 = 0
# + slideshow={"slide_type": "-"}
# Demo: mixing floats, numpy constants and sympy symbols/constants.
temp1 = 34.567
temp2 = np.pi/4          # numeric float
temp3 = sy.Symbol(r'\mu_k')  # symbolic
temp4 = sy.pi/4          # exact symbolic constant
[temp1, temp2, temp3, temp4]
# + slideshow={"slide_type": "-"}
# Demo: symbolic expansion keeps exact terms; numeric inputs just evaluate.
temp5 = sy.expand((temp3+temp4)**2)
temp6 = sy.expand((temp3+temp2)**2)
temp7 = sy.expand((temp1+temp2)**2)
[temp5, temp6, temp7]
# + slideshow={"slide_type": "-"}
# transformation of the i' frame with respect to the i frame
def link_transform(a_i, alpha_i):
    """Homogeneous transform of frame i' with respect to frame i.

    Translation by a_i along x combined with a rotation by alpha_i
    about x.
    """
    c, s = sy.cos(alpha_i), sy.sin(alpha_i)
    return sy.Matrix([
        [1, 0, 0, a_i],
        [0, c, -s, 0],
        [0, s, c, 0],
        [0, 0, 0, 1],
    ])
# + slideshow={"slide_type": "-"}
# transformation of the i frame with respect to the (i-1)' frame
def joint_transform(d_i, theta_i):
    """Homogeneous transform of frame i with respect to frame (i-1)'.

    Rotation by theta_i about z combined with a translation by d_i
    along z.
    """
    c, s = sy.cos(theta_i), sy.sin(theta_i)
    return sy.Matrix([
        [c, -s, 0, 0],
        [s, c, 0, 0],
        [0, 0, 1, d_i],
        [0, 0, 0, 1],
    ])
# + slideshow={"slide_type": "-"}
# Computation of transformation matrices of different link frames with respect to the ground frame.
# Each T_0_i chains the previous ground-frame transform with the local
# link+joint transform; trigsimp keeps the symbolic entries compact.
T_0 = sy.Identity(4)  # NOTE(review): unused below
T_0_1 = joint_transform(d1, theta1)
T_1_2 = sy.trigsimp( link_transform(a1, alpha1)*joint_transform(d2, theta2) )
T_0_2 = sy.trigsimp( T_0_1* T_1_2);
T_2_3 = sy.trigsimp(link_transform(a2, alpha2)*joint_transform(d3, theta3) )
T_0_3 = sy.trigsimp( T_0_2* T_2_3);
T_3_4 = sy.trigsimp(link_transform(a3, alpha3)*joint_transform(d4, theta4) )
T_0_4 = sy.trigsimp( T_0_3* T_3_4);
# + slideshow={"slide_type": "-"}
T_0_1, T_0_2, T_0_3, T_0_4 # Transformation matrices of first, second, third and fourth bodies
# + slideshow={"slide_type": "-"}
T_0_4[2,3] # (3,4)th element (1-based) of the transformation matrix for frame 4: the z translation
# + slideshow={"slide_type": "-"}
# Extraction of rotation matrices (top-left 3x3) and position vectors
# (top-right 3x1 column) from each homogeneous transform.
R_0_1= T_0_1[0:3,0:3]
R_1_2= T_1_2[0:3,0:3]
R_2_3= T_2_3[0:3,0:3]
R_3_4= T_3_4[0:3,0:3]
r_0_1=T_0_1[0:3,3]
r_1_2=T_1_2[0:3,3]
r_2_3=T_2_3[0:3,3]
r_3_4=T_3_4[0:3,3]
# + slideshow={"slide_type": "-"}
def cross_product(a, b):
    """Return the cross product a x b of two 3x1 sy.Matrix column vectors."""
    ax, ay, az = a[0, 0], a[1, 0], a[2, 0]
    bx, by, bz = b[0, 0], b[1, 0], b[2, 0]
    return sy.Matrix([
        [ay * bz - az * by],
        [az * bx - ax * bz],
        [ax * by - ay * bx],
    ])
# + slideshow={"slide_type": "-"}
# Sanity check of cross_product: z-hat x x-hat should give y-hat.
m=sy.Matrix([[0],[0],[1]])
n=sy.Matrix([[1],[0],[0]])
p = cross_product(m,n)
p
# + slideshow={"slide_type": "-"}
# Joint rates: prismatic rates d_d_i are zero, revolute rates are symbolic.
d_d1=0
d_d2=0
d_d3=0
d_d4=0
d_theta1 = sy.Symbol(r'\dot{\theta}_1')
d_theta2 = sy.Symbol(r'\dot{\theta}_2')
d_theta3 = sy.Symbol(r'\dot{\theta}_3')
d_theta4 = sy.Symbol(r'\dot{\theta}_4')
d_d1, d_d2, d_d3, d_d4, d_theta1, d_theta2, d_theta3, d_theta4
# + slideshow={"slide_type": "-"}
# Base frame is stationary: zero angular and linear velocity.
omega_0_0 = sy.Matrix([[0],[0],[0]])
v_0_0 = sy.Matrix([[0],[0],[0]])
# + slideshow={"slide_type": "-"}
# Outward velocity propagation: each frame's angular/linear velocity is the
# previous frame's, rotated into the local frame, plus the local joint rate
# about/along the local z axis.
omega_1_1= R_0_1.T*(omega_0_0)+sy.Matrix([[0],[0],[d_theta1] ])
v_1_1 = R_0_1.T*(v_0_0 + cross_product(omega_0_0,r_0_1))+sy.Matrix([[0],[0],[d_d1] ])
omega_1_1, v_1_1
# + slideshow={"slide_type": "-"}
omega_2_2= R_1_2.T*(omega_1_1)+sy.Matrix([[0],[0],[d_theta2] ])
v_2_2 = R_1_2.T*(v_1_1 + cross_product(omega_1_1,r_1_2))+sy.Matrix([[0],[0],[d_d2] ])
omega_2_2, v_2_2
# + slideshow={"slide_type": "-"}
omega_3_3= R_2_3.T*(omega_2_2)+sy.Matrix([[0],[0],[d_theta3] ])
v_3_3 = R_2_3.T*(v_2_2 + cross_product(omega_2_2,r_2_3))+sy.Matrix([[0],[0],[d_d3] ])
omega_3_3, v_3_3
# + slideshow={"slide_type": "-"}
omega_4_4= R_3_4.T*(omega_3_3)+sy.Matrix([[0],[0],[d_theta4] ])
v_4_4 = R_3_4.T*(v_3_3 + cross_product(omega_3_3,r_3_4))+sy.Matrix([[0],[0],[d_d4] ])
omega_4_4, v_4_4
# + slideshow={"slide_type": "-"}
# Rotate the end-effector velocities back into the ground frame.
R_0_4= T_0_4[0:3,0:3]
v_0_4=sy.trigsimp(R_0_4*v_4_4)
omega_0_4 = sy.trigsimp(R_0_4*omega_4_4)
# + slideshow={"slide_type": "slide"}
# 6x1 spatial velocity (twist): linear on top, angular below.
mu_0_4 = sy.Matrix([v_0_4, omega_0_4])
mu_0_4
# +
# Jacobian columns: substituting a unit rate for one joint (zeros for the
# others) into the twist isolates that joint's contribution.
# NOTE(review): a1..a4 shadow the link parameters a1..a3 defined at the top
# of the notebook; safe here only because the transforms were already built.
a1= mu_0_4.subs([(d_theta1, 1), (d_theta2,0), (d_theta3, 0), (d_theta4,0)])
a2= mu_0_4.subs([(d_theta1, 0), (d_theta2,1), (d_theta3, 0), (d_theta4,0)])
a3= mu_0_4.subs([(d_theta1, 0), (d_theta2,0), (d_theta3, 1), (d_theta4,0)])
a4= mu_0_4.subs([(d_theta1, 0), (d_theta2,0), (d_theta3, 0), (d_theta4,1)])
# -
a1
# Assemble the 6x4 Jacobian column by column.
J=a1
J=J.col_insert(1,a2)
J=J.col_insert(2,a3)
J=J.col_insert(3,a4)
J
# Evaluate the Jacobian at a specific joint configuration.
J_num_1 = J.subs([(theta1, 0), (theta2, sy.pi/2), (theta3, -sy.pi/2), (theta4,0)])
J_num_1
|
code.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] _uuid="fca6815e12081ccb8df84000c8965c59783cd250"
# <a href="https://www.bigdatauniversity.com"><img src = "https://ibm.box.com/shared/static/ugcqz6ohbvff804xp84y4kqnvvk3bq1g.png" width = 300, align = "center"></a>
#
#
#
#
# <h1 align=center><font size = 5>TUPLES IN PYTHON</font></h1>
# + [markdown] _uuid="34ccf5303ea8b6d86848126a0648d44b2267b8bd"
# <a id="ref0"></a>
# <center><h2>About the Dataset</h2></center>
# + [markdown] _uuid="ae53d463acf63d10afd303716d269cafe6f24f52"
#
# ## Table of Contents
#
#
# <div class="alert alert-block alert-info" style="margin-top: 20px">
# <li><a href="#ref0">About the Dataset</a></li>
# <li><a href="#ref1">Tuples</a></li>
# <li><a href="#ref2">Quiz on Tuples</a></li>
#
# <p></p>
# Estimated Time Needed: <strong>15 min</strong>
# </div>
#
# <hr>
# + [markdown] _uuid="1d819c67c9e8ed38a9d73fd6ebbed0c27a6f1b79"
# Imagine you received album recommendations from your friends and compiled all of the recommendations into a table, with specific information about each album.
#
# The table has one row for each album and several columns:
#
# - **artist** - Name of the artist
# - **album** - Name of the album
# - **released_year** - Year the album was released
# - **length_min_sec** - Length of the album (hours,minutes,seconds)
# - **genre** - Genre of the album
# - **music_recording_sales_millions** - Music recording sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
# - **claimed_sales_millions** - Album's claimed sales (millions in USD) on [SONG://DATABASE](http://www.song-database.com/)
# - **date_released** - Date on which the album was released
# - **soundtrack** - Indicates if the album is the movie soundtrack (Y) or (N)
# - **rating_of_friends** - Indicates the rating from your friends from 1 to 10
# <br>
# <br>
#
# The dataset can be seen below:
#
# <font size="1">
# <table font-size:xx-small style="width:25%">
# <tr>
# <th>Artist</th>
# <th>Album</th>
# <th>Released</th>
# <th>Length</th>
# <th>Genre</th>
# <th>Music recording sales (millions)</th>
# <th>Claimed sales (millions)</th>
# <th>Released</th>
# <th>Soundtrack</th>
# <th>Rating (friends)</th>
# </tr>
# <tr>
# <td><NAME></td>
# <td>Thriller</td>
# <td>1982</td>
# <td>00:42:19</td>
# <td>Pop, rock, R&B</td>
# <td>46</td>
# <td>65</td>
# <td>30-Nov-82</td>
# <td></td>
# <td>10.0</td>
# </tr>
# <tr>
# <td>AC/DC</td>
# <td>Back in Black</td>
# <td>1980</td>
# <td>00:42:11</td>
# <td>Hard rock</td>
# <td>26.1</td>
# <td>50</td>
# <td>25-Jul-80</td>
# <td></td>
# <td>8.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>The Dark Side of the Moon</td>
# <td>1973</td>
# <td>00:42:49</td>
# <td>Progressive rock</td>
# <td>24.2</td>
# <td>45</td>
# <td>01-Mar-73</td>
# <td></td>
# <td>9.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>The Bodyguard</td>
# <td>1992</td>
# <td>00:57:44</td>
# <td>Soundtrack/R&B, soul, pop</td>
# <td>26.1</td>
# <td>50</td>
# <td>25-Jul-80</td>
# <td>Y</td>
# <td>7.0</td>
# </tr>
# <tr>
# <td>Meat Loaf</td>
# <td>Bat Out of Hell</td>
# <td>1977</td>
# <td>00:46:33</td>
# <td>Hard rock, progressive rock</td>
# <td>20.6</td>
# <td>43</td>
# <td>21-Oct-77</td>
# <td></td>
# <td>7.0</td>
# </tr>
# <tr>
# <td>Eagles</td>
# <td>Their Greatest Hits (1971-1975)</td>
# <td>1976</td>
# <td>00:43:08</td>
# <td>Rock, soft rock, folk rock</td>
# <td>32.2</td>
# <td>42</td>
# <td>17-Feb-76</td>
# <td></td>
# <td>9.5</td>
# </tr>
# <tr>
# <td><NAME></td>
# <td>Saturday Night Fever</td>
# <td>1977</td>
# <td>1:15:54</td>
# <td>Disco</td>
# <td>20.6</td>
# <td>40</td>
# <td>15-Nov-77</td>
# <td>Y</td>
# <td>9.0</td>
# </tr>
# <tr>
# <td>Fleetwood Mac</td>
# <td>Rumours</td>
# <td>1977</td>
# <td>00:40:01</td>
# <td>Soft rock</td>
# <td>27.9</td>
# <td>40</td>
# <td>04-Feb-77</td>
# <td></td>
# <td>9.5</td>
# </tr>
# </table></font>
# + [markdown] _uuid="e5a00e2992d2ec5d47aa0fca1f82175f88580a85"
# <hr>
# + [markdown] _uuid="e3622d6f946d9a5c6ab1e30059c04999f4d1fe80"
# <a id="ref1"></a>
# <center><h2>Tuples</h2></center>
#
# In Python, there are different data types: string, integer and float. These data types can all be contained in a tuple as follows:
#
#
# + [markdown] _uuid="c7c03d2491fc91ecdaf8be64903f5f70e0b7b707"
# <img src = "https://ibm.box.com/shared/static/t2jw5ia78ulp8twr71j6q7055hykz10c.png" width = 750, align = "center"></a>
#
#
# + _uuid="5852d099ebc147c96a7ec86340e145f2966117e3"
# A tuple mixing a string, an int and a float.
tuple1=("disco",10,1.2 )
tuple1
# + [markdown] _uuid="d7a66f8ed1910962bbef704cd086e2acf10af467"
# The type of variable is a **tuple**.
# + _uuid="f4d37878d46d015bbd8a93fc5780b56f4514fe6e"
type(tuple1)
# + [markdown] _uuid="73b1c2bf2e600160243047f5423937dc84366ecd"
# Each element of a tuple can be accessed via an index. The following table represents the relationship between the index and the items in the tuple. Each element can be obtained by the name of the tuple followed by a square bracket with the index number:
# + [markdown] _uuid="025a156c475e7937becd6378bcdbd787ed014b37"
# <img src = "https://ibm.box.com/shared/static/83kpang0opwen5e5gbwck6ktqw7btwoe.gif" width = 750, align = "center"></a>
#
#
# + [markdown] _uuid="68d2660fc8ae6a8bc708e5b05da43b48dbefe291"
# We can print out each value in the tuple:
# + _uuid="024dcf6114b584181383696cebc81c6f370e4aa9"
print( tuple1[0])  # "disco"
print( tuple1[1])  # 10
print( tuple1[2])  # 1.2
# + [markdown] _uuid="9b8a69db88b4c0f2e51c1d79af41c274dca90625"
# We can print out the **type** of each value in the tuple:
#
# + _uuid="787b639a5c059cd4e5fb1e376aea10c0d801fc0f"
print( type(tuple1[0]))  # str
print( type(tuple1[1]))  # int
print( type(tuple1[2]))  # float
# + [markdown] _uuid="c82255f0dae6deb3a6995c4c2ac711bb418fd2fd"
# We can also use negative indexing. We use the same table above with corresponding negative values:
# + [markdown] _uuid="6d8e776ce8d09dfa767a655e490dafeb1763f043"
# <img src = "https://ibm.box.com/shared/static/uwlfzo367bekwg0p5s5odxlz7vhpojyj.png" width = 750, align = "center"></a>
#
# + [markdown] _uuid="30c99bd52490a1b1613d2847a2195d9f9820c221"
# We can obtain the last element as follows (this time we will not use the print statement to display the values):
# + _uuid="7930f19feb897f42ac79ce22f2844c992ade298b"
tuple1[-1]  # last element
# + [markdown] _uuid="0d8bfeac211c57f2ce206150c5ba8c50b83668c4"
# We can display the other two elements as follows:
# + _uuid="181d689b360e9552d36a7dba9f56222f847bfaa3"
tuple1[-2]  # second-to-last element
# + _uuid="c45923783b76b83bd9e3d7e7845539ba317cd268"
tuple1[-3]  # third-to-last element
# + [markdown] _uuid="17450e3f24608116b23bd8e8797b73111827ead8"
# We can concatenate or combine tuples by using the **+** sign:
# + _uuid="4810be6d66feab44bf47dafdef958e77614f7de4"
# Concatenation creates a NEW tuple; tuples themselves are immutable.
tuple2=tuple1+("hard rock", 10)
tuple2
# + [markdown] _uuid="14e8d2d435d36232b20dc9d32f9d75f79d224acd"
# We can slice tuples obtaining multiple values as demonstrated by the figure below:
# + [markdown] _uuid="d4fbba8f2bd700781baac0573d0cca4f83535002"
# <img src = "https://ibm.box.com/shared/static/s9nofy728bcnsgnx3vh159bu16w7frnc.gif" width = 750, align = "center"></a>
#
# + [markdown] _uuid="04be7b596983167f3413c803e56cf8e57b0a97de"
# We can slice tuples, obtaining new tuples with the corresponding elements:
# + _uuid="5f7224b7bb23bb50322ef5cb947cea391b87c17c"
tuple2[0:3]  # indexes 0, 1, 2 (the stop index 3 is excluded)
# + [markdown] _uuid="3153445eec15306dc8273917a5674824e0f4878f"
# We can obtain the last two elements of the tuple:
# + _uuid="be73698aeb0a08fa3c9d124462c8eba265e1151f"
tuple2[3:5]
# + [markdown] _uuid="e3ce4061a9a8267d7ad17f84f20c5255abf6fd9d"
# We can obtain the length of a tuple using the length command:
# + _uuid="cc93139255429f1cf732372852ae391d3f8c164e"
len(tuple2)
# + [markdown] _uuid="8f5d9a850259579efd0ca3fe79591fb3222e3980"
# This figure shows the number of elements:
# + [markdown] _uuid="dd540b0376ce7694168c779cae03a39017fd51c1"
#
# <img src = "https://ibm.box.com/shared/static/apxe8l3w42f597yjhizg305merlm4ijf.png" width = 750, align = "center"></a>
#
# + [markdown] _uuid="0d96c9877e8abe79a58a51775cf20dd8e1d1dd54"
# Consider the following tuple:
# + _uuid="7063320b7b5cecd4836ff08dc7b0edd617a164fe"
Ratings =(0,9,6,5,10,8,9,6,2)
# + [markdown] _uuid="c78630317292fba4f5db364c73d8aab37b7470bc"
# We can assign the tuple to a 2nd variable (this aliases the same tuple
# object; no copy is made, which is safe because tuples are immutable):
#
# + _uuid="90332267a3407ae94d0ac7f24b50595c4cf4fcce"
Ratings1=Ratings
Ratings
# + [markdown] _uuid="ce3b005e5317148a78bfc9f4422e4ae782fc1d64"
# We can sort the values in a tuple and save the result to a new variable
# (note that sorted() always returns a list, not a tuple):
# + _uuid="a56a51d77dbedcc9fd29f1106211d0620781d9eb"
RatingsSorted=sorted(Ratings )
RatingsSorted
# + [markdown] _uuid="113738f360ae3fc640a9ee31a23e82054cd4f871"
# A tuple can contain another tuple as well as other more complex data types. This process is called 'nesting'. Consider the following tuple with several elements:
# + _uuid="fdda6c60da5c0d8bc087526a552d0425cd0eeaf0"
NestedT =(1, 2, ("pop", "rock") ,(3,4),("disco",(1,2)))
# + [markdown] _uuid="3023ed5fd6b3e7900bb8bccb14c59520a322bb11"
# Each element in the tuple including other tuples can be obtained via an index as shown in the figure:
# + [markdown] _uuid="f2d12d52a76e69db2bf79e483e1e2887b02b6429"
# <img src = "https://ibm.box.com/shared/static/estqe2bczv5weocc4ag4mx9dtqy952fp.png" width = 750, align = "center"></a>
#
# + _uuid="bae8583a649395c18de36c85246886dde7a44756"
# Top-level elements: the nested tuples print as whole values.
print("Element 0 of Tuple: ", NestedT[0])
print("Element 1 of Tuple: ", NestedT[1])
print("Element 2 of Tuple: ", NestedT[2])
print("Element 3 of Tuple: ", NestedT[3])
print("Element 4 of Tuple: ", NestedT[4])
# + [markdown] _uuid="ec5338996bfa1507a2cbf78e42c67b94aa309c39"
# We can use the second index to access other tuples as demonstrated in the figure:
# + [markdown] _uuid="2146f7c124205ea44ac63430c8c5481279861480"
# <img src = "https://ibm.box.com/shared/static/j1orgjuasaaj3d0feymedrnoqv8trqyo.png" width = 750, align = "center"></a>
#
# + [markdown] _uuid="2f171fea0db2939d2daeb71bf06c16f46840dcdb"
# We can access the nested tuples :
# + _uuid="4c64f1743a44f9fa51245d5c5506d62abab39828"
# A second index selects an element inside the nested tuple.
print("Element 2,0 of Tuple: ", NestedT[2][0])
print("Element 2,1 of Tuple: ", NestedT[2][1])
print("Element 3,0 of Tuple: ", NestedT[3][0])
print("Element 3,1 of Tuple: ", NestedT[3][1])
print("Element 4,0 of Tuple: ", NestedT[4][0])
print("Element 4,1 of Tuple: ", NestedT[4][1])
# + [markdown] _uuid="f4fd365c326442de6bfaf5384a7ae2e5b122f3e0"
# We can access strings in the second nested tuples using a third index:
# + _uuid="a7c19345fcdef3acadee85b810551ecf9e8b6d5e"
NestedT[2][1][0]  # first character of "rock"
# + _uuid="3ea5ce3e1702731cf94603d6f08beca8dec28cb4"
NestedT[2][1][1]  # second character of "rock"
# + [markdown] _uuid="6f7e7418cfcdf16a5df79fc919281d6f248faf29"
# We can use a tree to visualise the process. Each new index corresponds to a deeper level in the tree:
# + [markdown] _uuid="6daa3bd7da43c892faa0763f380ae424dfda9ce8"
# <img src ='https://ibm.box.com/shared/static/vjvsygpzpwcr6czsucgno1wukyhk5vxq.gif' width = 750, align = "center"></a>
# + [markdown] _uuid="9299b89d7e37be102e529dd7f6406714bf79a527"
# Similarly, we can access elements nested deeper in the tree with a fourth index:
# + _uuid="ecee7bf25cc02826490200fd4ae3ddd86c0c72af"
NestedT[4][1][0]  # element of the tuple nested inside ("disco", (1, 2))
# + _uuid="bad4eaef0780a0d7c48481f8aa518576bed33223"
NestedT[4][1][1]
# + [markdown] _uuid="9987c3c1e834a5d1ca234f716a7248785a83ddff"
# The following figure shows the relationship of the tree and the element **NestedT[4][1][1]**:
# + [markdown] _uuid="b58e4f365882ad4854ac7f1c54a5fd649428a684"
# <img src ='https://ibm.box.com/shared/static/9y5s7515zwzc9v6i4f67yj3np2fv9evs.gif'width = 750, align = "center"></a>
# + [markdown] _uuid="c383591d4c30178473a6dfc4ad1dc66ed63ace72"
# <a id="ref2"></a>
# <h2 align=center> Quiz on Tuples </h2>
# + [markdown] _uuid="ca171a1e58d7efe3723b597cb96072adcf0b6241"
# Consider the following tuple:
# + _uuid="b853e24e981250dd82f4a5fb0b1f1af799561736"
# Tuple used by the quiz questions below.
genres_tuple = ("pop", "rock", "soul", "hard rock", "soft rock", \
                "R&B", "progressive rock", "disco")
genres_tuple
# + [markdown] _uuid="2f08c52aebcfaf907e5fee347797872c21f4cf4d"
# #### Find the length of the tuple, "genres_tuple":
# + _uuid="7cdcdc0c6bf69d2c23bf1dd97a2a4dda849a0eaf"
# + [markdown] _uuid="900e55299f16b2ff1f8d983d2fb31224cfcaa99d"
# <div align="right">
# <a href="#String1" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
#
# </div>
#
#
# <div id="String1" class="collapse">
#
# "len(genres_tuple)"
# <a ><img src = "https://ibm.box.com/shared/static/n4969qbta8hhsycs2dc4n8jqbf062wdw.png" width = 1100, align = "center"></a>
# ```
#
#
# ```
# </div>
#
# + [markdown] _uuid="2d32affc27daa7e740d46934a78173c7fe183c45"
# #### Access the element, with respect to index 3:
# + _uuid="1e329438819dd6fd9518dda215a58fb7d9ed4f94"
# + [markdown] _uuid="09841a5db2d7ed78ca52330103cd813ab9baac16"
# <div align="right">
# <a href="#2" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
#
# </div>
#
#
# <div id="2" class="collapse">
#
#
# <a ><img src = "https://ibm.box.com/shared/static/s6r8v2uy6wifmaqv53w6adabqci47zme.png" width = 1100, align = "center"></a>
#
# </div>
#
#
#
# + [markdown] _uuid="4a17444679c1504a15654503781b5fe51439816f"
# #### Use slicing to obtain indexes 3, 4 and 5:
# + _uuid="d0a33d8ec31c6776949c0b04f5c761819592cb30"
# + [markdown] _uuid="23fc678dcbf715e2e53e0e69a576c4851863cee1"
#
# <div align="right">
# <a href="#3" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
#
# </div>
#
#
# <div id="3" class="collapse">
#
#
# <a ><img src = "https://ibm.box.com/shared/static/nqo84vydw6eixdex0trybuvactcw7ffi.png" width = 1100, align = "center"></a>
#
# </div>
# + [markdown] _uuid="ceb5bf43fb50ec70b3687890e752fc8af03d5876"
# #### Find the first two elements of the tuple "genres_tuple":
# + _uuid="003a786a98ac6f1d13c0e2231d967a01a6cc3086"
# + [markdown] _uuid="a188f9463acc7a4d792f72c42656c28a89e92a49"
# <div align="right">
# <a href="#q5" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
# </div>
# <div id="q5" class="collapse">
# ```
# genres_tuple[0:2]
#
# ```
#
# + [markdown] _uuid="291ebeaec57433d1cdb45051ee5ca3ec3daa71be"
# #### Find the first index of 'disco':
# + _uuid="4ce46f2755e10f25f54080f1ce77628f6db2d570"
# + [markdown] _uuid="1968147021882aa834e1b69e6f08c7b64357cc19"
# <div align="right">
# <a href="#q6" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
# </div>
# <div id="q6" class="collapse">
# ```
# genres_tuple.index("disco")
#
# ```
# + [markdown] _uuid="84fa62f76633583737ceec3b3fc27e99520919a5"
# <hr>
# + [markdown] _uuid="52317c3e80cba27f15e28c7761109913e6443315"
# #### Generate a sorted List from the Tuple C_tuple=(-5,1,-3):
# + _uuid="af4a431e98eacc1e51f81383dcbc42ab545eef9e"
# + [markdown] _uuid="ea25ad687c9f216fc89e2f19e223e2b5bdf52369"
# <div align="right">
# <a href="#q7" class="btn btn-default" data-toggle="collapse">Click here for the solution</a>
# </div>
# <div id="q7" class="collapse">
# ```
# C_tuple = (-5,1,-3)
# C_list = sorted(C_tuple)
# C_list
#
# ```
# + [markdown] _uuid="4d538b9092a2fef4058effbce7c0759fbf950abf"
# <hr></hr>
# <div class="alert alert-success alertsuccess" style="margin-top: 20px">
# <h4> [Tip] Saving the Notebook </h4>
#
# Your notebook saves automatically every two minutes. You can manually save by going to **File** > **Save and Checkpoint**. You can come back to this notebook anytime by clicking this notebook under the "**Recent Notebooks**" list on the right-hand side.
#
#
# </div>
# <hr></hr>
# <div class="alert alert-success alertsuccess" style="margin-top: 20px">
# <h4> [Tip] Notebook Features </h4>
#
# Did you know there are other **notebook options**? Click on the **>** symbol to the left of the notebook:
#
# <img src =https://ibm.box.com/shared/static/otu40m0kkzz5hropxah1nnzd2j01itom.png width = 35%>
#
#
# <p></p>
#
# </div>
# <hr></hr>
# + [markdown] _uuid="78e53751d40ea5c18601781349d6f6f17feb47ec"
# <a href="http://cocl.us/NotebooksPython101bottom"><img src = "https://ibm.box.com/shared/static/irypdxea2q4th88zu1o1tsd06dya10go.png" width = 750, align = "center"></a>
#
#
# + [markdown] _uuid="3b6b7616f3a6299e2ee86f515ce2f28ad17b691f"
# # About the Authors:
#
# [<NAME>]( https://www.linkedin.com/in/joseph-s-50398b136/) has a PhD in Electrical Engineering, his research focused on using machine learning, signal processing, and computer vision to determine how videos impact human cognition. Joseph has been working for IBM since he completed his PhD.
#
# + [markdown] _uuid="a0701e9fe536f68d8fbf5482b0f16405e5b3513e"
# <hr>
# Copyright © 2017 [cognitiveclass.ai](cognitiveclass.ai?utm_source=bducopyrightlink&utm_medium=dswb&utm_campaign=bdu). This notebook and its source code are released under the terms of the [MIT License](https://bigdatauniversity.com/mit-license/).
# + _uuid="d8d39fd6146ceeb8ce3418e277f631afd7c2c393"
|
Jupyter notebook/IBM/python for data science/python tuples.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # An old FiPy solution to 1D BL
# I'm not really sure if the result is correct.
# The result is indeed incorrect. See the updated code a few cells below, which might work slightly better.
# +
from fipy import *
# Velocity, domain length, grid resolution and time step.
u = 1.e-3
L = 100.
nx = 200
dt = 200.
dx = L/nx
# Oil and water viscosities.
muo = 0.002
muw = 0.001
mesh = Grid1D(dx = L/nx, nx = nx)
x = mesh.cellCenters
sw = CellVariable(mesh=mesh, name="saturation", hasOld=True, value = 0.)
sw.setValue(1,where = x<=dx)  # initialize the first cell fully water-saturated
sw.constrain(1.,mesh.facesLeft)
#sw.constrain(0., mesh.facesRight)
sw.faceGrad.constrain([0], mesh.facesRight)
# Advection of saturation with a quadratic (Corey-like, no residual
# saturations) fractional-flow coefficient.
eq = TransientTerm(coeff=1) + UpwindConvectionTerm(coeff = u
        *(sw**2./muw)/(sw**2./muw+(1-sw)**2./muo) * [[1]]) == 0
# NOTE(review): the three boundary-condition lines below duplicate the ones
# applied above; they are redundant but harmless.
sw.constrain(1.,mesh.facesLeft)
#sw.constrain(0., mesh.facesRight)
sw.faceGrad.constrain([0], mesh.facesRight)
steps = 100
viewer = Viewer(vars = sw, datamax=1.1, datamin=-0.1)
# Sweep each time step until the nonlinear residual drops below tolerance.
for step in range(steps):
    sw.updateOld()
    swres = 1.0e6
    while swres > 1e-5:
        swres = eq.sweep(dt = dt, var = sw)
        print(swres)
    viewer.plot()
# -
# ## Updates solution on October 8th, 2019
# +
from fipy import *
# relperm parameters (power-law / Corey-type model used by krw, kro below)
swc = 0.1    # connate water saturation (lower bound of mobile range)
sor = 0.1    # residual oil saturation (upper bound is 1 - sor)
krw0 = 0.3   # water relperm end-point value
kro0 = 1.0   # oil relperm end-point value
nw = 2.0     # water exponent
no = 2.0     # oil exponent
# domain and boundaries
u = 1.e-3    # injection velocity
L = 100.     # domain length
nx = 50      # number of grid cells
dt = 200.    # time-step size
dx = L/nx    # cell size
# fluid properties
muo = 0.002  # oil viscosity
muw = 0.001  # water viscosity
# define the fractional flow functions
def krw(sw):
    """Water relative permeability (power-law model) at water saturation *sw*."""
    # Normalize saturation onto the mobile range, then apply the exponent.
    s_eff = (sw - swc) / (1 - swc - sor)
    return krw0 * s_eff**nw
def kro(sw):
    """Oil relative permeability (power-law model) at water saturation *sw*."""
    # Normalize the oil saturation onto the mobile range, then apply the exponent.
    s_eff = (1 - sw - sor) / (1 - swc - sor)
    return kro0 * s_eff**no
def fw(sw):
    """Fractional flow of water: water mobility over total mobility."""
    mob_w = krw(sw) / muw
    mob_o = kro(sw) / muo
    return mob_w / (mob_w + mob_o)
import matplotlib.pyplot as plt
import numpy as np
# Saturation axis for plotting: the mobile range [swc, 1 - sor].
sw_plot = np.linspace(swc, 1-sor, 50)
# -
# ## Visualize the relative permeability and fractional flow curves
# +
krw_plot = [krw(sw) for sw in sw_plot]
kro_plot = [kro(sw) for sw in sw_plot]
fw_plot = [fw(sw) for sw in sw_plot]
# Relative permeability curves vs. water saturation.
plt.figure(1)
plt.plot(sw_plot, krw_plot, sw_plot, kro_plot)
plt.show()
# Fractional flow curve vs. water saturation.
plt.figure(2)
plt.plot(sw_plot, fw_plot)
plt.show()
# +
# create the grid
mesh = Grid1D(dx = L/nx, nx = nx)
x = mesh.cellCenters
# create the cell variables and boundary conditions
sw = CellVariable(mesh=mesh, name="saturation", hasOld=True, value = 0.)
# sw.setValue(1,where = x<=dx)
# Inject at the left face at the maximum mobile saturation (1 - sor);
# zero-gradient (outflow) condition on the right face.
sw.constrain(1-sor,mesh.facesLeft)
#sw.constrain(0., mesh.facesRight)
sw.faceGrad.constrain([0], mesh.facesRight)
# -
# ## Some tests on fipy numerix
# Sanity check: krw() accepts a FiPy CellVariable directly.
krw_cell = krw(sw)
krw_cell.value
# It works fine
# +
# NOTE(review): the convection coefficient below hard-codes quadratic
# relperms (sw**2/muw etc.) instead of reusing the krw/kro/fw functions
# defined above -- confirm this is intentional.
eq = TransientTerm(coeff=1) + UpwindConvectionTerm(coeff = u
*(sw**2./muw)/(sw**2./muw+(1-sw)**2./muo) * [[1]]) == 0
sw.constrain(1.,mesh.facesLeft)
#sw.constrain(0., mesh.facesRight)
sw.faceGrad.constrain([0], mesh.facesRight)
steps = 100
viewer = Viewer(vars = sw, datamax=1.1, datamin=-0.1)
# Outer loop advances time; inner loop sweeps the nonlinear coefficient
# until the residual falls below 1e-5.
for step in range(steps):
    sw.updateOld()
    swres = 1.0e6
    while swres > 1e-5:
        swres = eq.sweep(dt = dt, var = sw)
        print(swres)
    viewer.plot()
|
python/.ipynb_checkpoints/BL_1D_fipy_fixed-checkpoint.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + colab={} colab_type="code" id="5EPC_hl-aFf-"
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" executionInfo={"elapsed": 934, "status": "ok", "timestamp": 1569951764035, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="XprT3U8TaFgC" outputId="d5b26016-f237-4486-f61f-efc0542cd4c3"
#Loading Data
# Each line of the text file becomes one row; header=None because the
# file carries no column names. NOTE(review): sep="\n" for a one-column
# read relies on older pandas behavior -- verify on the pandas version used.
df = pd.read_csv('Twotonguetwisters.txt', sep="\n", header=None)
df.columns = ["Description"]
df.head()
#Adding the Categories to the dataframe
# Hand-assigned labels for the 5 documents: first three C1, last two C2.
categories = ['C1', 'C1', 'C1', 'C2', 'C2']
df['Category'] = categories
df.head()
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 523, "status": "ok", "timestamp": 1569951768924, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="8LHr16xsaFgH" outputId="ab4f0fa1-0a8c-4258-ee81-785d2b0954e7"
print(f'There are {len(df)} records')
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 288, "status": "ok", "timestamp": 1569951770458, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="985rThXzaFgL" outputId="d09d0670-7ba2-459f-8b9f-fd1e5b56d7b2"
#Checking for missing values, ensuring data is complete.
df.isnull().sum()
# + colab={"base_uri": "https://localhost:8080/", "height": 300} colab_type="code" executionInfo={"elapsed": 385, "status": "ok", "timestamp": 1569951771660, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="DB74SoZHaFgN" outputId="1d8fa1cc-af5b-4ded-8924-47a4d2db650a"
#Visualizing the split between the data categories
sns.countplot(df.Category)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 241, "status": "ok", "timestamp": 1569951773478, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="kSc8PxwqaFgP" outputId="1d18d30f-30b9-4e87-ccb9-2f6b9ceafb5d"
df['Category'].value_counts()
# + [markdown] colab_type="text" id="FMFBMLhuaFgR"
# #We see that out of 5, 3 of them are classified as C1.
# #Our model must perform better than 67% to beat random chance.
# + colab={} colab_type="code" id="46PeLdzfaFgS"
#Splitting the data into train & test sets:
from sklearn.model_selection import train_test_split
x = df['Description']
y = df['Category']
# Fixed random_state keeps the split reproducible across runs.
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.33, random_state=42)
# Binarize the labels: C1 -> 1, anything else (C2) -> 0.
y_train = np.where(y_train=='C1',1,0)
y_test = np.where(y_test=='C1',1,0)
# + colab={} colab_type="code" id="UQsUcQJYaFgU"
#Creating a bag of words with word counts using SckKit Learn's CountVectorizer
#Defining a tokenizer for use by scikit-learn
import re

# BUG FIX: the original pattern '[^a-zA-z]' used the range A-z, which also
# matches the ASCII characters between 'Z' and 'a' ('[', '\', ']', '^',
# '_', '`'), so e.g. 'a_b' was kept as a single token. r'[^a-zA-Z]'
# matches exactly the non-letters.
re_tok = re.compile(r'[^a-zA-Z]')


def tokenize(s):
    """Split *s* into letter-only tokens, treating every non-letter as a separator."""
    return re_tok.sub(' ', s).split()
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" executionInfo={"elapsed": 662, "status": "ok", "timestamp": 1569951781998, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="Hdf9LjpkaFgW" outputId="0d85d042-cd53-4047-89d1-39d820907a57"
#Using SciKit Learn's CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
#Tokenizing and removing stop words using CountVectorizer's Built-In English List
count_vect = CountVectorizer(tokenizer = tokenize, stop_words='english')
print(count_vect)
# + colab={} colab_type="code" id="pg0dUFQ7aFgZ"
tf_train = count_vect.fit_transform(x_train)
tf_test = count_vect.transform(x_test)
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 305, "status": "ok", "timestamp": 1569951784389, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="dEciMvXhaFgb" outputId="6721bb5c-6a4a-490a-c414-c27cf742bbd6"
print(f'Train term frequency contains {tf_train.shape[0]} documents and {tf_train.shape[1]} tokens.\nEach row represents a document and each column how many times a token appears on the document')
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 657, "status": "ok", "timestamp": 1569951785610, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="m5hiQouCaFgd" outputId="3d614c7d-89a4-40d8-a63a-1674058af6fb"
tf_train
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 235, "status": "ok", "timestamp": 1569951785945, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="znxL2KZFaFgf" outputId="2782c6a2-6326-4a0d-bb68-557d3a5867cb"
tf_train[0]
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" executionInfo={"elapsed": 220, "status": "ok", "timestamp": 1569951786732, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="3TIuQexPaFgh" outputId="a212fcb3-bb3f-418b-8de9-43eeda4707d7"
tf_test
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 359, "status": "ok", "timestamp": 1569951787753, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="8SBec5sJaFgk" outputId="aa48d7c1-ca8e-497f-f11d-86650a8fbc8c"
#Getting vocabulary
vocab = count_vect.get_feature_names()
len(vocab)
# + colab={"base_uri": "https://localhost:8080/", "height": 170} colab_type="code" executionInfo={"elapsed": 665, "status": "ok", "timestamp": 1569951789503, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="0zrtcYsLaFgm" outputId="57c80a26-5565-40a0-dc28-0dde9d4a2d0a"
vocab
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 270, "status": "ok", "timestamp": 1569951790592, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="8cM1WvGRaFgo" outputId="488ed0bd-8ac9-4f24-dcf2-2e146645f6e6"
# Displaying the Categories of the training data
y_train
# + colab={"base_uri": "https://localhost:8080/", "height": 85} colab_type="code" executionInfo={"elapsed": 363, "status": "ok", "timestamp": 1569951791779, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="LZv0H0SqaFgr" outputId="b77f8827-338c-47c9-c851-05b60bb4cba5"
#Generating and saving bags of words for training data
pd.DataFrame(tf_train.toarray(), columns=vocab).to_csv(r'Train-bagOfWords.csv')
pd.DataFrame(tf_train.toarray(), columns=vocab).to_csv
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 487, "status": "ok", "timestamp": 1569951793247, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="uYQF1p5CaFgv" outputId="e8792f23-8d7a-42b8-dbf5-bd50c92fc582"
#Displaying the Categories of the test data
y_test
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" executionInfo={"elapsed": 321, "status": "ok", "timestamp": 1569951794280, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="0F2GdnvXaFgx" outputId="a6376e95-cceb-47fa-ce88-b7ab329c8ebf"
#Generating and saving bag of words for test data
pd.DataFrame(tf_test.toarray(), columns=vocab).to_csv(r'Test-bagOfWords.csv')
pd.DataFrame(tf_test.toarray(), columns=vocab).to_csv
# + colab={} colab_type="code" id="ylJqjxAcaFg1"
#Training and testing a multinomial NB model using Scikit-learn and word counts
from sklearn.naive_bayes import MultinomialNB
model = MultinomialNB().fit(tf_train, y_train)
# + colab={} colab_type="code" id="v1-klF0oaFg5"
# get probabilities for positive class
probs = model.predict_proba(tf_test)[:,1]
# + colab={} colab_type="code" id="O53vr60ZaFg8"
# get predictions
predictions = model.predict(tf_test)
# + colab={} colab_type="code" id="lzLjTXFYaFg-"
#Evaluation
from sklearn.metrics import accuracy_score, precision_score, recall_score, roc_auc_score
from sklearn.metrics import confusion_matrix,roc_curve, auc
# + colab={"base_uri": "https://localhost:8080/", "height": 187} colab_type="code" executionInfo={"elapsed": 281, "status": "ok", "timestamp": 1569951813145, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="4meh4DLPaFhE" outputId="57ae03df-1903-4efb-9a4a-9f9093a9fdae"
# Report the predictive performance metrics
# evaluate predictions
accuracy = accuracy_score(y_test, predictions)
precision=precision_score(y_test, predictions, )
recall=recall_score(y_test, predictions)
# Specificity = recall of the negative (0) class.
specificity=recall_score(y_test, predictions, pos_label=0)
# ROC AUC is computed from the class-1 probabilities, not the hard predictions.
roc = roc_auc_score(y_test, probs)
print("\n")
print(f"Accuracy.........: {accuracy * 100.0:.4f}")
print(f"Precision........: {precision *100:.4f}")
print(f"Recall...........: {recall * 100:.4f}")
print(f"FP Rate...........:{(1-specificity) * 100:.4f}")
print(f"ROC AUC (probs)..: {roc:.6f}")
cm = confusion_matrix(y_test, predictions)
print(f"Confusion matrix.:\n {cm}")
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" executionInfo={"elapsed": 364, "status": "ok", "timestamp": 1569951816961, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "11962716709001588350"}, "user_tz": 240} id="Vmt6kPJnaFhN" outputId="5d0b2450-9b1a-4a92-f140-a85b5590858a"
#Predicting whether the following document belongs to category C1 or C2
#"The woodchuck picked peppers and chucked wood."
print(model.predict(count_vect.transform(["The woodchuck picked peppers and chucked wood."])))
# + colab={} colab_type="code" id="E5LTIK9naFhP"
#According to the result, the document would be classified as a C2 Category.
|
Tounge Twisters - Text Classification and Naive Bayes/Group6-NLP-04-Assignment.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.7 Jupyter
# language: python
# name: 3.7jupyter
# ---
import os
#os.chdir('..')
# Input/Output working folders live alongside this notebook.
input_folder = "Input"
output_folder = "Output"
basepath = os.getcwd()
input_path = os.path.join(basepath, input_folder)
output_path = os.path.join(basepath, output_folder)
# NOTE(review): hard-coded backslash separators make these prefix paths
# Windows-only; os.path.join would be portable.
image_input = input_path + '\\'
image_output = output_path + '\\'
print(basepath)
print(input_path)
# Switch into the DeOldify checkout before importing it in the next cell.
os.chdir(basepath)
os.chdir('DeOldify')
# +
#HIDE
import io
import PIL
from PIL import Image
from deoldify import device
from deoldify.device_id import DeviceId
import ipywidgets as widgets
from ipywidgets import interact, interactive, fixed, interact_manual
import requests
import torch
import shutil
device.set(device=DeviceId.GPU0)
if not torch.cuda.is_available():
print('GPU not available.')
# +
#HIDE
import fastai
from deoldify.visualize import *
import warnings
warnings.filterwarnings("ignore", category=UserWarning, message=".*?Your .*? set is empty.*?")
# +
#HIDE
colorizer = get_image_colorizer(artistic=True)
# +
#HIDE
grand = widgets.IntSlider(min=15,max=39)
url_pic = widgets.Text(placeholder='Please put url here: ')
file = widgets.FileUpload(
accept='', # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'
multiple=False # True to accept multiple files upload else False
)
button = widgets.Button(description='Colorize',
tooltip='Click me',
icon='check' # (FontAwesome names without the `fa-` prefix)
)
uploader = widgets.FileUpload(
accept='image/*', # Accepted file extension e.g. '.txt', '.pdf', 'image/*', 'image/*,.pdf'
multiple=True # True to accept multiple files upload else False
)
restoreBox = widgets.Checkbox(
value=False,
description='Restore',
disabled=False,
indent=False
)
def clear_output_folder():
    """Delete every entry in the Output folder and in DeOldify's
    result_images folder, then chdir back to ``basepath``.

    Improvements over the original: the duplicated loop is folded into one,
    and the bare ``except:`` followed by an unprotected ``os.rmdir`` (which
    crashed on a non-empty subdirectory) is replaced by narrow OSError
    handling that leaves undeletable entries in place.
    """
    for folder in (output_path, basepath + '\\DeOldify\\result_images'):
        os.chdir(folder)
        for entry in os.listdir():
            try:
                os.remove(entry)      # regular file
            except OSError:
                try:
                    os.rmdir(entry)   # empty directory
                except OSError:
                    pass              # non-empty dir / permissions: skip it
    os.chdir(basepath)
def clear_input_folder():
    """Delete every entry in the Input folder, then chdir back to ``basepath``.

    Improvement over the original: the bare ``except:`` followed by an
    unprotected ``os.rmdir`` (which crashed on a non-empty subdirectory) is
    replaced by narrow OSError handling that skips undeletable entries.
    """
    os.chdir(input_path)
    for entry in os.listdir():
        try:
            os.remove(entry)      # regular file
        except OSError:
            try:
                os.rmdir(entry)   # empty directory
            except OSError:
                pass              # non-empty dir / permissions: skip it
    os.chdir(basepath)
# +
import anvil.server
anvil.server.connect("NDXLEIAYPNCKPZZZJ67YHJ3C-4DOOCAYYMZN2YEXK")
# +
import anvil.media
import io
import random
from io import BytesIO
from PIL import Image
@anvil.server.callable
def upload_image(file):
    """Receive an image upload from the Anvil client and store it as
    Input/image.jpg (RGB JPEG).

    Returns a human-readable status string. On any failure the Input
    folder is cleared and an error message is returned instead of raising.
    """
    try:
        os.chdir(input_path)
        picture = Image.open(io.BytesIO(file.get_bytes()))
        # JPEG cannot store an alpha channel, so normalize to RGB first.
        picture = picture.convert("RGB")
        picture.save('image.jpg', 'JPEG')  # save() returns None; don't rebind
        return 'File Uploaded.'
    except Exception:
        # Undecodable/unsupported upload: reset the Input folder and report.
        clear_input_folder()
        return 'File Type Not Supported, Please try a different file.'
    finally:
        # Always restore the working directory for the next server call.
        os.chdir(basepath)
@anvil.server.callable
def url_image(source_url):
    """Download the image at *source_url* into Input/image.jpg.

    Returns a human-readable status string. On any failure the Input
    folder is cleared and an error message is returned instead of raising.
    """
    try:
        os.chdir(input_path)
        # Bound the request and fail fast on HTTP errors -- otherwise a
        # 404 error page would be written to disk as if it were image bytes.
        response = requests.get(source_url, timeout=30)
        response.raise_for_status()
        with open('image.jpg', 'wb') as handler:
            handler.write(response.content)
        return 'URL Image Loaded.'
    except Exception:
        clear_input_folder()
        return 'URL Image Not Supported, Please try a different URL.'
    finally:
        # BUG FIX: the original returned while still chdir'ed into
        # input_path on success; always restore the working directory.
        os.chdir(basepath)
os.chdir(basepath)
@anvil.server.callable
def image_colorise(r_factor):
    """Colorize Input/image.jpg with DeOldify at render factor *r_factor*
    and return the result to the Anvil client as JPEG BlobMedia.

    Side effects: changes the process working directory several times and
    clears both the Input and Output folders before returning.
    """
    os.chdir(input_path)
    images = os.listdir()
    os.chdir(output_path)
    # NOTE(review): the loop iterates over every entry in Input, but the
    # source path below is always the fixed 'image.jpg'; and if Input is
    # empty the loop never runs, leaving byte_io unbound at the return.
    for image in images:
        p = image_input + 'image.jpg'
        os.chdir(basepath+'\\DeOldify')
        image_path = colorizer.plot_transformed_image(path=p, render_factor=r_factor,
        compare=True, watermarked=False)
        picture = Image.open(image_path)
        os.chdir(output_path)
        picture = picture.save('COLORIZED_' + image)
        # Re-encode to JPEG in memory so the bytes can ship over Anvil.
        colored = Image.open('COLORIZED_' + image)
        byte_io = BytesIO()
        colored.save(byte_io, 'JPEG')
    os.chdir(basepath)
    clear_input_folder()
    clear_output_folder()
    # Random suffix gives the downloaded file a quasi-unique name.
    return anvil.BlobMedia("image/jpeg", byte_io.getvalue() , name='Colorized_Image' +
    str(random.randint(0,9999)) + '.jpg')
# -
anvil.server.wait_forever()
|
WebserverVersion3.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Fibonacci
#Esta línea se ocupa para que las gráficas que se generen queden embebidas dentro de la página
# %pylab inline
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def fibonacciIterativo(ene):
    """Return the ene-th Fibonacci number iteratively (F(1) = F(2) = 1).

    BUG FIX: the original returned ``contador`` -- the loop counter, which
    simply ends up equal to ``ene`` -- instead of the computed Fibonacci
    value. For ene <= 0 the function now returns 0 instead of the counter.
    """
    if ene <= 0:
        return 0
    a = 0
    b = 1
    contador = 1
    while contador < ene:
        # Advance the pair (F(k-1), F(k)) -> (F(k), F(k+1)).
        a, b = b, a + b
        contador += 1
    return b
# +
# Input data: 100 sample points between 0 and 100 on the x axis.
# NOTE(review): y collects str(...) values, so matplotlib plots the y axis
# as categorical labels rather than numbers -- confirm this is intended.
x = linspace(0, 100, 100)
y = []
for val in x:
    y.append(str(fibonacciIterativo(int(val))))
fig, ax = plt.subplots(facecolor='w', edgecolor='k')
ax.plot(x, y, marker="o", color="r", linestyle='None')
ax.grid(True)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.grid(True)
plt.title('Puntos')
plt.show()
fig.savefig("graph.png")
# -
def fibonacciRecursivo(ene):
    """Recursive Fibonacci with the convention F(0) = F(1) = 1.

    Exponential-time by construction; fine for the small inputs plotted
    in the next cell.
    """
    if ene in (0, 1):
        return 1
    return fibonacciRecursivo(ene - 2) + fibonacciRecursivo(ene - 1)
# +
# Input data: 35 sample points between 0 and 35 on the x axis.
# NOTE(review): the recursive Fibonacci is exponential, so the largest
# inputs here take noticeable time; y again holds strings (categorical axis).
x = linspace(0, 35, 35)
y = []
for val in x:
    y.append(str(fibonacciRecursivo(int(val))))
fig, ax = plt.subplots(facecolor='w', edgecolor='k')
ax.plot(x, y, marker="o", color="r", linestyle='None')
ax.grid(True)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.grid(True)
plt.title('Fibonacci recursivo')
plt.show()
fig.savefig("graph.png")
# -
|
EDyA_I/theme_2/code/fibonacci/fibonacci.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:tf14_p36]
# language: python
# name: conda-env-tf14_p36-py
# ---
# # **Traffic Sign Recognition**
#
# ---
#
# **Build a Traffic Sign Recognition Project**
#
# The goals / steps of this project are the following:
# * Load the data set (see below for links to the project data set)
# * Explore, summarize and visualize the data set
# * Design, train and test a model architecture
# * Use the model to make predictions on new images
# * Analyze the softmax probabilities of the new images
# * Summarize the results with a written report
#
#
# [//]: # (Image References)
#
# [image1]: ./examples/visualization.jpg "Visualization"
# [image2]: ./examples/grayscale.jpg "Grayscaling"
# [image3]: ./examples/random_noise.jpg "Random Noise"
# [image4]: ./examples/placeholder.png "Traffic Sign 1"
# [image5]: ./examples/placeholder.png "Traffic Sign 2"
# [image6]: ./examples/placeholder.png "Traffic Sign 3"
# [image7]: ./examples/placeholder.png "Traffic Sign 4"
# [image8]: ./examples/placeholder.png "Traffic Sign 5"
#
# ## Rubric Points
# ### Rubric points [rubric points](https://review.udacity.com/#!/rubrics/481/view).
#
# #### Files Submitted
#
# | Files | Submitted |
# |:---------------------:|:---------------------------------------------:|
# | Ipython notebook with code | Yes |
# | HTML output of the code | Yes |
# | A writeup report (either pdf or markdown) | Yes (Markdown) |
#
#
# ---
# ## Data Set Summary & Exploration
#
# ### 1. Provide a basic summary of the data set. In the code, the analysis should be done using python, numpy and/or pandas methods rather than hardcoding results manually.
#
# I used pandas dataframe to view simple dataset stats
#
# 
#
# ### 2. Include an exploratory visualization of the dataset.
#
# Here is an exploratory visualization of the data set. Following is the categorical overview of the dataset. I've randomly selected for each class.
#
# 
#
# Following is the data distribution of the classes.
#
# 
#
#
# ---
# ## Design and Test a Model Architecture
#
# ### 1. Describe how you preprocessed the image data.
# #### What techniques were chosen and why did you choose these techniques? Consider including images showing the output of each preprocessing technique. Pre-processing refers to techniques such as converting to grayscale, normalization, etc. (OPTIONAL: As described in the "Stand Out Suggestions" part of the rubric, if you generated additional data for training, describe why you decided to generate additional data, how you generated the data, and provide example images of the additional data. Then describe the characteristics of the augmented training set like number of images in the set, number of images for each class, etc.)
#
# As a first step, I decided to augment the dataset to have reasonable enough data
# First I've merged the splitted dataset ( train, valid, test ) again to use them as a image pool to generate new image.
#
# The distribution of the merged image pool look as below.
#
# 
#
# I've sampled images from the pool, applied intensity changes, rotation, translate, sheering transformation.
# Following shows examples of generated images. This way we could consider more reasonable real world examples.
#
# 
#
# Following shows the count of each category in the augmented dataset.
#
# 
#
# Note that orange distribution is the merged image pool
# And the uniform blue distribution is the newly generated augmented dataset
#
# I've splitted the augmented dataset into train / validation / test dataset.
# Each of them also has uniform distribution.
# Getting more data and making each class uniform, it also helps avoiding overfitting.
#
# 
#
# And following shows the categorical sample images of the augmented dataset.
#
# 
#
# I've also considered normalization of images.
# There are couple of ways to do this.
# One could compute overal mean and std from the whole dataset.
# But to cope with more real nature, I've decided to apply in-image normalization.
# I've used tensorflow api for that, rather than doing this directly on numpy array level or using opencv api.
# I've applied the normalization in the my model, so to make it easier to use.
#
# #### Ref. tensorflow.per_image_standardization : https://www.tensorflow.org/versions/r1.0/api_docs/python/tf/image/per_image_standardization
#
#
#
# ### 2. Describe what your final model architecture looks like including model type, layers, layer sizes, connectivity, etc.) Consider including a diagram and/or table describing the final model.
#
# I've used the LeNet MNIST model we've used in the course and tweaked the model as shown below.
# My final model consisted of the following layers:
#
# | Layer | Description |
# |:---------------------:|:---------------------------------------------:|
# | Input | 32x32x3 RGB image |
# | Batch normalization | 32x32x3 RGB image -> 32x32x3 normalized imgs |
# | Convolution 5x5 | 1x1 stride, same padding, outputs 28x28x6 |
# | RELU | |
# | Max pooling | 2x2 stride, outputs 14x14x6 |
# | Convolution 5x5 | 1x1 stride, same padding, outputs 10x10x16 |
# | Max pooling | 2x2 stride, outputs 5x5x16 |
# | Flatten | Flatten. Input = 5x5x16. Output = 400. |
# | Fully connected | 400 x 120 |
# | Apply Dropout | dropout regularization |
# | Fully connected | 120 x 84 |
# | Softmax | 84 x 42 output |
#
#
#
# ### 3. Describe how you trained your model. The discussion can include the type of optimizer, the batch size, number of epochs and any hyperparameters such as learning rate.
#
# I've used the same setup as we've used in the LeNet MNIST model,
# except that I've applied dropout regularization in the first fully connected layer.
# cross_entropy was used as the loss function, and applied Adam optimizaer with rate = 0.001
# as for dropout I've applied about 0.6.
#
# In general, batch_size = 128, epoch = 25 showed a good result.
#
#
# ### 4. Describe the approach taken for finding a solution and getting the validation set accuracy to be at least 0.93. Include in the discussion the results on the training, validation and test sets and where in the code these were calculated. Your approach may have been an iterative process, in which case, outline the steps you took to get to the final solution and why you chose those steps. Perhaps your solution involved an already well known implementation or architecture. In this case, discuss why you think the architecture is suitable for the current problem.
#
# Given the setup explained in above section 3, the training accurary showed following learning curve. The dashed red line is the validation accurary of 0.93
#
# It crossed the requirement around epoch 10.
# After 25 epochs, it showed 0.947 validation accuracy.
#
# 
#
# The test-set accuracy of the model was 0.875.
#
# As it was required in the project description, I've started the training with LeNetLab.
#
# As I don't use grayscale images as input, I've changed the input placeholder tensor to take 3-channel images.
#
# Then, after several iteration, I've applied image normalization using tensorflow per image standardization.
#
# To make it better learn I've applied dropout regularization on fully connected layers, then I figured it out applying dropout in the first fully connected layer is just good enough. Tried different values for dropout and 0.6 showed a good result.
#
# I computed confusion matrix,
#
# 
#
# As one can see we clearly see the thick diagonal line,
# Therefore, it verifies that the model was trained well.
#
# I also computed confusion matrix over whole merged original dataset.
# (the pickled train / valid / test set )
#
# 
#
# I also computed confusion matrix over whole dataset,
#
# 
#
# In this case, accuracy decreases for the classes that originally had way less samples than the thicker ones. It is related to the dataset quality, as for those classes less accurate, the majority of their sample were artificially generated ones. ( with distortions such as rotation, translate, sheer, intensity change, etc. )
#
# Following is the prediction result of categorically sampled images from the whole augmented dataset.
#
# 
#
#
# ---
# ## Test a Model on New Images
#
# ### 1. Choose five German traffic signs found on the web and provide them in the report. For each image, discuss what quality or qualities might be difficult to classify.
#
# Here are 8 German traffic signs that I found on the web:
#
# 
#
#
#
# ### 2. Discuss the model's predictions on these new traffic signs and compare the results to predicting on the test set. At a minimum, discuss what the predictions were, the accuracy on these new predictions, and compare the accuracy to the accuracy on the test set (OPTIONAL: Discuss the results in more detail as described in the "Stand Out Suggestions" part of the rubric).
#
# Here are the results of the prediction:
#
# 
#
# The model was able to correctly guess 7 of the 8 traffic signs, which gives an accuracy of 87%.
# This compares favorably to the accuracy on the test set of 0.875 ( exactly the same )
#
#
#
# ### 3. Describe how certain the model is when predicting on each of the five new images by looking at the softmax probabilities for each prediction. Provide the top 5 softmax probabilities for each image along with the sign type of each probability. (OPTIONAL: as described in the "Stand Out Suggestions" part of the rubric, visualizations can also be provided such as bar charts)
#
# Here are the results of the prediction with top 5 ranks for each images.
#
# 
#
# 
#
#
# ---
# ## (Optional) Visualizing the Neural Network (See Step 4 of the Ipython notebook for more details)
# ### 1. Discuss the visual output of your trained network's feature maps. What characteristics did the neural network use to make classifications?
#
# I changed the featuremap code a bit, so it can select any tensorflow element in the graph given a name
#
# 
#
# Following shows feature map of conv2d of my selected 8 traffic signs.
#
# 
#
# As we can see, the convolutional layer is well activated so it can capture dominant visual aspect of given input images.
#
# 
#
# The convolutional layer passing max pool, it became smaller while it captures the dominant visual aspect still
#
# 
#
# 
#
#
|
Writeup_md.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# # In-Class Coding Lab: Lists
#
# The goals of this lab are to help you understand:
#
# - List indexing and slicing
# - List methods such as insert, append, find, delete
# - How to iterate over lists with loops
#
# ## Python Lists work like Real-Life Lists
#
# In real life, we make lists all the time. To-Do lists. Shopping lists. Reading lists. These lists are collections of items, for example here's my shopping list:
#
# ```
# Milk, Eggs, Bread, Beer
# ```
#
# There are 4 items in this list.
#
# Likewise, we can make a similar list in Python, and count the number of items in the list using the `len()` function:
# A Python list of four grocery items; len() reports how many it holds.
shopping_list = [ 'Milk', 'Eggs', 'Bread', 'Beer']
item_count = len(shopping_list)
print("List: %s has %d items" % (shopping_list, item_count))
# ## Enumerating Your List Items
#
# In real-life, we *enumerate* lists all the time. We go through the items on our list one at a time and make a decision, for example: "Did I add that to my shopping cart yet?"
#
# In Python we go through items in our lists with the `for` loop. We use `for` because the number of items is pre-determined and thus a **definite** loop is the appropriate choice.
#
# Here's an example:
# Definite iteration: visit each list item once, in order.
for item in shopping_list:
    print("I need to buy some %s " % (item))
# ## Now You Try It!
#
# Write code in the space below to print each stock on its own line.
# Exercise data: the student prints each ticker symbol on its own line.
stocks = [ 'IBM', 'AAPL', 'GOOG', 'MSFT', 'TWTR', 'FB']
#TODO: Write code here
# ## Indexing Lists
#
# Sometimes we refer to our items by their place in the list. For example "Milk is the first item on the list" or "Beer is the last item on the list."
#
# We can also do this in Python, and it is called *indexing* the list.
#
# **IMPORTANT** The first item in a Python lists starts at index **0**.
# Positive indexes count from 0 at the front; negative indexes count
# back from the end (-1 is the last item, -2 the second to last).
print("The first item in the list is:", shopping_list[0])
print("The last item in the list is:", shopping_list[3])
print("This is also the last item in the list:", shopping_list[-1])
print("This is the second to last item in the list:", shopping_list[-2])
# ## For Loop with Index
#
# You can also loop through your Python list using an index. In this case we use the `range()` function to determine how many times we should loop:
# Index-based loop: range(len(...)) yields 0 .. len-1.
for i in range(len(shopping_list)):
    print("I need to buy some %s " % (shopping_list[i]))
# ## Now You Try It!
#
# Write code to print the 2nd and 4th stocks in the list variable `stocks`. For example:
#
# `AAPL MSFT`
# +
#TODO: Write code here
# -
# ## Lists are Mutable
#
# Unlike strings, lists are mutable. This means we can change a value in the list.
#
# For example, I want `'Craft Beer'` not just `'Beer'`:
print(shopping_list)
# Lists are mutable: assigning to an index replaces that element in place.
shopping_list[-1] = 'Craft Beer'
print(shopping_list)
# ## List Methods
#
# In your readings and class lecture, you encountered some list methods. These allow us to manipulate the list by adding or removing items.
# +
# Demonstrate the core list-mutation operations: append, insert, remove, del.
print("Shopping List: %s" %(shopping_list))
print("Adding 'Cheese' to the end of the list...")
shopping_list.append('Cheese') # append() adds to the end of the list
print("Shopping List: %s" %(shopping_list))
print("Adding 'Cereal' to position 0 in the list...")
shopping_list.insert(0,'Cereal') # insert() adds at the given position (0 = beginning)
print("Shopping List: %s" %(shopping_list))
print("Removing 'Cheese' from the list...")
shopping_list.remove('Cheese') # remove() deletes the first matching value
print("Shopping List: %s" %(shopping_list))
print("Removing item from position 0 in the list...")
del shopping_list[0] # del removes the item at the given index
print("Shopping List: %s" %(shopping_list))
# -
# ## Now You Try It!
#
# Write a program to remove the following stocks: `IBM` and `TWTR`
#
# Then add this stock to the end `NFLX` and this stock to the beginning `TSLA`
#
# Print your list when you are done. It should look like this:
#
# `['TSLA', 'AAPL', 'GOOG', 'MSFT', 'FB', 'NFLX']`
#
# TODO: Write Code here
# ## Sorting
#
# Since Lists are mutable. You can use the `sort()` method to re-arrange the items in the list alphabetically (or numerically if it's a list of numbers)
print("Before Sort:", shopping_list)
# sort() re-orders the list in place (alphabetically for strings) and returns None.
shopping_list.sort()
print("After Sort:", shopping_list)
# # Putting it all together
#
# Winning Lotto numbers. When the lotto numbers are drawn, they are in any order; when they are presented, they're always sorted. Let's write a program to input 5 numbers and then output them sorted.
#
# ```
# 1. for i in range(5)
# 2. input a number
# 3. append the number you input to the lotto_numbers list
# 4. sort the lotto_numbers list
# 5. print the lotto_numbers list like this:
# 'today's winning numbers are [1, 5, 17, 34, 56]'
# ```
# +
## TODO: Write program here:
lotto_numbers = [] # start with an empty list
|
content/lessons/09/Class-Coding-Lab/CCL-Lists.ipynb
|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/ZER-0-NE/OrcaCNN-Demo/blob/master/Orca_detection_small_more_epochs.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + colab={} colab_type="code" id="5PbC5DJnAZoB"
# One-time interactive Google sign-in for this Colab runtime;
# required before touching Drive or GCS below.
from google.colab import auth
auth.authenticate_user()
# + colab={"base_uri": "https://localhost:8080/", "height": 394} colab_type="code" id="W58rhRHGBJ2u" outputId="3ab3e346-9a1f-4268-bc77-196c903694f6"
# !pip install PyDrive
# !pip install PyDrive
from pydrive.auth import GoogleAuth
from pydrive.drive import GoogleDrive
from google.colab import auth
from oauth2client.client import GoogleCredentials
# Authenticate and create the PyDrive client.
# This only needs to be done once per notebook session; `drive` is the
# handle used by the download cells below.
auth.authenticate_user()
gauth = GoogleAuth()
gauth.credentials = GoogleCredentials.get_application_default()
drive = GoogleDrive(gauth)
# + colab={"base_uri": "https://localhost:8080/", "height": 2516} colab_type="code" id="2HKwXDKnBKSV" outputId="f6a31eb9-2210-419a-c5cb-6205f7617c66"
# https://drive.google.com/open?id=1l5Ct50CYl2o3MDKosn-omi2M2CYIdstn
# Download the validation-set archive from Drive by file id, then unzip it.
fileId = drive.CreateFile({'id': '1l5Ct50CYl2o3MDKosn-omi2M2CYIdstn'}) # the Drive file id (the trailing id in a drive.google.com/open?id=... link)
print(fileId['title']) # the archive's name as stored on Drive
fileId.GetContentFile('val_orca.zip') # Save Drive file as a local file
# !unzip val_orca.zip -d ./
# + colab={"base_uri": "https://localhost:8080/", "height": 9945} colab_type="code" id="SG3jQ7oeBMmF" outputId="5a36a6d1-93c9-450e-8f72-26fbe59fb537"
# https://drive.google.com/open?id=1-ZxyOc9JMkl8-U5z0ybXjn1rUjldx2TL
# Download the training-set archive from Drive by file id, then unzip it.
fileId = drive.CreateFile({'id': '1-ZxyOc9JMkl8-U5z0ybXjn1rUjldx2TL'}) # the Drive file id
print(fileId['title']) # the archive's name as stored on Drive
fileId.GetContentFile('train_orca.zip') # Save Drive file as a local file
# !unzip train_orca.zip -d ./
# + colab={"base_uri": "https://localhost:8080/", "height": 3349} colab_type="code" id="y-smM91pvAjP" outputId="cef740b3-7320-4ef3-dd04-4e9c9862981d"
# https://drive.google.com/open?id=1t68WsTO8E1ORUYBZqAmUhozg5Vk-aRwl
# Download the test-set archive from Drive by file id, then unzip it.
fileId = drive.CreateFile({'id': '1t68WsTO8E1ORUYBZqAmUhozg5Vk-aRwl'}) # the Drive file id
print(fileId['title']) # the archive's name as stored on Drive
fileId.GetContentFile('test_orca.zip') # Save Drive file as a local file
# !unzip test_orca.zip -d ./
# + colab={} colab_type="code" id="DB8SyEw0BPWV"
from keras import models
from keras import layers
from keras import optimizers
from keras.applications import VGG16
from keras.applications import InceptionResNetV2
import sys, numpy
import os
from keras.preprocessing.image import ImageDataGenerator
from keras import optimizers, regularizers
from keras.models import Sequential
from keras.layers import Dropout, Flatten, Dense, Activation, Input
from keras import callbacks, regularizers
from keras.models import load_model
import matplotlib.pyplot as plt
from keras.layers.normalization import BatchNormalization
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Activation, Dropout, Flatten, Dense
from keras import backend as K
from keras.engine import Model
from keras.layers import Conv2D, GlobalAveragePooling2D
from sklearn import metrics
# + colab={} colab_type="code" id="fa3_Fx3ZBRll"
# Dataset locations (Colab-local, relative to /content).
# NOTE(review): the archives above unzip to *_orca folders, but training and
# validation point at 'train_dan'/'val_dan' — confirm those directories exist.
train_data_path = 'train_dan/'
validation_data_path = 'val_dan/'
test_data_path = 'test_orca/'
# Parameters
img_width, img_height = 300, 500   # spectrogram input size fed to the CNN
nb_train_samples = 579             # image counts, used to derive steps per epoch
nb_validation_samples = 142
epochs = 150
batch_size = 32
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="51BYfVEGOfKx" outputId="ec20f5b2-aac3-45c2-e3e9-b49244f29718"
# cd /content/
# + colab={} colab_type="code" id="e6zJN7IzLaC5"
def precision(y_true, y_pred):
    """Batch-wise precision: true positives / predicted positives.

    Inputs are rounded/clipped to {0, 1}, so the tensors are treated as
    binary indicators. K.epsilon() guards against division by zero.
    Note this is a per-batch average, not a global metric.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    pred_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    return tp / (pred_pos + K.epsilon())
def recall(y_true, y_pred):
    """Batch-wise recall: true positives / actual positives.

    Inputs are rounded/clipped to {0, 1}, so the tensors are treated as
    binary indicators. K.epsilon() guards against division by zero.
    Note this is a per-batch average, not a global metric.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def fbeta_score(y_true, y_pred, beta=1):
    """Batch-wise F-beta score: weighted harmonic mean of precision and recall.

    beta == 1 gives the F1 measure; beta < 1 weights precision more
    heavily, beta > 1 weights recall more heavily. Computed per batch,
    not globally.

    Raises:
        ValueError: if beta is negative.
    """
    if beta < 0:
        raise ValueError('The lowest choosable beta is zero (only precision).')

    # Match sklearn's convention: with no true positives at all,
    # pin the score to 0 instead of dividing by ~zero.
    if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
        return 0

    prec = precision(y_true, y_pred)
    rec = recall(y_true, y_pred)
    beta_sq = beta ** 2
    return (1 + beta_sq) * (prec * rec) / (beta_sq * prec + rec + K.epsilon())
def fmeasure(y_true, y_pred):
    """Computes the f-measure, the harmonic mean of precision and recall.

    Equivalent to fbeta_score with beta=1; computed batch-wise, not globally.
    """
    return fbeta_score(y_true, y_pred, beta=1)
def sensitivity(y_true, y_pred):
    """Batch-wise sensitivity (true-positive rate); identical to recall()."""
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_pos = K.sum(K.round(K.clip(y_true, 0, 1)))
    return tp / (actual_pos + K.epsilon())
def specificity(y_true, y_pred):
    """Batch-wise specificity (true-negative rate): TN / actual negatives."""
    tn = K.sum(K.round(K.clip((1 - y_true) * (1 - y_pred), 0, 1)))
    actual_neg = K.sum(K.round(K.clip(1 - y_true, 0, 1)))
    return tn / (actual_neg + K.epsilon())
# + colab={"base_uri": "https://localhost:8080/", "height": 938} colab_type="code" id="wRuHIEsPBTSn" outputId="314918a5-486f-4c2d-e983-f53995797a31"
# Select the input tensor layout expected by the active Keras backend.
if K.image_data_format() == 'channels_first':
    input_shape = (3, img_width, img_height)
else:
    input_shape = (img_width, img_height, 3)

# import keras_metrics

# Small CNN for binary orca-call detection on spectrogram images:
# four Conv(5x5, stride 2) / ReLU / MaxPool / Dropout stages followed by a
# dense head ending in a single sigmoid unit. The heavy dropout (0.5-0.7)
# guards against overfitting on the small dataset (~579 training images).
model = Sequential()
model.add(Conv2D(128, (5, 5), padding = 'same', strides = 2, input_shape=input_shape))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.5))

model.add(Conv2D(64, (5, 5), padding = 'same', strides = 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.6))

model.add(Conv2D(64, (5, 5), padding = 'same', strides = 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.6))

model.add(Conv2D(64, (5, 5), padding = 'same', strides = 2))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.6))

model.add(Flatten())
model.add(Dense(32))
model.add(Activation('relu'))
model.add(Dropout(0.7))
model.add(Dense(1))
model.add(Activation('sigmoid'))

# Binary cross-entropy pairs with the single-sigmoid output.
model.compile(loss='binary_crossentropy',
              optimizer=optimizers.Adam(lr=3e-4),
              metrics=['accuracy'])
# model = load_model('orca_detection.h5')
model.summary()
# + colab={"base_uri": "https://localhost:8080/", "height": 5171} colab_type="code" id="SqJda_CYLjRF" outputId="50d109e2-3c0f-494b-9c19-7fe17e4c3cd7"
from keras.callbacks import ReduceLROnPlateau, ModelCheckpoint

# Keep only the best checkpoint (lowest validation loss) seen so far.
checkpoint = ModelCheckpoint(filepath='checkpoint_adam-{epoch:02d}-{val_loss:.2f}.h5', monitor='val_loss', verbose=0, save_best_only=True)
# NOTE(review): patience=100 with epochs=150 means the learning rate can drop
# at most once near the end of training — confirm that is intended.
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                              patience=100, min_lr=1e-8)

# Training-time preprocessing: rescale pixels to [0, 1] plus mild shear/zoom
# augmentation.
train_datagen = ImageDataGenerator(rescale=1. / 255,
                                   shear_range=0.2,
                                   zoom_range=0.2)

# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)

# Change the batchsize according to your system RAM
train_batchsize = 32
val_batchsize = 32

train_generator = train_datagen.flow_from_directory(
    train_data_path,
    target_size=(img_width, img_height),
    batch_size=train_batchsize,
    class_mode='binary',
    shuffle=True)
# train_generator.reset()
# validation_generator.reset()
validation_generator = test_datagen.flow_from_directory(
    validation_data_path,
    target_size=(img_width, img_height),
    batch_size=val_batchsize,
    class_mode='binary',
    shuffle=False)
# validation_generator.reset()

# NOTE(review): steps are derived from batch_size (32) while the generators
# use train_batchsize/val_batchsize (also 32) — keep these in sync if either
# value changes.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=nb_train_samples // batch_size,
    epochs=epochs,
    validation_data=validation_generator,
    validation_steps=nb_validation_samples // batch_size,
    callbacks=[checkpoint, reduce_lr])
# model.save_weights('orca_detection_3.h5')
model.save('orca_detection_adam_.h5')  # full model: architecture + weights
# + colab={"base_uri": "https://localhost:8080/", "height": 545} colab_type="code" id="1_EJI-qst21a" outputId="22eb36a2-16b3-4d3f-8ddc-a8eb2fe9f985"
# loss and accuracy curves.
# NOTE(review): the 'acc'/'val_acc' history keys are the older Keras
# spellings; newer Keras records 'accuracy'/'val_accuracy' — verify against
# the installed version.
acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']
# Rebinds `epochs` (previously the int 150) to the x-axis range.
epochs = range(len(acc))
plt.plot(epochs, acc, 'b', label='Training acc')
plt.plot(epochs, val_acc, 'r', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()
plt.figure()
plt.plot(epochs, loss, 'b', label='Training loss')
plt.plot(epochs, val_loss, 'r', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
# + colab={} colab_type="code" id="Fqf1Gxbut_jS"
# !mkdir test_orca
# # !rm -rf test_orca1
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="52KIIbGKt6am" outputId="bd1a02de-bf42-4e7f-ec15-9e06a4241b50"
# !gsutil -m cp -R orca_detection_adam_.h5 gs://ngos-gsoc-axiom
# + colab={"base_uri": "https://localhost:8080/", "height": 28101} colab_type="code" id="FDG65w5wUW9P" outputId="d1435485-a297-44e7-ba31-68cf20fbc3f2"
# !gsutil -m cp -R neg gs://ngos-gsoc-axiom/2008_
# + colab={} colab_type="code" id="WNrsit1_Cvs9"
# Count misclassified test images by comparing thresholded predictions
# against the generator's ground-truth labels.
import numpy as np
from keras.preprocessing import image

test_data_path = 'test_orca/'
# model = load_model('orca_detection_20_1.h5', custom_objects= {'sensitivity': sensitivity})

# Same 1/255 rescaling as used for training.
validation_datagen = ImageDataGenerator(rescale=1. / 255)

# Create a generator for prediction (shuffle=False keeps predictions
# aligned with filenames/classes).
validation_generator = validation_datagen.flow_from_directory(
    test_data_path,
    target_size=(img_width, img_height),
    batch_size=32,
    class_mode='binary',
    shuffle=False)

# Get the filenames from the generator
fnames = validation_generator.filenames

# Get the ground truth from generator
ground_truth = validation_generator.classes

# Get the label to class mapping from the generator
label2index = validation_generator.class_indices

# Getting the mapping from class index to class label
idx2label = dict((v, k) for k, v in label2index.items())

# Get the predictions from the model using the generator.
# NOTE(review): a non-integer steps value may drop the final partial batch
# in some Keras versions — confirm len(predictions) == samples.
predictions = model.predict_generator(
    validation_generator,
    steps=validation_generator.samples / validation_generator.batch_size,
    verbose=1)

# Threshold sigmoid outputs and flatten (N, 1) -> (N,): without ravel() the
# comparison below broadcasts (N, 1) against the 1-D label array into an
# N x N matrix, inflating the error count. `np.int` was removed in
# NumPy 1.24+, so the builtin `int` is used.
# NOTE(review): `< 0.5` maps high sigmoid scores to class 0, the inverse of
# Keras' alphabetical class indexing — confirm this inversion matches the
# test directory layout.
predicted_classes = (predictions < 0.5).astype(int).ravel()

errors = np.where(predicted_classes != ground_truth)[0]
print("No of errors = {}/{}".format(len(errors), validation_generator.samples))
# Show the errors
# for i in range(len(errors)):
# pred_class = np.argmax(predictions[errors[i]])
# pred_label = idx2label[pred_class]
# title = 'Original label:{}, Prediction :{}, confidence : {:.3f}'.format(
# fnames[errors[i]].split('/')[0],
# pred_label,
# predictions[errors[i]][pred_class])
# original = image.load_img('{}/{}'.format(test_data_path,fnames[errors[i]]))
# plt.figure(figsize=[7,7])
# plt.axis('off')
# plt.title(title)
# plt.imshow(original)
# plt.show()
# + colab={} colab_type="code" id="CRbbVAd9X8YM"
# Evaluate the saved model on the held-out test set: classification report,
# confusion matrix, accuracy, sensitivity and specificity.
from sklearn.metrics import confusion_matrix
import numpy as np
import math

# Test-time preprocessing must match training: the train/validation
# generators rescaled pixels by 1/255, so the test generator must as well
# (it was previously missing, feeding 0-255 inputs to a model trained on
# 0-1 inputs).
test_generator = ImageDataGenerator(rescale=1. / 255)
test_data_generator = test_generator.flow_from_directory(
    test_data_path,
    target_size=(img_width, img_height),
    batch_size=32,
    shuffle=False)

# math.ceil replaces numpy.math.ceil (removed in NumPy 2.0); ceiling ensures
# the final partial batch is still consumed.
test_steps_per_epoch = math.ceil(test_data_generator.samples / test_data_generator.batch_size)

model_path = 'orca_detection_adam_.h5'
model = load_model(model_path)
predictions = model.predict_generator(test_data_generator, steps=test_steps_per_epoch)

# Threshold sigmoid outputs and flatten (N, 1) -> (N,) so sklearn receives a
# 1-D label vector. `np.int` was removed in NumPy 1.24+, so the builtin
# `int` is used.
# NOTE(review): `< 0.5` maps high scores to class 0, the inverse of Keras'
# alphabetical class indexing — confirm against the test directory layout.
predicted_classes = (predictions < 0.5).astype(int).ravel()

true_classes = test_data_generator.classes
class_labels = list(test_data_generator.class_indices.keys())
report = metrics.classification_report(true_classes, predicted_classes, target_names=class_labels)
print(report)

cm = confusion_matrix(true_classes, predicted_classes)
total = sum(sum(cm))
acc = (cm[0, 0] + cm[1, 1]) / total
# NOTE(review): these locals shadow the sensitivity()/specificity() metric
# functions defined earlier in the notebook.
sensitivity = cm[0, 0] / (cm[0, 0] + cm[0, 1])
specificity = cm[1, 1] / (cm[1, 0] + cm[1, 1])

# show the confusion matrix, accuracy, sensitivity, and specificity
print(cm)
print("acc: {:.4f}".format(acc))
print("sensitivity: {:.4f}".format(sensitivity))
print("specificity: {:.4f}".format(specificity))
# + colab={} colab_type="code" id="oCX3mq4cKLj0"
# !mkdir t_orca
# # !rm -rf test_orca1
# + colab={} colab_type="code" id="jh9y0y18KRji"
# !gsutil -m cp -R gs://ngos-gsoc-axiom/PreProcessed_image_1s/2007\ field\ acoustic\ recordings t_orca/
# + colab={} colab_type="code" id="wo6jygSpKhPI"
# !mkdir test1_orca
# # !rm -rf test1_orca
# + colab={} colab_type="code" id="L_7-nCiHKcqL"
import shutil
import os

# Flatten the 2008 recording tree: copy every file found anywhere under the
# source directory into the single flat 'test1_orca' directory.
# NOTE(review): files sharing a basename in different subfolders overwrite
# each other at the destination.
file_path_image= 't_orca/2008 field acoustic recordings'
for root, _subdirs, filenames in os.walk(file_path_image):
    for filename in filenames:
        shutil.copy(os.path.join(root, filename), 'test1_orca')
# + colab={"base_uri": "https://localhost:8080/", "height": 136} colab_type="code" id="8qsahyN_hKgE" outputId="e800e981-f18c-4a69-efd1-d0efc775ec3c"
# # !pip3 install pillow
# # !pip3 install pillow
from keras.models import load_model
from keras.preprocessing import image
import numpy as np
import os
from PIL import Image
from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img
from keras.models import Sequential

# Folder of spectrogram images to classify with the trained model.
folder_path = 'test1_orca'

# Load every image under the folder, resize to the model's input size and
# collect into one batch.
images = []
for dirs, subdirs, files in os.walk(folder_path):
    for file in files:
        f_name = os.path.join(dirs, file)
        img = image.load_img(f_name, target_size=(img_width, img_height))
        img = img_to_array(img)          # (H, W, 3) floats in 0-255
        img = np.expand_dims(img, axis=0)
        images.append(img)

# Stack into one (N, H, W, 3) array and apply the SAME preprocessing as
# training: the generators rescaled pixels by 1/255, so the raw 0-255
# arrays must be rescaled here too (previously omitted, which skewed the
# predictions).
images = np.vstack(images) / 255.0

# predict_classes is available on Sequential models in this Keras version
# (deprecated/removed in TF2-era Keras — use (predict() > 0.5) there).
classes = model.predict_classes(images, batch_size=10)
print(classes)
# print(classes[0])
# print(classes[0][0])
# + colab={} colab_type="code" id="p5XQ5iOpLDmP"
# !mkdir pos neg
# # !rm -rf pos neg
# + colab={} colab_type="code" id="xLUWIsw7LPp0"
# Snapshot the (arbitrarily ordered) file names under test1_orca.
f = list(os.listdir('test1_orca'))
# + colab={} colab_type="code" id="In2VtVAqLU8y"
# Route each classified image into pos/ (class 1) or neg/ (class 0),
# pairing the prediction array `classes` with the file names in `f`.
folder_path = 'test1_orca/'
import shutil, os

# NOTE(review): `f` comes from os.listdir while `classes` was produced by an
# os.walk-based loader — both enumerate the same flat directory, but matching
# order is assumed, not guaranteed; confirm before relying on the routing.
for file_name, predicted in zip(f, classes):
    src = os.path.join(folder_path, file_name)
    if predicted[0] == 1:
        shutil.copy(src, 'pos')
    elif predicted[0] == 0:
        shutil.copy(src, 'neg')
# + colab={} colab_type="code" id="-n4MZWRiLa34"
# !mkdir pos neg
# + colab={} colab_type="code" id="S45JUGH8Lm3h"
# !rm -rf pos neg
# + colab={"base_uri": "https://localhost:8080/", "height": 2992} colab_type="code" id="NB9Lk8E-Mlpv" outputId="e303b0c3-045a-403d-ef81-d377c16a4f1e"
# !ls -l pos
# + colab={} colab_type="code" id="8uJQuV_qipY3"
|
app/Detection/Labelling/Orca_detection_small_more_epochs.ipynb
|