# code stringlengths 38 801k | repo_path stringlengths 6 263   (dataset-export residue, kept as a comment)
# |---|---|
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## The Transformer Network for the Traveling Salesman Problem
#
# <NAME>, <NAME>, Feb 2021<br>
#
# Arxiv : https://arxiv.org/pdf/2103.03012.pdf<br>
# Talk : https://ipam.wistia.com/medias/0jrweluovs<br>
# Slides : https://t.co/ySxGiKtQL5<br>
#
# This code visualizes transformer and concorde solutions
#
# +
###################
# Libs
###################
import torch
import torch.nn as nn
import time
import argparse
import os
import datetime
from torch.distributions.categorical import Categorical
# visualization
# %matplotlib inline
from IPython.display import set_matplotlib_formats, clear_output
set_matplotlib_formats('png2x','pdf')
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
try:
import networkx as nx
from scipy.spatial.distance import pdist, squareform
from concorde.tsp import TSPSolver # !pip install -e pyconcorde
except:
pass
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# +
###################
# Hardware : CPU / GPU(s)
###################
# Default to CPU; gpu_id=-1 is a sentinel meaning "no GPU selected".
device = torch.device("cpu"); gpu_id = -1 # select CPU
# NOTE(review): the next line unconditionally overrides the CPU choice above;
# comment it out to actually run on CPU only.
gpu_id = '0' # select a single GPU
#gpu_id = '2,3' # select multiple GPUs
# Restrict which physical GPUs CUDA may see (must be set before CUDA init).
os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_id)
if torch.cuda.is_available():
    device = torch.device("cuda")
    print('GPU name: {:s}, gpu_id: {:s}'.format(torch.cuda.get_device_name(0),gpu_id))
print(device)
# +
###################
# Hyper-parameters
###################
class DotDict(dict):
    """Dictionary whose entries are also reachable as attributes (d.key <-> d['key'])."""
    def __init__(self, **kwds):
        super().__init__()
        self.update(kwds)
        # Aliasing __dict__ to the dict itself makes attribute access hit the
        # same storage as item access, in both directions.
        self.__dict__ = self
args = DotDict()
args.nb_nodes = 20 # TSP20
args.nb_nodes = 50 # TSP50 (overrides the line above; keep only the problem size you want)
#args.nb_nodes = 100 # TSP100
args.bsz = 512 # batch size, TSP20 TSP50
args.dim_emb = 128 # embedding/hidden dimension
args.dim_ff = 512 # feed-forward dimension in the encoder
args.dim_input_nodes = 2 # each city is a 2D point
args.nb_layers_encoder = 6
args.nb_layers_decoder = 2
args.nb_heads = 8 # number of attention heads
args.nb_epochs = 10000
args.nb_batch_per_epoch = 2500
args.nb_batch_eval = 20
args.gpu_id = gpu_id
args.lr = 1e-4 # learning rate
args.tol = 1e-3 # tolerance used for the baseline-update test during training
args.batchnorm = True # if batchnorm=True then batch norm is used
#args.batchnorm = False # if batchnorm=False then layer norm is used
args.max_len_PE = 1000 # max sequence length of the positional-encoding table
print(args)
# +
###################
# Network definition
# Notation :
# bsz : batch size
# nb_nodes : number of nodes/cities
# dim_emb : embedding/hidden dimension
# nb_heads : nb of attention heads
# dim_ff : feed-forward dimension
# nb_layers : number of encoder/decoder layers
###################
def compute_tour_length(x, tour):
    """
    Compute the length of a batch of TSP tours.
    Inputs : x of size (bsz, nb_nodes, 2) batch of tsp tour instances
             tour of size (bsz, nb_nodes) batch of sequences (node indices) of tsp tours
    Output : L of size (bsz,) batch of lengths of each tsp tour
    """
    bsz, nb_nodes = x.shape[0], x.shape[1]
    batch_idx = torch.arange(bsz, device=x.device)
    lengths = torch.zeros(bsz, device=x.device)
    start = x[batch_idx, tour[:, 0], :]   # (bsz, 2) coordinates of the first city of each tour
    prev = start
    # Tour length is only a metric here, so no gradient tracking is needed.
    with torch.no_grad():
        for step in range(1, nb_nodes):
            cur = x[batch_idx, tour[:, step], :]
            lengths += ((cur - prev) ** 2).sum(dim=1).sqrt()   # leg: previous -> current city
            prev = cur
        lengths += ((cur - start) ** 2).sum(dim=1).sqrt()      # closing leg: last -> first city
    return lengths
class Transformer_encoder_net(nn.Module):
    """
    Encoder network based on self-attention transformer
    Inputs :
      h of size      (bsz, nb_nodes+1, dim_emb)    batch of input cities (plus the start token)
    Outputs :
      h of size      (bsz, nb_nodes+1, dim_emb)    batch of encoded cities
      score of size  (bsz, nb_nodes+1, nb_nodes+1) attention scores of the LAST layer only
    """
    def __init__(self, nb_layers, dim_emb, nb_heads, dim_ff, batchnorm):
        super(Transformer_encoder_net, self).__init__()
        assert dim_emb == nb_heads* (dim_emb//nb_heads) # check if dim_emb is divisible by nb_heads
        # per layer: one multi-head self-attention, a two-linear feed-forward, and two norms
        self.MHA_layers = nn.ModuleList( [nn.MultiheadAttention(dim_emb, nb_heads) for _ in range(nb_layers)] )
        self.linear1_layers = nn.ModuleList( [nn.Linear(dim_emb, dim_ff) for _ in range(nb_layers)] )
        self.linear2_layers = nn.ModuleList( [nn.Linear(dim_ff, dim_emb) for _ in range(nb_layers)] )
        if batchnorm:
            self.norm1_layers = nn.ModuleList( [nn.BatchNorm1d(dim_emb) for _ in range(nb_layers)] )
            self.norm2_layers = nn.ModuleList( [nn.BatchNorm1d(dim_emb) for _ in range(nb_layers)] )
        else:
            self.norm1_layers = nn.ModuleList( [nn.LayerNorm(dim_emb) for _ in range(nb_layers)] )
            self.norm2_layers = nn.ModuleList( [nn.LayerNorm(dim_emb) for _ in range(nb_layers)] )
        self.nb_layers = nb_layers
        self.nb_heads = nb_heads
        self.batchnorm = batchnorm

    def forward(self, h):
        # PyTorch nn.MultiheadAttention requires input size (seq_len, bsz, dim_emb)
        h = h.transpose(0,1) # size(h)=(nb_nodes, bsz, dim_emb)
        # L layers
        for i in range(self.nb_layers):
            h_rc = h # residual connection, size(h_rc)=(nb_nodes, bsz, dim_emb)
            h, score = self.MHA_layers[i](h, h, h) # size(h)=(nb_nodes, bsz, dim_emb), size(score)=(bsz, nb_nodes, nb_nodes)
            # add residual connection
            h = h_rc + h # size(h)=(nb_nodes, bsz, dim_emb)
            if self.batchnorm:
                # Pytorch nn.BatchNorm1d requires input size (bsz, dim, seq_len)
                h = h.permute(1,2,0).contiguous() # size(h)=(bsz, dim_emb, nb_nodes)
                h = self.norm1_layers[i](h) # size(h)=(bsz, dim_emb, nb_nodes)
                h = h.permute(2,0,1).contiguous() # back to size(h)=(nb_nodes, bsz, dim_emb)
            else:
                h = self.norm1_layers[i](h) # size(h)=(nb_nodes, bsz, dim_emb)
            # feedforward
            h_rc = h # residual connection
            h = self.linear2_layers[i](torch.relu(self.linear1_layers[i](h)))
            h = h_rc + h # size(h)=(nb_nodes, bsz, dim_emb)
            if self.batchnorm:
                h = h.permute(1,2,0).contiguous() # size(h)=(bsz, dim_emb, nb_nodes)
                h = self.norm2_layers[i](h) # size(h)=(bsz, dim_emb, nb_nodes)
                h = h.permute(2,0,1).contiguous() # back to size(h)=(nb_nodes, bsz, dim_emb)
            else:
                h = self.norm2_layers[i](h) # size(h)=(nb_nodes, bsz, dim_emb)
        # Transpose h back to batch-first
        h = h.transpose(0,1) # size(h)=(bsz, nb_nodes, dim_emb)
        return h, score
def myMHA(Q, K, V, nb_heads, mask=None, clip_value=None):
    """
    Compute multi-head attention (MHA) for a single query step :
    h = Concat_{k=1}^nb_heads softmax(Q_k.K_k^T / sqrt(d)).V_k
    Note : nn.MultiheadAttention is not used here, so the linear projections
    are not re-computed at every decoding step.
    Inputs : Q    of size (bsz, 1, dim_emb)          batch of queries
             K    of size (bsz, nb_nodes+1, dim_emb) batch of keys
             V    of size (bsz, nb_nodes+1, dim_emb) batch of values
             mask of size (bsz, nb_nodes+1)          True for already-visited cities
             clip_value : optional scalar, tanh-clips the attention logits
    Outputs : attn_output  of size (bsz, 1, dim_emb)      batch of attention vectors
              attn_weights of size (bsz, 1, nb_nodes+1)   attention weights (mean over heads)
    """
    bsz, seq_len, emb_dim = K.size()  # emb_dim must be divisible by nb_heads
    head_dim = emb_dim // nb_heads

    def split_heads(T, seq):
        # (bsz, seq, emb_dim) -> (bsz*nb_heads, seq, head_dim);
        # view needs contiguous memory, hence the .contiguous() calls
        T = T.transpose(1, 2).contiguous()            # (bsz, emb_dim, seq)
        T = T.view(bsz * nb_heads, head_dim, seq)     # (bsz*nb_heads, head_dim, seq)
        return T.transpose(1, 2).contiguous()         # (bsz*nb_heads, seq, head_dim)

    if nb_heads > 1:
        Q = split_heads(Q, 1)
        K = split_heads(K, seq_len)
        V = split_heads(V, seq_len)
    # scaled dot-product scores : (bsz*nb_heads, 1, seq)
    scores = torch.bmm(Q, K.transpose(1, 2)) / Q.size(-1) ** 0.5
    if clip_value is not None:
        scores = clip_value * torch.tanh(scores)
    if mask is not None:
        if nb_heads > 1:
            mask = torch.repeat_interleave(mask, repeats=nb_heads, dim=0)  # (bsz*nb_heads, seq)
        # large negative value rather than -inf (presumably to avoid NaN rows
        # when a row is fully masked -- the original -inf variant was disabled)
        scores = scores.masked_fill(mask.unsqueeze(1), float('-1e9'))
    attn_weights = torch.softmax(scores, dim=-1)       # (bsz*nb_heads, 1, seq)
    attn_output = torch.bmm(attn_weights, V)           # (bsz*nb_heads, 1, head_dim)
    if nb_heads > 1:
        # merge heads back : (bsz*nb_heads, 1, head_dim) -> (bsz, 1, emb_dim)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(bsz, emb_dim, 1)
        attn_output = attn_output.transpose(1, 2).contiguous()
        # average the attention weights over the heads : (bsz, 1, seq)
        attn_weights = attn_weights.view(bsz, nb_heads, 1, seq_len).mean(dim=1)
    return attn_output, attn_weights
class AutoRegressiveDecoderLayer(nn.Module):
    """
    Single decoder layer based on self-attention and query-attention.
    Inputs :
      h_t of size   (bsz, 1, dim_emb)          batch of input queries
      K_att of size (bsz, nb_nodes+1, dim_emb) batch of query-attention keys
      V_att of size (bsz, nb_nodes+1, dim_emb) batch of query-attention values
      mask of size  (bsz, nb_nodes+1)          batch of masks of visited cities
    Output :
      h_t of size   (bsz, dim_emb)             batch of transformed queries
      (note: the original docstring claimed (bsz, nb_nodes+1); the forward
       actually returns the LayerNorm'ed query of size (bsz, dim_emb))
    """
    def __init__(self, dim_emb, nb_heads):
        super(AutoRegressiveDecoderLayer, self).__init__()
        self.dim_emb = dim_emb
        self.nb_heads = nb_heads
        # self-attention projections (query from the current node; keys/values cached over the partial tour)
        self.Wq_selfatt = nn.Linear(dim_emb, dim_emb)
        self.Wk_selfatt = nn.Linear(dim_emb, dim_emb)
        self.Wv_selfatt = nn.Linear(dim_emb, dim_emb)
        self.W0_selfatt = nn.Linear(dim_emb, dim_emb)
        # query-attention projections (attend over the encoder outputs)
        self.W0_att = nn.Linear(dim_emb, dim_emb)
        self.Wq_att = nn.Linear(dim_emb, dim_emb)
        # position-wise MLP
        self.W1_MLP = nn.Linear(dim_emb, dim_emb)
        self.W2_MLP = nn.Linear(dim_emb, dim_emb)
        self.BN_selfatt = nn.LayerNorm(dim_emb)
        self.BN_att = nn.LayerNorm(dim_emb)
        self.BN_MLP = nn.LayerNorm(dim_emb)
        # cached self-attention keys/values of the partial tour, grown one step per forward call
        self.K_sa = None
        self.V_sa = None

    def reset_selfatt_keys_values(self):
        """Clear the cached self-attention keys/values before a new decoding run."""
        self.K_sa = None
        self.V_sa = None

    # For beam search
    def reorder_selfatt_keys_values(self, t, idx_top_beams):
        """Reorder the cached keys/values so they follow the surviving beams at step t."""
        bsz, B = idx_top_beams.size()
        zero_to_B = torch.arange(B, device=idx_top_beams.device) # [0,1,...,B-1]
        B2 = self.K_sa.size(0)// bsz
        self.K_sa = self.K_sa.view(bsz, B2, t+1, self.dim_emb) # size(self.K_sa)=(bsz, B2, t+1, dim_emb)
        K_sa_tmp = self.K_sa.clone()
        self.K_sa = torch.zeros(bsz, B, t+1, self.dim_emb, device=idx_top_beams.device)
        for b in range(bsz):
            self.K_sa[b, zero_to_B, :, :] = K_sa_tmp[b, idx_top_beams[b], :, :]
        self.K_sa = self.K_sa.view(bsz*B, t+1, self.dim_emb) # size(self.K_sa)=(bsz*B, t+1, dim_emb)
        self.V_sa = self.V_sa.view(bsz, B2, t+1, self.dim_emb) # size(self.V_sa)=(bsz, B2, t+1, dim_emb)
        V_sa_tmp = self.V_sa.clone()
        self.V_sa = torch.zeros(bsz, B, t+1, self.dim_emb, device=idx_top_beams.device)
        for b in range(bsz):
            self.V_sa[b, zero_to_B, :, :] = V_sa_tmp[b, idx_top_beams[b], :, :]
        self.V_sa = self.V_sa.view(bsz*B, t+1, self.dim_emb) # size(self.V_sa)=(bsz*B, t+1, dim_emb)

    # For beam search
    def repeat_selfatt_keys_values(self, B):
        """Duplicate the cached keys/values B times along the batch axis."""
        self.K_sa = torch.repeat_interleave(self.K_sa, B, dim=0) # size(self.K_sa)=(bsz.B, t+1, dim_emb)
        self.V_sa = torch.repeat_interleave(self.V_sa, B, dim=0) # size(self.V_sa)=(bsz.B, t+1, dim_emb)

    def forward(self, h_t, K_att, V_att, mask):
        bsz = h_t.size(0)
        h_t = h_t.view(bsz,1,self.dim_emb) # size(h_t)=(bsz, 1, dim_emb)
        # embed the query for self-attention
        q_sa = self.Wq_selfatt(h_t) # size(q_sa)=(bsz, 1, dim_emb)
        k_sa = self.Wk_selfatt(h_t) # size(k_sa)=(bsz, 1, dim_emb)
        v_sa = self.Wv_selfatt(h_t) # size(v_sa)=(bsz, 1, dim_emb)
        # concatenate the new self-attention key and value to the previous keys and values
        if self.K_sa is None:
            self.K_sa = k_sa # size(self.K_sa)=(bsz, 1, dim_emb)
            self.V_sa = v_sa # size(self.V_sa)=(bsz, 1, dim_emb)
        else:
            self.K_sa = torch.cat([self.K_sa, k_sa], dim=1)
            self.V_sa = torch.cat([self.V_sa, v_sa], dim=1)
        # compute self-attention between nodes in the partial tour
        h_t = h_t + self.W0_selfatt( myMHA(q_sa, self.K_sa, self.V_sa, self.nb_heads)[0] ) # size(h_t)=(bsz, 1, dim_emb)
        # BUGFIX: squeeze(1) instead of squeeze() so the batch dim survives when bsz==1
        # (a bare squeeze() would collapse (1, 1, dim_emb) to (dim_emb,))
        h_t = self.BN_selfatt(h_t.squeeze(1)) # size(h_t)=(bsz, dim_emb)
        h_t = h_t.view(bsz, 1, self.dim_emb) # size(h_t)=(bsz, 1, dim_emb)
        # compute attention between self-attention nodes and encoding nodes in the partial tour
        q_a = self.Wq_att(h_t) # size(q_a)=(bsz, 1, dim_emb)
        h_t = h_t + self.W0_att( myMHA(q_a, K_att, V_att, self.nb_heads, mask)[0] ) # size(h_t)=(bsz, 1, dim_emb)
        h_t = self.BN_att(h_t.squeeze(1)) # squeeze(1) for the same bsz==1 reason; size(h_t)=(bsz, dim_emb)
        h_t = h_t.view(bsz, 1, self.dim_emb) # size(h_t)=(bsz, 1, dim_emb)
        # MLP with residual connection
        h_t = h_t + self.W2_MLP(torch.relu(self.W1_MLP(h_t)))
        h_t = self.BN_MLP(h_t.squeeze(1)) # size(h_t)=(bsz, dim_emb)
        return h_t
class Transformer_decoder_net(nn.Module):
    """
    Decoder network based on self-attention and query-attention transformers.
    Inputs :
      h_t of size   (bsz, 1, dim_emb)                              batch of input queries
      K_att of size (bsz, nb_nodes+1, dim_emb*nb_layers_decoder)   query-attention keys for all decoding layers
      V_att of size (bsz, nb_nodes+1, dim_emb*nb_layers_decoder)   query-attention values for all decoding layers
      mask of size  (bsz, nb_nodes+1)                              batch of masks of visited cities
    Output :
      prob_next_node of size (bsz, nb_nodes+1)                     batch of probabilities of next node
    """
    def __init__(self, dim_emb, nb_heads, nb_layers_decoder):
        super(Transformer_decoder_net, self).__init__()
        self.dim_emb = dim_emb
        self.nb_heads = nb_heads
        self.nb_layers_decoder = nb_layers_decoder
        # all layers but the last are full auto-regressive decoder layers;
        # the final layer is a single-head pointer mechanism (Wq_final + myMHA)
        self.decoder_layers = nn.ModuleList(
            [AutoRegressiveDecoderLayer(dim_emb, nb_heads) for _ in range(nb_layers_decoder - 1)]
        )
        self.Wq_final = nn.Linear(dim_emb, dim_emb)

    def reset_selfatt_keys_values(self):
        """Reset the cached self-attention keys/values before decoding starts."""
        for layer in self.decoder_layers:
            layer.reset_selfatt_keys_values()

    def reorder_selfatt_keys_values(self, t, idx_top_beams):
        """Reorder cached keys/values to follow the surviving beams (beam search)."""
        for layer in self.decoder_layers:
            layer.reorder_selfatt_keys_values(t, idx_top_beams)

    def repeat_selfatt_keys_values(self, B):
        """Duplicate cached keys/values B times along the batch axis (beam search)."""
        for layer in self.decoder_layers:
            layer.repeat_selfatt_keys_values(B)

    def forward(self, h_t, K_att, V_att, mask):
        for layer_idx in range(self.nb_layers_decoder):
            # slice out this layer's keys/values from the stacked projections
            lo = layer_idx * self.dim_emb
            K_att_l = K_att[:, :, lo:lo + self.dim_emb].contiguous()  # (bsz, nb_nodes+1, dim_emb)
            V_att_l = V_att[:, :, lo:lo + self.dim_emb].contiguous()  # (bsz, nb_nodes+1, dim_emb)
            if layer_idx == self.nb_layers_decoder - 1:
                # final layer : single head with logit clipping (value 10),
                # returns the pointer distribution over the cities
                bsz = h_t.size(0)
                q_final = self.Wq_final(h_t).view(bsz, 1, self.dim_emb)
                attn_weights = myMHA(q_final, K_att_l, V_att_l, 1, mask, 10)[1]
                prob_next_node = attn_weights.squeeze(1)
            else:
                # intermediate layers : multi-head self-attention + query-attention
                h_t = self.decoder_layers[layer_idx](h_t, K_att_l, V_att_l, mask)
        return prob_next_node
def generate_positional_encoding(d_model, max_len):
    """
    Create the standard sin/cos transformer positional encodings.
    Inputs :
      d_model is a scalar corresponding to the hidden dimension
      max_len is the maximum length of the sequence
    Output :
      pe of size (max_len, d_model), where d_model=dim_emb, max_len=1000
    """
    positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)  # (max_len, 1)
    # geometric frequency ladder: 10000^(-2i/d_model) for i = 0, 1, ...
    freqs = torch.exp(-torch.log(torch.tensor(10000.0)) * torch.arange(0, d_model, 2).float() / d_model)
    angles = positions * freqs                 # (max_len, d_model/2)
    pe = torch.zeros(max_len, d_model)
    pe[:, 0::2] = torch.sin(angles)            # even dims get sine
    pe[:, 1::2] = torch.cos(angles)            # odd dims get cosine
    return pe
class TSP_net(nn.Module):
    """
    The TSP network is composed of two steps :
      Step 1. Encoder step : Take a set of 2D points representing a fully connected graph
                             and encode the set with self-transformer.
      Step 2. Decoder step : Build the TSP tour recursively/autoregressively,
                             i.e. one node at a time, with a self-transformer and query-transformer.
    Inputs :
      x of size (bsz, nb_nodes, 2) Euclidian coordinates of the nodes/cities
    Outputs (this visualization variant runs greedy and/or beam-search decoding) :
      tours_greedy, tours_beamsearch of size (bsz, nb_nodes) : batches of tours,
          tours[b,t] contains the idx of the city visited at step t in batch b
      scores_greedy, scores_beamsearch of size (bsz,) : sum_t log prob( pi_t | pi_(t-1),...,pi_0 )
    """
    def __init__(self, dim_input_nodes, dim_emb, dim_ff, nb_layers_encoder, nb_layers_decoder, nb_heads, max_len_PE,
                 batchnorm=True):
        super(TSP_net, self).__init__()
        self.dim_emb = dim_emb
        # input embedding layer (2D coordinates -> dim_emb)
        self.input_emb = nn.Linear(dim_input_nodes, dim_emb)
        # encoder layer
        self.encoder = Transformer_encoder_net(nb_layers_encoder, dim_emb, nb_heads, dim_ff, batchnorm)
        # learned vector (extra "city" nb_nodes) used as the start token for decoding
        self.start_placeholder = nn.Parameter(torch.randn(dim_emb))
        # decoder layer
        self.decoder = Transformer_decoder_net(dim_emb, nb_heads, nb_layers_decoder)
        # one set of keys/values per decoder layer, stacked along the feature axis
        self.WK_att_decoder = nn.Linear(dim_emb, nb_layers_decoder* dim_emb)
        self.WV_att_decoder = nn.Linear(dim_emb, nb_layers_decoder* dim_emb)
        # fixed (non-learned) positional-encoding table of shape (max_len_PE, dim_emb)
        self.PE = generate_positional_encoding(dim_emb, max_len_PE)

    def forward(self, x, B, greedy, beamsearch):
        """
        Decode tours for a batch of TSP instances.
        x : (bsz, nb_nodes, 2) city coordinates
        B : beam width used when beamsearch=True
        greedy / beamsearch : booleans selecting which decoding strategy(ies) to run
        Returns (tours_greedy, tours_beamsearch, scores_greedy, scores_beamsearch);
        the entries of a strategy that was not run are left as placeholder zero tensors.
        """
        # some parameters
        bsz = x.shape[0]
        nb_nodes = x.shape[1]
        zero_to_bsz = torch.arange(bsz, device=x.device) # [0,1,...,bsz-1]
        # For beam search
        zero_to_B = torch.arange(B, device=x.device) # [0,1,...,B-1]
        # input embedding layer
        h = self.input_emb(x) # size(h)=(bsz, nb_nodes, dim_emb)
        # concat the nodes and the input placeholder that starts the decoding
        h = torch.cat([h, self.start_placeholder.repeat(bsz, 1, 1)], dim=1) # size(h)=(bsz, nb_nodes+1, dim_emb)
        # encoder layer
        h_encoder, _ = self.encoder(h) # size(h_encoder)=(bsz, nb_nodes+1, dim_emb)
        # key and value for decoder (all decoder layers at once)
        K_att_decoder = self.WK_att_decoder(h_encoder) # size(K_att_decoder)=(bsz, nb_nodes+1, dim_emb*nb_layers_decoder)
        V_att_decoder = self.WV_att_decoder(h_encoder) # size(V_att_decoder)=(bsz, nb_nodes+1, dim_emb*nb_layers_decoder)
        # move the PE table to the same device as the input
        self.PE = self.PE.to(x.device)
        # placeholder outputs, returned unchanged when the corresponding decoding is not run
        tours_greedy = torch.zeros(2, nb_nodes, device=x.device)
        tours_beamsearch = torch.zeros(2, nb_nodes, device=x.device)
        scores_greedy = torch.zeros(2, device=x.device)
        scores_beamsearch = torch.zeros(2, device=x.device)
        # Greedy search
        if greedy:
            #print('Greedy decoding')
            deterministic = True
            # list that will contain Long tensors of shape (bsz,) that gives the idx of the cities chosen at time t
            tours = []
            # list that will contain Float tensors of shape (bsz,) that gives the log probs of the choices made at time t
            sumLogProbOfActions = []
            # input placeholder that starts the decoding (index nb_nodes is the start token)
            idx_start_placeholder = torch.Tensor([nb_nodes]).long().repeat(bsz).to(x.device)
            h_start = h_encoder[zero_to_bsz, idx_start_placeholder, :] + self.PE[0].repeat(bsz,1) # size(h_start)=(bsz, dim_emb)
            # initialize mask of visited cities
            mask_visited_nodes = torch.zeros(bsz, nb_nodes+1, device=x.device).bool() # False
            mask_visited_nodes[zero_to_bsz, idx_start_placeholder] = True
            # clear key and val stored in the decoder
            self.decoder.reset_selfatt_keys_values()
            # construct tour recursively
            h_t = h_start
            for t in range(nb_nodes):
                # compute probability over the next node in the tour
                prob_next_node = self.decoder(h_t, K_att_decoder, V_att_decoder, mask_visited_nodes) # size(prob_next_node)=(bsz, nb_nodes+1)
                # choose node with highest probability or sample with Bernouilli
                if deterministic:
                    idx = torch.argmax(prob_next_node, dim=1) # size(idx)=(bsz,)
                else:
                    idx = Categorical(prob_next_node).sample() # size(idx)=(bsz,)
                # compute logprobs of the action items in the list sumLogProbOfActions
                ProbOfChoices = prob_next_node[zero_to_bsz, idx]
                sumLogProbOfActions.append( torch.log(ProbOfChoices) ) # size=(bsz,)
                # update embedding of the current visited node with the PE of step t+1
                h_t = h_encoder[zero_to_bsz, idx, :] # size(h_t)=(bsz, dim_emb)
                h_t = h_t + self.PE[t+1].expand(bsz, self.dim_emb)
                # update tour
                tours.append(idx)
                # update masks with visited nodes (on a fresh copy)
                mask_visited_nodes = mask_visited_nodes.clone()
                mask_visited_nodes[zero_to_bsz, idx] = True
            # logprob_of_choices = sum_t log prob( pi_t | pi_(t-1),...,pi_0 )
            sumLogProbOfActions = torch.stack(sumLogProbOfActions,dim=1).sum(dim=1) # size(sumLogProbOfActions)=(bsz,)
            # convert the list of nodes into a tensor of shape (bsz,num_cities)
            tours = torch.stack(tours,dim=1) # size(tours)=(bsz, nb_nodes)
            tours_greedy = tours
            scores_greedy = sumLogProbOfActions
        # Beamsearch
        if beamsearch:
            #print('Beam search decoding')
            # clear key and val stored in the decoder
            self.decoder.reset_selfatt_keys_values()
            # keep the originals: K/V get repeat_interleaved per beam below
            K_att_decoder_tmp = K_att_decoder # size(K_att_decoder_tmp)=(bsz, nb_nodes+1, dim_emb*nb_layers_decoder)
            V_att_decoder_tmp = V_att_decoder # size(V_att_decoder_tmp)=(bsz, nb_nodes+1, dim_emb*nb_layers_decoder)
            for t in range(nb_nodes):
                #if not t%10:
                #    print('t: {}, GPU reserved mem: {:.2f}, GPU allocated mem: {:.2f}'.format(t,torch.cuda.memory_reserved(0)/1e9,torch.cuda.memory_allocated(0)/1e9))
                if t==0: # at t=0, there are at most B_{t=0}=nb_nodes beams
                    B_t0 = min(B, nb_nodes)
                    # input placeholder that starts the decoding
                    idx_start_placeholder = torch.Tensor([nb_nodes]).long().repeat(bsz).to(x.device)
                    h_start = h_encoder[zero_to_bsz, idx_start_placeholder, :] + self.PE[0].repeat(bsz,1) # size(h_start)=(bsz, dim_emb)
                    h_t = h_start # size(h_t)=(bsz, dim_emb)
                    mask_visited_nodes = torch.zeros(bsz, nb_nodes+1, device=x.device).bool() # initialize mask of visited cities
                    mask_visited_nodes[zero_to_bsz, idx_start_placeholder] = True
                    # compute probability over the next node in the tour
                    prob_next_node = self.decoder(h_t, K_att_decoder, V_att_decoder, mask_visited_nodes) # size(prob_next_node)=(bsz, nb_nodes+1)
                    # compute score_t + sum_t score_{t-1} for all beams
                    score_t = torch.log(prob_next_node) # size(score_t)=(bsz, nb_nodes+1) for t=0
                    sum_scores = score_t # size(sum_scores)=(bsz, nb_nodes+1)
                    # choose nodes with top-B sumScores
                    top_val, top_idx = torch.topk(sum_scores, B_t0, dim=1) # size(top_val)=(bsz, B_t0)
                    # update sum_t score_{t} for all beams
                    sum_scores = top_val # size(sum_scores)=(bsz, B_t0)
                    zero_to_B_t0 = torch.arange(B_t0, device=x.device) # [0,1,...,B_t0-1]
                    mask_visited_nodes = mask_visited_nodes.unsqueeze(1) # size(mask_visited_nodes)=(bsz, 1, nb_nodes+1)
                    mask_visited_nodes = torch.repeat_interleave(mask_visited_nodes, B_t0, dim=1)
                    for b in range(bsz):
                        mask_visited_nodes[b, zero_to_B_t0, top_idx[b]] = True # size(mask_visited_nodes)=(bsz, B_t0, nb_nodes+1)
                    tours = torch.zeros(bsz, B_t0, nb_nodes, device=x.device).long() # size(tours)=(bsz, B_t0, nb_nodes)
                    tours[:,:,t] = top_idx # size(tours)=(bsz, B_t0, nb_nodes)
                    # update embedding of the current visited node
                    h_t = torch.zeros(bsz, B_t0, self.dim_emb, device=x.device) # size(h_t)=(bsz, B_t0, dim_emb)
                    for b in range(bsz):
                        h_t[b, :, :] = h_encoder[b, top_idx[b], :] # size(h_t)=(bsz, B_t0, dim_emb)
                    h_t = h_t + self.PE[t+1].expand(bsz, B_t0, self.dim_emb) # size(h_t)=(bsz, B_t0, dim_emb)
                    # replicate decoder caches and encoder keys/values once per beam
                    self.decoder.repeat_selfatt_keys_values(B_t0)
                    K_att_decoder = torch.repeat_interleave(K_att_decoder_tmp, B_t0, dim=0) # size(K_att_decoder)=(bsz*B_t0, nb_nodes+1, dim_emb*nb_layers_decoder)
                    V_att_decoder = torch.repeat_interleave(V_att_decoder_tmp, B_t0, dim=0) # size(V_att_decoder)=(bsz*B_t0, nb_nodes+1, dim_emb*nb_layers_decoder)
                elif t==1: # at t=1, there are at most B_{t=1}=nb_nodes^2 beams
                    # compute probability over the next node in the tour (beams folded into the batch)
                    h_t = h_t.view(bsz*B_t0, self.dim_emb)
                    mask_visited_nodes = mask_visited_nodes.view(bsz*B_t0, nb_nodes+1)
                    prob_next_node = self.decoder(h_t, K_att_decoder, V_att_decoder, mask_visited_nodes) # size(prob_next_node)=(bsz.B_t0, nb_nodes+1)
                    prob_next_node = prob_next_node.view(bsz, B_t0, nb_nodes+1) # size(prob_next_node)=(bsz, B_t0, nb_nodes+1)
                    mask_visited_nodes = mask_visited_nodes.view(bsz, B_t0, nb_nodes+1)
                    h_t = h_t.view(bsz, B_t0, self.dim_emb)
                    # compute score_t + sum_t score_{t-1} for all beams
                    score_t = torch.log(prob_next_node) # size(score_t)=(bsz, B_t0, nb_nodes+1)
                    sum_scores = score_t + sum_scores.unsqueeze(2) # size(sum_scores)=(bsz, B_t0, nb_nodes+1)
                    sum_scores_flatten = sum_scores.view(bsz, -1) # size=(bsz, B_t0.(nb_nodes+1))
                    # choose nodes with top-B sumScores
                    top_val, top_idx = torch.topk(sum_scores_flatten, B, dim=1)
                    # decode flattened indices back into (beam index, city index)
                    idx_top_beams = top_idx // (nb_nodes+1) # size(idx_top_beams)=(bsz, B)
                    idx_in_beams = top_idx - idx_top_beams* (nb_nodes+1) # size(idx_in_beams)=(bsz, B)
                    # update sum_t score_{t} for all beams
                    sum_scores = top_val
                    # update beam masks with visited nodes
                    mask_visited_nodes_tmp = mask_visited_nodes.clone() # size(mask_visited_nodes_tmp)=(bsz, B_t0, nb_nodes+1)
                    mask_visited_nodes = torch.zeros(bsz, B, nb_nodes+1, device=x.device).bool() # size(mask_visited_nodes)=(bsz, B, nb_nodes+1)
                    for b in range(bsz):
                        mask_visited_nodes[b, zero_to_B, :] = mask_visited_nodes_tmp[b, idx_top_beams[b], :] # size(mask_visited_nodes)=(bsz, B, nb_nodes+1)
                    for b in range(bsz):
                        mask_visited_nodes[b, zero_to_B, idx_in_beams[b]] = True # size(mask_visited_nodes)=(bsz, B, nb_nodes+1)
                    # update beam tours with visited nodes
                    tours_tmp = tours.clone()
                    tours = torch.zeros(bsz, B, nb_nodes, device=x.device).long() # size(tours)=(bsz, B, nb_nodes)
                    for b in range(bsz):
                        tours[b, zero_to_B, :] = tours_tmp[b, idx_top_beams[b], :]
                    tours[:,:,t] = idx_in_beams # size(tours)=(bsz, B, nb_nodes)
                    # update embedding of the current visited node
                    h_t = torch.zeros(bsz, B, self.dim_emb, device=x.device) # size(h_t)=(bsz, B, dim_emb)
                    for b in range(bsz):
                        h_t[b, :, :] = h_encoder[b, idx_in_beams[b], :] # size(h_t)=(bsz, B, dim_emb)
                    h_t = h_t + self.PE[t+1].expand(bsz, B, self.dim_emb) # size(h_t)=(bsz, B, dim_emb)
                    # update self-attention embeddings of partial tours
                    self.decoder.reorder_selfatt_keys_values(t, idx_top_beams)
                    K_att_decoder = torch.repeat_interleave(K_att_decoder_tmp, B, dim=0) # size(K_att_decoder)=(bsz*B, nb_nodes+1, dim_emb*nb_layers_decoder)
                    V_att_decoder = torch.repeat_interleave(V_att_decoder_tmp, B, dim=0) # size(V_att_decoder)=(bsz*B, nb_nodes+1, dim_emb*nb_layers_decoder)
                else: # at t>=2, we arbitrary decide to have at most B_{t>=2}=nb_nodes^2 beams
                    # compute probability over the next node in the tour (beams folded into the batch)
                    h_t = h_t.view(bsz*B, self.dim_emb)
                    mask_visited_nodes = mask_visited_nodes.view(bsz*B, nb_nodes+1)
                    prob_next_node = self.decoder(h_t, K_att_decoder, V_att_decoder, mask_visited_nodes) # size(prob_next_node)=(bsz.B, nb_nodes+1)
                    prob_next_node = prob_next_node.view(bsz, B, nb_nodes+1) # size(prob_next_node)=(bsz, B, nb_nodes+1)
                    mask_visited_nodes = mask_visited_nodes.view(bsz, B, nb_nodes+1)
                    h_t = h_t.view(bsz, B, self.dim_emb)
                    # compute score_t + sum_t score_{t-1} for all beams
                    score_t = torch.log(prob_next_node) # size(score_t)=(bsz, B, nb_nodes+1)
                    sum_scores = score_t + sum_scores.unsqueeze(2) # size(sum_scores)=(bsz, B, nb_nodes+1)
                    sum_scores_flatten = sum_scores.view(bsz, -1) # size=(bsz, B.(nb_nodes+1))
                    # choose nodes with top-B sumScores
                    top_val, top_idx = torch.topk(sum_scores_flatten, B, dim=1)
                    # decode flattened indices back into (beam index, city index)
                    idx_top_beams = top_idx // (nb_nodes+1) # size(idx_top_beams)=(bsz, B)
                    idx_in_beams = top_idx - idx_top_beams* (nb_nodes+1) # size(idx_in_beams)=(bsz, B)
                    # update sum_t score_{t} for all beams
                    sum_scores = top_val
                    # update beam masks with visited nodes
                    mask_visited_nodes_tmp = mask_visited_nodes.clone()
                    for b in range(bsz):
                        mask_visited_nodes[b, zero_to_B, :] = mask_visited_nodes_tmp[b, idx_top_beams[b], :]
                    for b in range(bsz):
                        mask_visited_nodes[b, zero_to_B, idx_in_beams[b]] = True
                    # update beam tours with visited nodes
                    tours_tmp = tours.clone()
                    for b in range(bsz):
                        tours[b, zero_to_B, :] = tours_tmp[b, idx_top_beams[b], :]
                    tours[:,:,t] = idx_in_beams # size(tours)=(bsz, B, nb_nodes)
                    # update embedding of the current visited node
                    for b in range(bsz):
                        h_t[b, :, :] = h_encoder[b, idx_in_beams[b], :] # size(h_t)=(bsz, B, dim_emb)
                    h_t = h_t + self.PE[t+1].expand(bsz, B, self.dim_emb) # size(h_t)=(bsz, B, dim_emb)
                    # update self-attention embeddings of partial tours
                    self.decoder.reorder_selfatt_keys_values(t, idx_top_beams)
            # sum_t log prob( pi_t | pi_0,...pi_(t-1) ) ; beam 0 has the best score
            sum_scores = sum_scores[:,0] # size(sum_scores)=(bsz,)
            tours_beamsearch = tours
            scores_beamsearch = sum_scores
        return tours_greedy, tours_beamsearch, scores_greedy, scores_beamsearch
###################
# Instantiate a training network and a baseline network
###################
try:
    del model_baseline # remove existing model (useful when re-running this notebook cell)
except:
    pass
model_baseline = TSP_net(args.dim_input_nodes, args.dim_emb, args.dim_ff,
                         args.nb_layers_encoder, args.nb_layers_decoder, args.nb_heads, args.max_len_PE,
                         batchnorm=args.batchnorm)
# # uncomment these lines if trained with multiple GPUs
# print(torch.cuda.device_count())
# if torch.cuda.device_count()>1:
#     model_baseline = nn.DataParallel(model_baseline)
# # uncomment these lines if trained with multiple GPUs
model_baseline = model_baseline.to(device)
model_baseline.eval() # inference only in this notebook (affects BatchNorm statistics)
print(args); print('')
###################
# Load checkpoint
###################
checkpoint_file = "checkpoint/checkpoint_21-03-01--17-25-00-n50-gpu0.pkl"
# map_location keeps loading working whether the checkpoint was saved on GPU or CPU
checkpoint = torch.load(checkpoint_file, map_location=device)
epoch_ckpt = checkpoint['epoch'] + 1
tot_time_ckpt = checkpoint['tot_time'] # total training time in seconds
plot_performance_train = checkpoint['plot_performance_train']
plot_performance_baseline = checkpoint['plot_performance_baseline']
model_baseline.load_state_dict(checkpoint['model_baseline'])
print('Load checkpoint file={:s}\n  Checkpoint at epoch= {:d} and time={:.3f}min\n'.format(checkpoint_file,epoch_ckpt-1,tot_time_ckpt/60))
del checkpoint # free the checkpoint dict once the weights are loaded
# BUGFIX: seconds -> days conversion is /3600/24 (was /3660/24)
mystring_min = 'Epoch: {:d}, tot_time_ckpt: {:.3f}day, L_train: {:.3f}, L_base: {:.3f}\n'.format(
    epoch_ckpt, tot_time_ckpt/3600/24, plot_performance_train[-1][1], plot_performance_baseline[-1][1])
print(mystring_min)
# +
def plot_tsp(x_coord, x_path, plot_concorde=False, plot_dist_pair=False):
    """
    Plot up to 9 TSP tours from a batch, optionally next to Concorde's tours.

    Args:
        x_coord: node coordinates, tensor of size (bsz, nb_nodes, 2)
        x_path: tours (node orderings), tensor of size (bsz, nb_nodes)
        plot_concorde: if True, also solve each instance with Concorde and
            draw its tour in a second column; prints solver time and the
            average optimality gap of the network tours
        plot_dist_pair: if True, draw all pairwise edges faintly behind the tour
    """
    # pytorch detach
    x_coord = x_coord.detach().cpu()
    x_path = x_path.detach().cpu()
    # compute TSP lengths
    length_tsp = compute_tour_length(x_coord, x_path)
    # preparation
    x_coord = np.array(x_coord)
    x_path = np.array(x_path)
    nb_nodes = x_coord.shape[1]
    G = nx.from_numpy_matrix(np.zeros((nb_nodes,nb_nodes)))
    colors = ['g'] + ['b'] * (nb_nodes - 1) # Green for 0th node, blue for others
    batch_size = x_coord.shape[0]
    max_nb_plots = 3**2 # max number of TSP plots, x^2 for x rows and x cols
    nb_plots = batch_size if batch_size<max_nb_plots else max_nb_plots
    nb_rows = nb_cols = int(nb_plots**0.5)
    if plot_concorde: nb_cols *= 2 # double nb of cols if concorde is plotted
    f = plt.figure(figsize=(30, 15)) if plot_concorde else plt.figure(figsize=(15, 15)) # figure size
    # Concorde statistics (only populated when plot_concorde is True)
    running_time = 0
    gap = 0
    L_concorde = []
    # loop over TSPs
    for i in range(nb_plots):
        x_coord_i = x_coord[i]
        pos_i = dict(zip(range(len(x_coord_i)), x_coord_i.tolist()))
        if plot_dist_pair: # Compute pairwise distances matrix for better visualization
            dist_pair_i = squareform(pdist(x_coord_i, metric='euclidean'))
            G = nx.from_numpy_matrix(dist_pair_i)
        x_path_i = x_path[i]
        length_tsp_i = length_tsp[i]
        nodes_pair_tsp_i = []
        for r in range(nb_nodes-1): # compute consecutive nodes in the solution
            nodes_pair_tsp_i.append((x_path_i[r], x_path_i[r+1]))
        nodes_pair_tsp_i.append((x_path_i[nb_nodes-1], x_path_i[0])) # close the tour
        if plot_concorde: # run concorde solver
            start = time.time()
            graph = pd.DataFrame({'lat' : x_coord_i[:,0]}); graph['lon'] = x_coord_i[:,1]
            # NOTE(review): norm="GEO" interprets coordinates as lat/lon degrees;
            # for uniform points in the unit square "EUC_2D" may be intended -- confirm
            solver = TSPSolver.from_data( graph.lat, graph.lon, norm="GEO" )
            solution = solver.solve().tour
            running_time += time.time()-start
            nodes_pair_concorde_i = []
            for r in range(nb_nodes-1):
                nodes_pair_concorde_i.append((solution[r], solution[r+1]))
            nodes_pair_concorde_i.append((solution[nb_nodes-1], solution[0]))
            length_concorde = compute_tour_length(torch.tensor(x_coord_i).unsqueeze(0),torch.tensor(solution).long().unsqueeze(0))
            gap += length_tsp_i/length_concorde - 1.0
            L_concorde.append(length_concorde)
            # two side-by-side subplots: network tour (left), concorde tour (right)
            subf = f.add_subplot(nb_rows,nb_cols,2*i+1)
            nx.draw_networkx_nodes(G, pos_i, node_color=colors, node_size=20)
            nx.draw_networkx_edges(G, pos_i, edgelist=nodes_pair_tsp_i, alpha=1, width=1, edge_color='r')
            if plot_dist_pair:
                nx.draw_networkx_edges(G, pos_i, alpha=0.3, width=0.5)
            subf.set_title('Length w/ NNetwork : ' + str(length_tsp_i.item())[:5])
            subf = f.add_subplot(nb_rows,nb_cols,2*i+2)
            nx.draw_networkx_nodes(G, pos_i, node_color=colors, node_size=20)
            nx.draw_networkx_edges(G, pos_i, edgelist=nodes_pair_concorde_i, alpha=1, width=1, edge_color='b') #, style='dashed'
            if plot_dist_pair:
                nx.draw_networkx_edges(G, pos_i, alpha=0.3, width=0.5)
            subf.set_title('Length w/ Concorde : ' + str(length_concorde.item())[:5])
        else:
            subf = f.add_subplot(nb_rows,nb_cols,i+1)
            nx.draw_networkx_nodes(G, pos_i, node_color=colors, node_size=20)
            nx.draw_networkx_edges(G, pos_i, edgelist=nodes_pair_tsp_i, alpha=1, width=1, edge_color='r')
            if plot_dist_pair:
                nx.draw_networkx_edges(G, pos_i, alpha=0.3, width=0.5)
            subf.set_title('Length w/ NNetwork : ' + str(length_tsp_i.item())[:5])
    # FIX: only aggregate Concorde statistics when they were computed;
    # torch.stack([]) raised a RuntimeError whenever plot_concorde=False.
    if plot_concorde:
        L_concorde = torch.stack(L_concorde).squeeze()
        print('L_concorde',L_concorde)
        print('Concorde time: {:.3f}sec'.format(running_time))
        print('gap:',(gap/nb_plots).item())
# sample a small batch of random TSP instances and decode with the trained model
bsz = 4
nb_nodes = 50; B = 2500; greedy = False; beamsearch = True # B = beam width
# nb_nodes = 100; B = 100
# nb_nodes = 100; B = 1000
# nb_nodes = 100; B = 3000
# nb_nodes = 200; B = 100
# nb_nodes = 200; B = 1000
x = torch.rand(bsz, nb_nodes, 2) # uniform coordinates in the unit square
x = x.to(device)
with torch.no_grad(): # inference only: no autograd graph needed
    tours_greedy, tours_beamsearch, scores_greedy, scores_beamsearch = model_baseline(x, B, True, True)
    # greedy
    L_greedy = compute_tour_length(x, tours_greedy)
    mean_tour_length_greedy = L_greedy.mean().item()
    mean_scores_greedy = scores_greedy.mean().item()
    # beamsearch
    # flatten (bsz, B, nb_nodes) -> (bsz*B, nb_nodes) to score all beams at once
    tours_beamsearch = tours_beamsearch.view(bsz*B, nb_nodes)
    x_beamsearch = x.repeat_interleave(B,dim=0) # repeat coordinates once per beam
    L_beamsearch = compute_tour_length(x_beamsearch, tours_beamsearch)
    L_beamsearch = L_beamsearch.view(bsz, B)
    L_beamsearch, idx_min = L_beamsearch.min(dim=1) # shortest tour per instance
    tours_beamsearch = tours_beamsearch.view(bsz, B, nb_nodes)
    torch.cuda.empty_cache() # free GPU reserved memory
# NOTE(review): the memory stats below assume a CUDA device -- confirm on CPU-only runs
print('GPU reserved mem: {:.2f}, GPU allocated mem: {:.2f}'.format(torch.cuda.memory_reserved(0)/1e9,torch.cuda.memory_allocated(0)/1e9))
print('L_greedy',L_greedy)
print('L_beamsearch',L_beamsearch)
# select, for each instance, the beam that achieved the minimal tour length
tours = []
for b in range(bsz):
    tours.append(tours_beamsearch[b,idx_min[b],:])
tours_beamsearch = torch.stack(tours, dim=0)
#plot_tsp(x, tours_greedy, plot_concorde=True)
plot_tsp(x, tours_beamsearch, plot_concorde=True)
# -
| visualization_TSP50.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# ### Preamble
# #### Please read this text carefully before beginning
# For the labs in this course, you should work with a partner, where the two of you share a single keyboard and screen. Each person needs to be responsible for making sure that both they and their partner understand what is going on. You should stay engaged with what your partner is doing and discuss what’s going on all times. That means:
#
# - If you know the answer and your partner doesn’t, _don’t_ just type it in and move on to the next question--- explain what you have done and why. This not only helps your partner to learn, but will also clarify your own understanding. Consider whether you could give other examples that would help your partner understand even better, and ask if they are satisfied with your explanation. Being able to think around a concept and explain it well to other people is an important skill to learn, and not always easy.
# - If your partner knows the answer and you don’t, _don’t_ let them move on until you understand too. And if you are at the keyboard, _don’t_ let them simply dictate what you should type if you don’t understand why. Do ask questions, and let your partner know if their explanation makes sense or not. Remember, just because they think they know the answer doesn’t necessarily mean they are right, and if it doesn’t make sense to you, they need to work harder at explaining (and maybe discover that they are wrong!)
# - If neither of you knows the answer, ask your TA, and they can try to help you.
# - If one person is much more familiar with Python than the other, consider putting the weaker person at the keyboard or at least switching frequently, so they will get more practice with basic coding skills.
#
# ### Coding guidelines
# Make an effort to write well-structured and well-commented code. Your grader should be able to understand your code without too much effort. One line of comment for every line of code is not too much - ideally, even a person who does not know the programming language should be able to understand what is going on based on your comments. Your grader is allowed to remove up to two points out of every ten points, if your code is not readable.
#
#
# ### Required software
# Jupyter notebook:
#
# **Easy installation** The easiest way to get the required software is to install Anaconda. See https://www.continuum.io/downloads . It contains all required packages, including python and jupyter. You can choose python 2.7 or 3.5.
#
# **Manual installation** Make sure that you have numpy and matplotlib installed. If you don't, you can use e.g. pip install <package> --user (python2) or pip3 install <package> --user (python3).
#
# ### Submission
# Please submit your jupyter notebook **as a pair via Blackboard**. The deadline for submission is **Friday 9 23:59**.
#
# ### Start the notebook
# Start a terminal, and cd into the directory where you saved the notebook. Then type jupyter notebook. Your web browser will open.
#
#
# This week's preamble is quite long. From next week, there will be less reading and more doing!
#
# ## Exercise 1
#
#
#
# This exercise is meant to help you get familiar with some language data, do some basic text-processing, and explore some concepts learnt in class. We will use two kinds of corpora in this exercise, the **Penn Treebank** corpus (newspaper text) and the **CHILDES** corpus (child language learning data), described below. For each of them, take a few minutes to read the sentences/utterances before starting to program. Then, using Jupyter Notebook, write code to answer the given questions. We have included a number of additional (optional) questions at the end of in **Going Further**. If you finish the main questions, choose one of the additional ones to work on. **Total points: 10**
#
#
#
#
# +
## if you need to download NLTK first, takes some time
import nltk
# nltk.download()
from nltk.corpus import treebank
# -
# %matplotlib inline
import numpy as np
import matplotlib.pyplot as plt
import csv
import string
import re
import glob
from __future__ import division
# ### Penn Treebank Corpus
#
# The **Penn Treebank** is a collection of data from the newspaper _The Wall Street Journal_, of about 1 Million words (40K sentences). This dataset is very commonly used to to train models in NLP systems. It also contains additional information like Part-of-Speech tags and syntactic trees, which has been removed for this exercise. (In later exercise we will use some of this information.) The dataset contains one sentence per line for this exercise.
# (a) What are the total number of word tokens in this corpus? In this case, consider a word to be any string that is separated by whitespace. What are the total number of distinct word _types_?
words = []
with open('sec02-21.raw') as file:
for line in file:
lineWords = line.split(' ')
lineWords = [w.strip('\n') for w in lineWords]
words.extend(lineWords)
print "Words read: ", len(words)
print "Unique words: ", len(set(words))
# (b) In some cases, we don't want to consider punctuations the same way as other word tokens. Using regular expressions, remove punctuations from the corpus, and count the number of word tokens now. What is the number of distinct word types?
# +
# (b) keep only tokens containing at least one letter (or a single digit);
# everything else is treated as punctuation and dropped
r = re.compile('(.*[a-zA-Z]+|\d)')
# NOTE(review): python2 filter returns a list; under python3 this needs list(...)
filtered_words = filter(r.match, words)
print "Filtered words: ",len(filtered_words)
print "Unique filtered words: ",len(set(filtered_words))
# -
# (c) In some cases, it might be useful to remove the distinction between upper-cased and lower-case words, for example, the two uses of _can_ in _Can you program in Python?_ and _I can program in Python_. Again using regular expressions, convert all words to lower-case, and now count the number of distinct word types in the corpus.
# (c) lower-case all filtered tokens and count distinct types
print "Count before lowercase cast: ", len(filtered_words) # token count, unchanged by casing
words = [word.lower() for word in filtered_words]
print "Count after lowercase cast: ", len(set(words)) # distinct types after lower-casing
# (d) Plot a graph of word frequency versus rank of a word in this corpus (after removing punctuation and lower-casing). Does this corpus obey Zipf's law?
# +
# (d) Zipf plot: word frequency versus rank on a log-log scale.
# Frequency table built with dict.get -- equivalent to the explicit
# membership-test/else counting loop, one lookup per token.
word_count = {}
for token in words:
    word_count[token] = word_count.get(token, 0) + 1
# frequencies sorted from most to least common, i.e. by rank
sort_vals = sorted(word_count.values(), reverse=True)
plt.title('Double log function of ranks')
plt.loglog(sort_vals)
plt.show()
# -
# (e) While processing the corpus as above, there may still be a few things that you don't consider real words. What are some of those things? [The question of what counts as a word and what doesn’t comes up a lot in NLP, and there is rarely a single correct answer; often we need to use our judgment and justify our decisions.]. Go back and remove some of these non-words, and re-plot the graph.
# +
# (e) inspect the vocabulary for residual non-words (e.g. clitics split off
# by the treebank tokenisation)
print sorted(word_count.keys())
# NOTE(review): this list of contraction -> full-form pairs is only displayed
# as the cell output; it is never applied to the data -- confirm intent
[("'d","would"),("'ll", "will"),("'m", "am"),("'s", "is"),("n't","not")]
# -
# (f) Assume that the probability $P(w_1^n)$ of a sentence $w_1 \ldots w_n$ can be calculated as follows:
#
# $$P(w_1^n) = P(w_1) . P(w_2) \ldots P(w_n) $$
#
# The probability of a word $w_i$ can be calculated from a corpus as $P(w_i) = count (w_i) / N$ where $N$ is the total number of word tokens in the corpus.
#
# What is the probability of the first two sentences in the corpus?
# +
# (f) unigram probability of the first two sentences:
# P(w_1..w_n) = prod_i count(w_i)/N, with N = total number of filtered tokens
with open('sec02-21.raw') as file:
    lines = file.readlines()
sen_words = lines[0].split()
sen_words.extend(lines[1].split())
# apply the same punctuation filter and lower-casing as used for word_count
r = re.compile('(.*[a-zA-Z]+|\d)')
sen_words = filter(r.match, sen_words)
sen_words = [w.lower() for w in sen_words]
totalTokens = len(filtered_words)
p = 1
for w in sen_words:
    # print word_count[w], p, word_count[w] / totalTokens
    p = p * (word_count[w]/totalTokens) # true division via __future__ import
print "Probability of first two sentences : ", p
# -
# (g) What is the average _sentence length_ of sentences in the Penn Treebank?
file = open('sec02-21.raw')
senlen = 0
i = 0
for line in file:
senlen += len(line)
i += 1
tot = senlen/i
print tot
# ## CHILDES corpus
# The second corpus contains data from CHILDES, the Child Language Data Exchange System. CHILDES is a large repository containing many different corpora in many different languages, all contributed by researchers interested in child language development. Since the particular interests of the different researchers vary considerably, the different corpora contain different types of data (transcriptions, audio, and/or video) and different kinds of annotations (ranging from detailed phonetic transcripts to morphological and syntactic annotations). However all the corpora are annotated using similar guidelines, so that tools can be developed to work with the annotations across different corpora.
#
# We will use the Providence corpus for this lab. To download the corpus, click on the following URL or paste it into your web browser: http://homepages.inf.ed.ac.uk/sgwater/teaching/Providence.zip
# (a) This corpus contains very different data from the Penn Treebank. To get an idea of what is in the files, take a look at _eth01.cha_ by opening it in a text editor or using a unix command like _less_. What information is in the metadata at the top of each file? _(Hint: child language researchers use the format y;m.d to indicate a child's age in years;months.days)_
#
# What do you see in the rest of the file? _Hint: the string of numbers at the end of each line is a code that links to a time point in the audio recording of this data. The audio isn't included here but can be obtained from the CHILDES database.)_
# ## Header
#
# @UTF8
#
# @Begin
#
# @Languages: eng
#
# @Participants: CHI Ethan Target_Child, MOT Mother Mother
#
# @ID: eng|Ethan|CHI|0;11.4|male|||Target_Child|||
#
# @ID: eng|Ethan|MOT|25;||||Mother|||
#
# @Birth of CHI: 12-DEC-2000
#
# @Birth of MOT: 16-NOV-1976
#
# @Media: eth01, audio
#
# @Comment: <NAME>
#
# @Comment: Language of Caregivers is Mother Standard American English (Midwest , Chicago)
#
# Father Standard American English (originally from Brooklyn but speaks SAE)
#
# @Date: 16-NOV-2001
#
# *MOT: you wanna sit with mommy ? 34413_37325
# (b) Actually, the _eth01.cha_ file is maybe not one of the more interesting ones. Take a look now at _eth50.cha_.
# What are some of the main differences you see between the data in these two files? Is there an obvious explanation for those differences?
# ## Header
#
# @UTF8
#
# @Begin
#
# @Languages: eng
#
# @Participants: CHI Ethan Target_Child, MOT Mother Mother, TOY Toy Toy
#
# @ID: eng|Ethan|CHI|1;1.5|male|||Target_Child|||
#
# @ID: eng|Ethan|MOT|26;||||Mother|||
#
# @ID: eng|Ethan|TOY|26;||||Toy|||
#
# @Birth of CHI: 12-DEC-2000
#
# @Birth of MOT: 17-JAN-1976
#
# @Birth of TOY: 17-JAN-1976
#
# @Media: eth05, audio
#
# @Comment: <NAME>
#
# @Comment: Language of Caregivers is Mother Standard American English (Midwest , Chicago)
#
# Father Standard American English (originally from Brooklyn but speaks SAE)
#
# @Comment: difference between /ɑ/ , /æ/ and /d/ , /t/ is often not clear
#
# @Date: 17-JAN-2002
#
#
# ## Child talks more, because older
# (c) Mean length of utterance (MLU) is a measure of child language development which refers to the average number of words (or, sometimes, morphemes) in each of the child’s utterances (spoken sentences). What is Ethan’s MLU in the file _eth50.cha_? Assume for this question that a word is any whitespace-delimited string of characters (including punctuation) in the transcription. So, for example, you should count _fill him with pom+poms xxx ._ as six words. (You will need some regular expression based text-processing here)
# +
# #!/usr/bin/env python
# -*- coding: utf-8 -*-
import pprint
import re
from __future__ import division
def getMLU(i, target="CHI"):
sentenceLength = []
fileName = "Providence/Ethan/eth%02d.cha" % i
with open(fileName) as ethan:
for line in ethan.readlines():
sentence = re.search("\*%s:\t(.*)\x15\d" % target, line)
if sentence:
# -1 on sentence length due to period at end of sentence
sentenceLength.append(len(sentence.group(1).split()) - 1)
return sum(sentenceLength)/ len(sentenceLength)
print getMLU(50)
# -
# (d) Make a plot of Ethan's MLU from _eth01_ to _eth50_
# +
# (d) scatter plot of Ethan's MLU across transcripts 1..50
import matplotlib.pyplot as plt
mlu = []
for i in range(1,51):
    mlu.append(getMLU(i))
plt.scatter(range(1,51), mlu, color='red')
plt.xlim(0,51) # show all 50 file indices with a small margin
plt.show()
# -
# ## Going further
# Have a go at some or all of the following. These tasks are less challenging from a programming perspective but are still good for getting you to think about the data some more. Task 4 may require a bit more programming but should also be accessible.
#
# (e) Compute type/token ratios for the mothers and children in the corpus. Do you see any consistent patterns?
# (f) Compute the MLU for MOT instead of CHI. Plot the MLU of MOT and CHI from the same set of files in a single plot. Do you see the same trends in the MOT and CHI data? Discuss with each other why or why not.
# +
# (f) same MLU curve, but for the mother (MOT) instead of the child
y = []
for i in range(1,51):
    y.append(getMLU(i, target="MOT"))
plt.plot(range(1,51),y)
# -
# (g) Further improve the tokenization so that the word counts you are collecting more accurately reflect "real" words.
| Lab1/Lab1_a.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import sklearn
import numpy as np
import pandas as pd
import xgboost as xg
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.model_selection import train_test_split
# %matplotlib inline
# +
# experiment configuration
equal_no_samples_in_each_class = False # if True, downsample the majority class
datapath = '/home/tracek/Data/gender/gender_descriptors.csv'
seed = 42 # fixed RNG seed for reproducible splits
params = {'max_depth': 13,
          'n_estimators': 1000,
          'objective': 'binary:logistic', # binary classification, probability output
          'eval_metric': ['auc', 'error'],
          'gamma': 0.1} # min loss reduction required to make a further split
test_fraction = 0.1 # held-out test share; 0.0 disables the test split
val_fraction = 0.2 # validation share taken from the remaining training data
# -
data = pd.read_csv(datapath).drop(['filename'], axis=1) # centroid corresponds to meanfreq
male_df_len = len(data[data['label'] == 0]) # label 0 = male
female_df_len = len(data[data['label'] == 1]) # label 1 = female
if equal_no_samples_in_each_class:
    # drop enough label-0 samples so both classes have equal counts
    fraction_to_drop = 1 - female_df_len / male_df_len
    data = data.drop(data[data['label'] == 0].sample(frac=fraction_to_drop, random_state=seed).index)
y = data.pop('label')
if test_fraction > 0.0:
    # stratified test split first, then a validation split from the remainder
    X_train, X_test, y_train, y_test = train_test_split(data, y, test_size=test_fraction, random_state=seed, stratify=y)
    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_fraction, random_state=seed, stratify=y_train)
    dtest = xg.DMatrix(X_test, label=y_test)
else:
    # NOTE(review): with test_fraction == 0 no dtest/y_test is created, but the
    # evaluation cell below still uses both -- confirm this branch is unused
    X_train, X_val, y_train, y_val = train_test_split(data, y, test_size=val_fraction, random_state=seed, stratify=y)
dtrain = xg.DMatrix(X_train, label=y_train)
dval = xg.DMatrix(X_val, label=y_val)
evallist = [(dval, 'eval'), (dtrain, 'train')] # watched during training
# train with early stopping monitored on the validation set
model = xg.train(params=params, dtrain=dtrain, num_boost_round=100, evals=evallist, early_stopping_rounds=5)
y_pred_prob = model.predict(dtest)
y_pred = (y_pred_prob > 0.5).astype(int) # threshold probabilities at 0.5
r = metrics.classification_report(y_true=y_test, y_pred=y_pred)
acc = metrics.accuracy_score(y_true=y_test, y_pred=y_pred)
print(r)
print('Accuracy: ', acc)
# strip spaces from feature names (presumably for tidier importance-plot labels)
model.feature_names = [name.replace(' ', '') for name in model.feature_names]
fig, ax = plt.subplots(figsize=(15, 15))
xg.plot_importance(model, ax)
plt.figure(figsize=(8, 8))
plt.title('Confusion matrix for audio descriptors (Yaafe)')
cm = metrics.confusion_matrix(y_true=y_test, y_pred=y_pred)
a = sns.heatmap(cm, annot=True, annot_kws={"size": 16}, cmap='Blues', fmt='g', square=True,
                xticklabels=['Male', 'Female'], yticklabels=['Male', 'Female'])
print(r)
# +
# micro-averaged precision/recall summary plus PR curve, confusion matrix and
# feature importances in one three-panel figure
precision_avg, recall_avg, _, _ = metrics.precision_recall_fscore_support(y_test, y_pred, average='micro')
precision, recall, _ = metrics.precision_recall_curve(y_test, y_pred_prob)
f, (ax1, ax2, ax3) = plt.subplots(nrows=3, ncols=1, figsize=(10,30), gridspec_kw = {'height_ratios':[1,2,3] })
ax1.step(recall, precision, color='b', alpha=0.2, where='post')
ax1.fill_between(recall, precision, step='post', alpha=0.2, color='b')
ax1.set_xlabel('Recall')
ax1.set_ylabel('Precision')
ax1.set_ylim([0.0, 1.05])
ax1.set_xlim([0.0, 1.0])
# FIX: the title referenced an undefined name `p` (NameError at runtime);
# use the micro-averaged precision computed above
ax1.set_title('2-class Precision-Recall curve: AP={0:0.2f}'.format(precision_avg))
sns.heatmap(cm, annot=True, annot_kws={"size": 16}, cmap='Blues', fmt='g', square=True,
            xticklabels=['Male', 'Female'], yticklabels=['Male', 'Female'], ax=ax2)
ax2.set_title('Confusion matrix')
xg.plot_importance(model, ax3)
f.savefig()
| analysis/model_descriptors.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Train and hyperparameter tune with RAPIDS
# ## Prerequisites
# * Install the Azure Machine Learning Python SDK and create an Azure ML Workspace
# +
import time
# check core SDK version (should match the SDK used to create the workspace)
import azureml.core
print("SDK version:", azureml.core.VERSION)
# +
# data_dir = '../../data_airline_updated'
# -
# ## Initialize workspace
# Initialize a [Workspace](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#workspace) object from the existing workspace you created in the Prerequisites step. `Workspace.from_config()` creates a workspace object from the details stored in `config.json`.
# +
from azureml.core.workspace import Workspace
# if a locally-saved configuration file for the workspace is not available, use the following to load workspace
# ws = Workspace(subscription_id=subscription_id, resource_group=resource_group, workspace_name=workspace_name)
# load the workspace described by the local config.json
ws = Workspace.from_config()
print('Workspace name: ' + ws.name,
      'Azure region: ' + ws.location,
      'Subscription id: ' + ws.subscription_id,
      'Resource group: ' + ws.resource_group, sep = '\n')
datastore = ws.get_default_datastore()
print("Default datastore's name: {}".format(datastore.name))
# +
# datastore.upload(src_dir='../../data_airline_updated', target_path='data_airline', overwrite=False, show_progress=True)
# -
# path within the default datastore where the airline data was uploaded
path_on_datastore = 'data_airline'
ds_data = datastore.path(path_on_datastore)
print(ds_data)
# ## Create AmlCompute
# You will need to create a [compute target](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#compute-target) for training your model. In this tutorial, we use Azure ML managed compute ([AmlCompute](https://docs.microsoft.com/azure/machine-learning/service/how-to-set-up-training-targets#amlcompute)) for our remote training compute resource.
#
# As with other Azure services, there are limits on certain resources (e.g. AmlCompute) associated with the Azure Machine Learning service. Please read [this article](https://docs.microsoft.com/en-us/azure/machine-learning/service/how-to-manage-quotas) on the default limits and how to request more quota.
# If we could not find the cluster with the given name, then we will create a new cluster here. We will create an `AmlCompute` cluster of `Standard_NC6s_v3` GPU VMs.
# +
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.compute_target import ComputeTargetException
#choose a name for your cluster
gpu_cluster_name = "gpu-cluster"
if gpu_cluster_name in ws.compute_targets:
    # reuse an existing cluster with the same name
    gpu_cluster = ws.compute_targets[gpu_cluster_name]
    if gpu_cluster and type(gpu_cluster) is AmlCompute:
        print('Found compute target. Will use {0} '.format(gpu_cluster_name))
else:
    print("creating new cluster")
    # vm_size parameter below could be modified to one of the RAPIDS-supported VM types
    provisioning_config = AmlCompute.provisioning_configuration(vm_size = 'Standard_NC6s_v3', max_nodes = 1)
    #create the cluster
    gpu_cluster = ComputeTarget.create(ws, gpu_cluster_name, provisioning_config)
#can poll for a minimum number of nodes and for a specific timeout.
#if no min node count is provided it uses the scale settings for the cluster
gpu_cluster.wait_for_completion(show_output=True, min_node_count=None, timeout_in_minutes=20)
#use get_status() to get a detailed status for the current cluster.
print(gpu_cluster.get_status().serialize())
# -
# ## Train model on the remote compute
# Now that you have your data and training script prepared, you are ready to train on your remote compute.
# Create a directory that will contain all the necessary code from your local machine that you will need access to on the remote resource. This includes the training script and any additional files your training script depends on.
# +
import os
# project folder gathers everything that must be copied to the remote compute
project_folder = './train_rapids'
os.makedirs(project_folder, exist_ok=True)
# -
# ### Prepare training script
# Now you will need to create your training script. We log the parameters and the highest accuracy the model achieves:
#
# ```python
#
# run.log('Accuracy', np.float(accuracy))
# ```
#
# These run metrics will become particularly important when we begin hyperparameter tuning our model in the "Tune model hyperparameters" section.
#
# Once your script is ready, copy the training script `train_rapids_RF.py` into your project directory.
# +
import shutil
# copy the training entry script into the snapshot folder sent to the cluster
shutil.copy('train_rapids_RF.py', project_folder)
# use rapids_csp scripts
# shutil.copy('train_azure.py', project_folder)
# shutil.copy('rapids_csp_azure.py', project_folder)
# -
# ### Create an experiment
# Create an [Experiment](https://docs.microsoft.com/azure/machine-learning/service/concept-azure-machine-learning-architecture#experiment) to track all the runs in your workspace.
# +
from azureml.core import Experiment
# an Experiment groups all runs submitted from this notebook under one name
experiment_name = 'train_rapids'
experiment = Experiment(ws, name=experiment_name)
# -
# ### Create environment
# +
from azureml.core import Environment
# Create the environment
rapids_env = Environment('rapids_env')
# Specify docker steps as a string. Alternatively, load the string from a file
dockerfile = """
FROM nvcr.io/nvidia/rapidsai/rapidsai:0.12-cuda10.0-runtime-ubuntu18.04
RUN source activate rapids && \
pip install azureml-sdk && \
pip install azureml-widgets
"""
# FROM rapidsai/rapidsai-nightly:0.13-cuda10.0-runtime-ubuntu18.04-py3.7
# Set base image to None, because the image is defined by dockerfile
rapids_env.docker.base_image = None
rapids_env.docker.base_dockerfile = dockerfile
# the docker image already contains all dependencies; skip the conda build
rapids_env.python.user_managed_dependencies = True
# -
# ### Create a RAPIDS estimator
# +
# from azureml.core.container_registry import ContainerRegistry
# #this is an image available in Docker Hub
# image_name = 'zronaghi/rapidsai-nightly:0.13-cuda10.0-runtime-ubuntu18.04-py3.7-azuresdk-030920'
# #don't let the system build a new conda environment
# user_managed_dependencies = True
# +
from azureml.train.estimator import Estimator
# hyperparameters and the data mount point passed to train_rapids_RF.py
script_params = {
    '--data_dir': ds_data.as_mount(),
    '--n_estimators': 100,
    '--max_depth': 8,
    '--n_bins': 8,
    '--max_features': 0.6,
}
estimator = Estimator(source_directory=project_folder,
                      script_params=script_params,
                      compute_target=gpu_cluster,
                      entry_script='train_rapids_RF.py',
#                       custom_docker_image=image_name,
#                       user_managed=user_managed_dependencies
                      environment_definition=rapids_env)
# -
# ### Submit job
# Run your experiment by submitting your estimator object. Note that this call is asynchronous.
run = experiment.submit(estimator)
# ## Monitor your run
# Monitor the progress of the run with a Jupyter widget. The widget is asynchronous and provides live updates every 10-15 seconds until the job completes.
# +
from azureml.widgets import RunDetails
# live-updating widget tracking the submitted run
RunDetails(run).show()
# -
# NOTE(review): cancelling immediately aborts the run -- confirm this is
# intentional (e.g. smoke-testing submission only)
run.cancel()
| azure/notebooks/Train-RAPIDS.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # VANILA LSTM
# Univariate Time Series
# +
# Recurrent Neural Network
# Part 1 - Data Preprocessing
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the training set
training_set = pd.read_csv('Training_data_file_name')
training_set = training_set.iloc[:,1:2].values
# Feature Scaling
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
training_set = sc.fit_transform(training_set)
# Getting the inputs and the outputs required for training.
# FIX: the original sliced 297 values into both X_train and y_train and then
# tried to reshape 297 values to (296, 1, 1), which raises a ValueError.
# For one-step-ahead forecasting, y must be the series shifted by one step.
X_train = training_set[0:296]
y_train = training_set[1:297]
# Reshaping to (samples, timesteps, features) as expected by LSTM layers
X_train = np.reshape(X_train, (296, 1, 1))
# Part 2 - Building the RNN
# Importing the Keras libraries and packages
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
# Initialising the RNN
regressor = Sequential()
# Adding the input layer and the LSTM layer
# NOTE(review): 'softmax' is an unusual activation for an LSTM layer
# ('tanh' is the conventional choice) -- confirm this is intentional
regressor.add(LSTM(units = 4, activation = 'softmax', input_shape = (None, 1)))
# Adding the output layer
regressor.add(Dense(units = 1))
# Compiling the RNN
regressor.compile(optimizer = 'Nadam', loss = 'mean_squared_error')
# Fitting the RNN to the Training set
regressor.fit(X_train, y_train, batch_size = 3, epochs = 300)
# -
# # Making Predictions and Visualizing Results, Calculating RMSE
# +
# Part 3 - Making the predictions and visualising the results
# Predicting the number of jobs for future
df3=pd.read_csv('final_input_file') # full series, used for the "actual" scatter
test_set = pd.read_csv('Testing_data')
real_value = test_set.iloc[:,1:2].values
# Getting the predicted jobs of previous values
inputs = real_value
inputs = sc.transform(inputs) # reuse the scaler fitted on the training data
inputs = np.reshape(inputs, (inputs.shape[0], 1, 1)) # (samples, timesteps, features)
predicted_job = regressor.predict(inputs)
predicted_job = sc.inverse_transform(predicted_job) # back to original units
# Visualising the results
fig=plt.figure(figsize=(12,8))
plt.title('Workload Prediction')
plt.xlabel('Time interval')
plt.ylabel('Number of instances')
plt.plot(test_set.time_interval,predicted_job,color='red',label='predicted')
plt.scatter(df3.time_interval,df3.no_of_instances,color='blue',label='actual')
#plt.legend()
#plt.show()
# root mean squared error between observed and predicted values
import math
from sklearn.metrics import mean_squared_error
rmse = math.sqrt(mean_squared_error(real_value, predicted_job))
print('rmse',rmse)
# -
# # Calculating MAPE
# Mean Absolute Percentage Error between observations and predictions.
# FIX: the accumulator was reset to 0 inside the loop, so the reported MAPE
# was computed from the last data point only; initialise it once, before
# the loop, and accumulate across all points.
sumAPE = 0
for i in range(len(real_value)):
    obs = real_value[i]
    yhat = predicted_job[i]
    APE = abs((yhat - obs) / obs) # absolute percentage error for one point
    sumAPE = sumAPE + APE
MAPE = sumAPE / len(real_value)
print(MAPE)
| lstm.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Community Quotient Graphs
#
# In this notebook, we mainly create the panels for Figure 4 and the analogue figures in the SI.
# We also compute the table describing the attribute scaling used in the visualizations.
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Channel Coding
# + [markdown] pycharm={"name": "#%% md\n"}
# #### General
# - `edge color` reflects inter-cluster number of references (raw count, color scaled from 0 to 100)
# - `node color` (if not grey) reflects cluster family
# - `node size` reflects number of tokens
# - `layout` is force-directed, seeded, and computed based on all edges
# -
# #### Law Name Graphs by Title (US) or Gesetzesname (DE)
# - `nodes` are all titles for US, laws with `tokens >= 5000` for DE
# - `labels` for US are title numbers, for DE are law abbreviations
# - `size divisor` (number of tokens/size divisor = node size) is 4000 in US (as it is for US and DE in the community quotient graphs) but 100 for DE so we can see stuff
#
# > i.e.: sizes BETWEEN countries are NOT comparable for the law name quotient graphs (but they ARE comparable for the community quotient graphs)!
# #### Chapter Graphs by Chapter (where available)/Title (US) or Buch (where available)/Gesetzesname (DE)
# - `nodes` with more than `5000` tokens are shown
# - `labels` are `title/chapter` resp. `lawname/book`, where applicable, else `title` resp. `lawname`
# (only the labels for the 50 largest nodes are drawn)
# #### Quotient Graphs by Community IDs for 1994 and 2018, US vs DE
# - current node drawing thresholds around the 0.9 quantile of community sizes in the US (and seems to work decently for DE, too)
# - `nodes` with `tokens >= 100000 (1994)` resp. >= `150000 (2018)` are shown
# + [markdown] pycharm={"name": "#%% md\n"}
# ### Preparations
# + pycharm={"name": "#%%\n"}
# %run de_hue_order_alignment.py
# -
import numpy as np
import seaborn as sns
import networkx as nx
from matplotlib import pyplot as plt
from quantlaw.utils.networkx import quotient_graph, sequence_graph
from legal_data_clustering.utils.graph_api import cluster_families, get_clustering_result, add_community_to_graph
from legal_data_clustering.utils.nodes_merging import quotient_graph_with_merge
# + pycharm={"name": "#%%\n"}
# %matplotlib inline
sns.set_style('darkgrid')
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
import warnings
warnings.filterwarnings('ignore')
from matplotlib import cm
import pandas as pd
# Accumulates one row of min/max scaling stats per plotted graph; exported as
# CSV + LaTeX table at the end of the notebook.
min_max_df = pd.DataFrame()
# -
def internal_references_for_community(G, cG):
    """Attach an 'internal_references' attribute to each community node of cG.

    For every community in G, counts the reference edges whose endpoints both
    lie inside that community and stores the count on the corresponding node
    of the community quotient graph cG.
    """
    community_ids = set(nx.get_node_attributes(G, 'community').values())
    assert len(community_ids) > 0
    # Restrict to reference edges only; keep all nodes (with their data).
    ref_graph = nx.MultiDiGraph()
    ref_graph.add_nodes_from(G.nodes(data=True))
    ref_graph.add_edges_from(
        e for e in G.edges(data=True) if e[-1]['edge_type'] == 'reference'
    )
    internal_refs = dict()
    for community in sorted(community_ids):
        members = [n for n, n_com in G.nodes(data='community') if n_com == community]
        internal_refs[community] = len(ref_graph.subgraph(members).edges)
    nx.set_node_attributes(cG, internal_refs, 'internal_references')
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
def make_weighted(mG):
    """Collapse a multi-(di)graph into a DiGraph, summing parallel edge weights.

    Edges without a 'weight' attribute count as 1.0. Node data is carried over
    unchanged.
    """
    weighted = nx.DiGraph()
    weighted.add_nodes_from(mG.nodes(data=True))
    for src, dst, attrs in mG.edges(data=True):
        increment = attrs.get('weight', 1.0)
        if weighted.has_edge(src, dst):
            weighted[src][dst]['weight'] += increment
        else:
            weighted.add_edge(src, dst, weight=increment)
    return weighted
def extract_lawname_from_nodekey(nodekey, country_code):
    """
    :param nodekey: the key of a node in a statute graph, e.g., the crossreference graph
    :param country_code: us|de
    :return: the short identifier of the law the node belongs to
    """
    # DE keys carry the law abbreviation in the second underscore-separated
    # field; US keys carry it in the first.
    parts = nodekey.split("_")
    return parts[1] if country_code.lower() == "de" else parts[0]
def make_lawname_graph_for_snapshot(snapshot, country_code, weight_threshold, size_threshold, size_divisor):
    """Build the law-name quotient graph for one snapshot.

    Collapses the clustered cross-reference graph by 'law_name', sums parallel
    reference edges into weights, and attaches label / node_size /
    self_references / n_seqitems / internal_density node attributes for
    plotting.

    :param snapshot: snapshot id, e.g. '1994' (US) or '1994-01-01' (DE)
    :param country_code: 'us' or 'de'
    :param weight_threshold: edges with weight <= this are dropped
    :param size_threshold: nodes with tokens_n below this are dropped
    :param size_divisor: tokens_n / size_divisor gives the drawn node size
    :return: filtered, weighted quotient graph (networkx subgraph view)
    """
    result = get_clustering_result(f'../../legal-networks-data/{country_code.lower()}/11_cluster_results/{snapshot}_0-0_1-0_-1_a-infomap_n100_m1-0_s0_c1000.json',country_code,'seqitems', path_prefix='../')
    G = result.graph
    qG = quotient_graph(G, node_attribute='law_name', edge_types=['reference'], root_level=-1, self_loops=True)
    qG = make_weighted(qG)
    # this label is suitable only for the US case, pass the DE case manually
    nx.set_node_attributes(qG, {node_id:node_id.split('-', 1)[0].split(' ')[-1] for node_id in qG.nodes()}, name='label')
    # drawn node size is proportional to the law's token count
    nx.set_node_attributes(qG, {node_id:node_size/size_divisor for node_id, node_size in dict(qG.nodes(data='tokens_n', default=0)).items()},
                           name='node_size')
    # self-loop weight = number of references staying within the same law
    nx.set_node_attributes(qG, {u:w for u,v,w in qG.edges(data='weight')
                                if u == v}, name='self_references'
                          )
    nx.set_node_attributes(qG, {law_name:len([n for n, data in G.nodes(data=True)
                                              if data['law_name'] == law_name and data['type'] == 'seqitem'])
                                for law_name in qG.nodes()
                               }, name='n_seqitems'
                          )
    # internal density = self references per token (0. when none)
    nx.set_node_attributes(qG, {law_name:data['self_references']/data['tokens_n']
                                         if data.get('self_references',0) > 0 else 0.
                                for law_name, data in qG.nodes(data=True)
                               }, name='internal_density'
                          )
    # drop weak edges, then self-loops, then laws below the size threshold
    qG.remove_edges_from([(u,v) for u,v,w in qG.edges(data='weight') if w <= weight_threshold])
    qG.remove_edges_from([(u,v) for u,v in qG.edges() if u == v])
    qG = nx.subgraph(qG, [n for n, size in qG.nodes(data='tokens_n', default=0) if size >= size_threshold])
    return qG
def make_community_graph_for_snapshot(snapshot, country_code, weight_threshold, size_threshold, size_divisor):
    """Build the community quotient graph for one snapshot.

    Collapses the clustered sequence graph by 'community', attaches
    community_size (chars_n), internal_density, and node_size attributes,
    and filters out weak edges and small communities.

    :param snapshot: snapshot id, e.g. '1994' (US) or '1994-01-01' (DE)
    :param country_code: 'us' or 'de'
    :param weight_threshold: edges with weight <= this are dropped
    :param size_threshold: communities with chars_n below this are dropped
    :param size_divisor: community_size / size_divisor gives the drawn node size
    :return: filtered, weighted community quotient graph
    """
    result = get_clustering_result(f'../../legal-networks-data/{country_code.lower()}/11_cluster_results/{snapshot}_0-0_1-0_-1_a-infomap_n100_m1-0_s0_c1000.json',country_code,'seqitems', path_prefix='../')
    add_community_to_graph(result)
    sG = sequence_graph(result.graph)
    cG = quotient_graph(sG, 'community', self_loops=False, root_level=None, )
    cG = make_weighted(cG)
    cG.remove_edges_from([(u,v) for u,v,w in cG.edges(data='weight') if w <= weight_threshold])
    internal_references_for_community(result.graph, cG)
    # community size is measured in characters (chars_n)
    nx.set_node_attributes(cG, nx.get_node_attributes(cG, 'chars_n'), name='community_size')
    # internal references per character of community text
    nx.set_node_attributes(cG, {
        n:
        cG.nodes[n].get('internal_references', 0)/chars_n if chars_n > 0 else 0
        for n, chars_n in nx.get_node_attributes(cG, 'chars_n').items()
    }, name='internal_density')
    cG = nx.subgraph(cG, [n for n, size in cG.nodes(data='community_size', default=0) if size >= size_threshold])
    nx.set_node_attributes(cG, {n:d/size_divisor for n,d in cG.nodes(data='community_size')}, name='node_size')
    return cG
def make_chapter_graph_for_snapshot(snapshot, country_code, weight_threshold, size_threshold, size_divisor):
    """Build a chapter-level quotient graph (US: title/chapter, DE: law/book).

    :param snapshot: snapshot id, e.g. '1994' (US) or '1994-01-01' (DE)
    :param country_code: 'us' or 'de'
    :param weight_threshold: edges with weight <= this are dropped
    :param size_threshold: nodes with tokens_n below this are dropped
    :param size_divisor: tokens_n / size_divisor gives the drawn node size
    :return: filtered, weighted chapter quotient graph
    """
    # NOTE(review): nx.read_gpickle was removed in networkx 3.0 — pin
    # networkx < 3 or migrate to pickle when upgrading.
    G = nx.read_gpickle(f'../../legal-networks-data/{country_code.lower()}/4_crossreference_graph/seqitems/{snapshot}.gpickle.gz')
    # making this quotient graph takes too much time, implementation probably inefficient
    G, _ = quotient_graph_with_merge(G, self_loops=True, merge_threshold=-1)
    # remove all nodes above the chapter level
    G.remove_nodes_from(set(e[-1] for e in G.nodes(data='parent_key')))
    G.remove_node('root')
    G = make_weighted(G)
    # self-loop weight = references staying within the same chapter/book
    nx.set_node_attributes(G, {u:w for u,v,w in G.edges(data='weight')
                               if u == v}, name='self_references'
                          )
    if country_code.lower() == 'de':
        # DE: law abbreviation, plus "Buch" (book) suffix below the law level
        node_ids = {
            n: extract_lawname_from_nodekey(n,'de')
            if data['level'] == 0
            else f'{extract_lawname_from_nodekey(n, "de")}/{data["heading"].split("Buch")[0]}Buch'
            for n, data in G.nodes(data=True)
        }
    else: # assuming 'us'
        node_ids = {
            # Title number plus chapter/etc. name
            n: data['law_name'].split('-')[0].split(' ')[-1]
            if data['level'] == 0
            else f"{data['law_name'].split('-', 1)[0].split(' ', 1)[-1]}/{data['heading'].split('-', 1)[0].split(' ', 1)[-1]}".replace("–", '-')
            for n, data in G.nodes(data=True)
        }
    nx.set_node_attributes(G, node_ids, name='label')
    # drawn node size is proportional to the token count
    nx.set_node_attributes(G, {node_id:node_size/size_divisor for node_id, node_size in dict(G.nodes(data='tokens_n', default=0)).items()},
                           name='node_size')
    nx.set_node_attributes(G, {law_name:data['self_references']/data['tokens_n']
                               if data.get('self_references',0) > 0 else 0.
                               for law_name, data in G.nodes(data=True)
                              }, name='internal_density'
                          )
    # drop weak edges, then self-loops, then nodes below the token threshold
    G.remove_edges_from([(u,v) for u,v,w in G.edges(data='weight') if w <= weight_threshold])
    G.remove_edges_from([(u,v) for u,v in G.edges() if u == v])
    G = nx.subgraph(G, [n for n, size in G.nodes(data='tokens_n', default=0) if size >= size_threshold])
    return G
def sort_edges_by_weight(G, reverse=False, filter_weight=0):
    """Return (edges, weights) sorted by edge 'weight'.

    Edges lighter than ``filter_weight`` are dropped; ``reverse=True`` sorts
    heaviest first. Edges are (u, v, data) triples.
    """
    ordered = sorted(G.edges(data=True), key=lambda edge: edge[-1]['weight'], reverse=reverse)
    kept = [edge for edge in ordered if edge[-1]['weight'] >= filter_weight]
    weights = [edge[-1]['weight'] for edge in kept]
    return kept, weights
def sort_nodes_by_size(cG, size_attribute, color_attribute):
    """Return node ids, sizes, and colors as parallel tuples, largest node first.

    Sorting big-to-small makes small nodes get drawn last, i.e. on top.
    """
    triples = [
        (node, data[size_attribute], data[color_attribute])
        for node, data in cG.nodes(data=True)
    ]
    triples.sort(key=lambda triple: triple[1], reverse=True)
    return zip(*triples)
def plot_community_graph(cG, size_attribute='node_size', color_attribute='internal_density',
                         labels=None, close=False, figsize=(16,16), edge_filter_weight=0,
                         savepath=None, size_divisor=None,
                         cmap=cm.Reds
                        ):
    """Draw a quotient graph with weight-colored edges and attribute-sized nodes.

    :param cG: quotient graph whose nodes carry size/color attributes
    :param size_attribute: node attribute holding the (already scaled) node size
    :param color_attribute: node attribute mapped through ``cmap``
    :param labels: node id -> label mapping; defaults to the node ids themselves
    :param close: close the figure after (optionally) saving it
    :param figsize: figure size in inches
    :param edge_filter_weight: edges below this weight are not drawn
    :param savepath: if given, the figure is written to this path
    :param size_divisor: divisor used to derive node sizes (required); only used
        here to report the raw min/max sizes in the returned stats
    :param cmap: matplotlib colormap for node colors
    :return: dict with min/max of edge weights, node sizes, and node colors
    """
    assert size_divisor
    plt.rcParams['figure.figsize'] = figsize
    if labels is None:
        labels = {node_id:node_id for node_id in cG.nodes()}
    np.random.seed(1234)
    pos = nx.fruchterman_reingold_layout(cG, k=2.2) # the seed argument of nx doesn't do its job properly, hence we use numpy
    edges_sorted, edgeweights_sorted = sort_edges_by_weight(cG, reverse=False, filter_weight=edge_filter_weight)
    nodes_sorted, node_sizes_sorted, node_colors_sorted = sort_nodes_by_size(cG, size_attribute, color_attribute)
    # stats reported for the attribute-scaling table in the paper/SI
    min_max_row = dict(
        edge_weight_min= min(edgeweights_sorted),
        edge_weight_max= max(edgeweights_sorted),
        node_size_min= min(node_sizes_sorted)*size_divisor,
        node_size_max= max(node_sizes_sorted)*size_divisor,
        node_color_min= min(node_colors_sorted),
        node_color_max= max(node_colors_sorted)
    )
    print(min_max_row)
    edges = nx.draw_networkx_edges(cG, pos=pos, width=[e/10 for e in edgeweights_sorted],
                                   edgelist=edges_sorted,
                                   edge_color=edgeweights_sorted, edge_cmap=cm.Greys,
                                   edge_vmax=100, edge_vmin=0,arrowstyle='fancy', arrowsize=40, connectionstyle='arc3,rad=0.2',
                                   #min_source_margin=0, min_target_margin=0, # margins don't seem to work
                                   )
    for i in range(len(edges_sorted)):
        # we can set attrs of edges using the setters of FancyArrowPatch
        # need to do this, inter alia, b/c alpha is not working in the default drawing function
        # https://matplotlib.org/3.1.1/api/_as_gen/matplotlib.patches.FancyArrowPatch.html#matplotlib.patches.FancyArrowPatch
        edges[i].set_alpha(0.5)
        edges[i].set_linewidth(0)
    nodes = nx.draw_networkx_nodes(cG, nodelist=nodes_sorted, pos=pos, node_size=node_sizes_sorted,node_color=node_colors_sorted,
                                   cmap=cmap, alpha=0.75)
    nx.draw_networkx_labels(cG, pos=pos, labels=labels)
    plt.axis('off')
    plt.tight_layout()
    # BUGFIX: save before closing. Previously plt.savefig ran after plt.close(),
    # so calling with close=True and a savepath wrote an empty (new) figure.
    if savepath is not None:
        plt.savefig(savepath)
    if close:
        plt.close()
    return min_max_row
# +
def add_cluster_families_to_lawname_graph(qG, cluster_evolution_graph_config, country_code):
    """Attach 'cluster_family' and 'cluster_family_color' attributes to qG.

    Families come from the cluster evolution graph; the color index is capped
    at 20 so the colormap does not run out of distinct hues.
    """
    C = nx.read_gpickle(f'../../legal-networks-data/{country_code.lower()}/13_cluster_evolution_graph/all_{cluster_evolution_graph_config}.gpickle.gz')
    cluster_fam_sorted = cluster_families(C, .15)
    # Map each contained node key to the index of its cluster family.
    family_nodes = {}
    for idx, family in enumerate(cluster_fam_sorted):
        for cluster in family:
            for n in C.nodes[cluster]['nodes_contained'].split(','):
                family_nodes[n] = idx
    nx.set_node_attributes(qG, family_nodes, 'cluster_family')
    capped = {n: min(f, 20) for n, f in family_nodes.items()}
    nx.set_node_attributes(qG, capped, 'cluster_family_color')
def add_cluster_families_to_community_graph(qG, cluster_evolution_graph_config, country_code, year):
    """Attach 'cluster_family' and 'cluster_family_color' attributes to qG.

    Only clusters belonging to the given year's snapshot are considered; the
    community id is the integer after the first underscore in the cluster key.
    The color index is capped at 20.
    """
    C = nx.read_gpickle(f'../../legal-networks-data/{country_code.lower()}/13_cluster_evolution_graph/all_{cluster_evolution_graph_config}.gpickle.gz')
    cluster_fam_sorted = cluster_families(C, .15)
    family_nodes = {}
    for idx, family in enumerate(cluster_fam_sorted):
        for cluster in family:
            if cluster.startswith(year):
                family_nodes[int(cluster.split('_')[1])] = idx
    nx.set_node_attributes(qG, family_nodes, 'cluster_family')
    capped = {n: min(f, 20) for n, f in family_nodes.items()}
    nx.set_node_attributes(qG, capped, 'cluster_family_color')
def get_labels(qG, n_label_threshold):
    """Return a node->label dict labeling only the n largest nodes (by 'tokens_n').

    All other nodes map to the empty string so they stay unlabeled in the plot.
    """
    by_size = sorted(list(qG.nodes(data='tokens_n')), key=lambda tup: tup[-1], reverse=True)
    labeled = {node for node, _ in by_size[:n_label_threshold]}
    return {node: (qG.nodes[node]["label"] if node in labeled else "") for node in qG.nodes()}
# -
# ### Quotient graphs by Title and Title/Chapter resp. Law Name and Law Name/Book
cluster_evolution_graph_config = '0-0_1-0_-1_a-infomap_n100_m1-0_s0_c1000'
n_label_threshold = 50
# #### US
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
country_code = 'us'
weight_threshold = 0
edge_filter_weight = 0
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '1994'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Law-name (title) graph for US 1994.
size_divisor = 4000
size_threshold = 0
qG = make_lawname_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=dict(qG.nodes(data='label')),
                           savepath=f'../graphics/lawname-graph-{snapshot}-{country_code.lower()}.pdf', size_divisor=size_divisor
                          )
# NOTE(review): DataFrame.append was removed in pandas 2.0 — migrate these
# accumulation lines to pd.concat when upgrading pandas.
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='lawname', **row), ignore_index=True)
# + pycharm={"name": "#%%\n"}
# Chapter graph for US 1994, colored by cluster family.
size_divisor = 100
size_threshold = 5000
qG = make_chapter_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_lawname_graph(qG, cluster_evolution_graph_config, country_code)
# -
labels = get_labels(qG, n_label_threshold)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/chapter-graph-{snapshot}-{country_code.lower()}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='chapter', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '2018'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Same two plots for US 2018.
size_divisor = 4000
size_threshold = 0
qG = make_lawname_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=dict(qG.nodes(data='label')),
                           savepath=f'../graphics/lawname-graph-{snapshot}-{country_code.lower()}.pdf', size_divisor=size_divisor
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='lawname', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_divisor = 100
size_threshold = 5000
qG = make_chapter_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_lawname_graph(qG, cluster_evolution_graph_config, country_code)
# -
labels = get_labels(qG, n_label_threshold)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/chapter-graph-{snapshot}-{country_code.lower()}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='chapter', **row), ignore_index=True)
# -
# #### DE
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
country_code = 'de'
weight_threshold = 0
edge_filter_weight = 0
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '1994-01-01'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Law-name graph for DE 1994. Note the smaller size_divisor (100 vs 4000 for
# the US) so DE nodes remain visible; sizes are therefore not comparable
# between countries here.
size_divisor = 100
size_threshold = 5000
qG = make_lawname_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
# DE labels come from the node key (law abbreviation), not the US-style label.
labels = {x:key.split('_',2)[1] for x, key in qG.nodes(data='key')}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/lawname-graph-{snapshot[:4]}-{country_code.lower()}.pdf', size_divisor=size_divisor
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='lawname', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_divisor = 100
size_threshold = 5000
qG = make_chapter_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_lawname_graph(qG, cluster_evolution_graph_config, country_code)
# -
labels = get_labels(qG, n_label_threshold)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/chapter-graph-{snapshot[:4]}-{country_code.lower()}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='chapter', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '2018-01-01'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_divisor = 100
size_threshold = 5000
qG = make_lawname_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
labels = {x:key.split('_',2)[1] for x, key in qG.nodes(data='key')}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/lawname-graph-{snapshot[:4]}-{country_code}.pdf', size_divisor=size_divisor
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='lawname', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_divisor = 100
size_threshold = 5000
qG = make_chapter_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                     size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_lawname_graph(qG, cluster_evolution_graph_config, country_code)
# -
labels = get_labels(qG, n_label_threshold)
# Shorten BGB/ZPO book labels to "<law>/<first two heading words>".
for (k,v) in labels.items():
    if ('_BGB_' in k or '_ZPO_' in k) and v:
        labels[k] = f"{labels[k].split('/')[0]}/{' '.join(qG.nodes[k]['heading'].split(' ')[:2])}"
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
row = plot_community_graph(qG, edge_filter_weight=edge_filter_weight, size_attribute='node_size', labels=labels,
                           savepath=f'../graphics/chapter-graph-{snapshot[:4]}-{country_code}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='chapter', **row), ignore_index=True)
# -
# ### Quotient graphs by community
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# Shared settings: the same size_divisor is used for both countries here, so
# community node sizes ARE comparable across US and DE (unlike the law-name
# graphs above).
edge_filter_weight = 0
weight_threshold = 0
size_divisor = 4000
# -
# #### US
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
country_code = 'us'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '1994'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
# 1994 uses a lower size threshold than 2018 (100k vs 150k tokens).
size_threshold = 100000
cG = make_community_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                       size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_community_graph(cG, cluster_evolution_graph_config, country_code, snapshot)
row = plot_community_graph(cG, size_attribute='node_size', edge_filter_weight=edge_filter_weight,
                           savepath=f'../graphics/community-graph-{snapshot}-{country_code}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='community', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '2018'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_threshold = 150000
cG = make_community_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                       size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_community_graph(cG, cluster_evolution_graph_config, country_code, snapshot)
row = plot_community_graph(cG, size_attribute='node_size', edge_filter_weight=edge_filter_weight,
                           savepath=f'../graphics/community-graph-{snapshot}-{country_code}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='community', **row), ignore_index=True)
# -
# #### DE
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
country_code = 'de'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '1994-01-01'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_threshold = 100000
cG = make_community_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                       size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_community_graph(cG, cluster_evolution_graph_config, country_code, snapshot)
row = plot_community_graph(cG, size_attribute='node_size', edge_filter_weight=edge_filter_weight,
                           savepath=f'../graphics/community-graph-{snapshot[:4]}-{country_code}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='community', **row), ignore_index=True)
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
snapshot = '2018-01-01'
# + jupyter={"outputs_hidden": false} pycharm={"name": "#%%\n"}
size_threshold = 150000
cG = make_community_graph_for_snapshot(snapshot, country_code, weight_threshold=weight_threshold,
                                       size_threshold=size_threshold, size_divisor=size_divisor)
add_cluster_families_to_community_graph(cG, cluster_evolution_graph_config, country_code, snapshot)
row = plot_community_graph(cG, size_attribute='node_size', edge_filter_weight=edge_filter_weight,
                           savepath=f'../graphics/community-graph-{snapshot[:4]}-{country_code}.pdf', size_divisor=size_divisor,
                           color_attribute='cluster_family_color', cmap=cluster_family_plt_colors(country_code)
                          )
min_max_df = min_max_df.append(dict(country_code=country_code, snapshot=snapshot, graph_type='community', **row), ignore_index=True)
# -
# ### Min-max-table output
# Persist the collected scaling stats and render them as a LaTeX table.
min_max_df.to_csv('../graphics/graph-min-max.csv', index=False)
min_max_df = pd.read_csv('../graphics/graph-min-max.csv')
# Prettify the values for presentation.
min_max_df.graph_type = [g.capitalize() for g in min_max_df.graph_type]
min_max_df.snapshot = [s[:4] for s in min_max_df.snapshot]
min_max_df.country_code = [c.upper() for c in min_max_df.country_code]
min_max_df.edge_weight_max = min_max_df.edge_weight_max.astype('int64')
min_max_df.edge_weight_min = min_max_df.edge_weight_min.astype('int64')
min_max_df.node_size_max = min_max_df.node_size_max.astype('int64')
min_max_df.node_size_min = min_max_df.node_size_min.astype('int64')
# Order columns: graph_type, country_code, snapshot first, then the stats with
# each *_min sorted immediately before its *_max.
# BUGFIX: the sort key previously used c.replace(' min', ' amin') with a
# space, which never matched the underscore-separated column names (no-op).
sorted_columns = sorted(min_max_df.columns, key=lambda c: {
    'graph_type': 'a',
    'country_code': 'b',
    'snapshot': 'c',
}.get(c, 'x') + c.replace('_min', '_amin'))
min_max_df = min_max_df[sorted_columns]
min_max_df = min_max_df[min_max_df.graph_type != 'Lawname']
min_max_df.columns = [c+'.' if c.endswith('max') or c.endswith('min') else c for c in min_max_df.columns]
# Raw strings avoid invalid-escape-sequence warnings for the LaTeX backslashes.
min_max_df.columns = [r'\multicolumn{1}{p{20mm}}{\centering ' + c.replace('_', ' ').capitalize().replace(' ', r'\\') + '}' for c in min_max_df.columns]
with open('../graphics/graph-min-max-table.tex', 'w') as f:
    min_max_df[[c for c in min_max_df.columns if 'color' not in c]].to_latex(f, escape=False, index=False)
# + [markdown] pycharm={"name": "#%% md\n"}
# ### End
| notebooks/03_community_quotient_graphs.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 20 Newsgroups
#
# Using [this tutorial!](https://scikit-learn.org/0.19/datasets/twenty_newsgroups.html) | 10-22-19
from sklearn.datasets import fetch_20newsgroups
from sklearn.datasets import fetch_20newsgroups_vectorized
newsgroups_train = fetch_20newsgroups(subset='train')
# newsgroups_train = fetch_20newsgroups(subset='train')
from pprint import pprint
pprint(list(newsgroups_train.target_names))
# The data is in `filenames` and `target` attributes (target is integer index of category)
# +
# categories = ['alt.atheism', 'sci.space']
# atheism = ['alt.atheism']
# ng_atheism = fetch_20newsgroups(subset='train', categories=atheism)
# -
from sklearn.feature_extraction.text import TfidfVectorizer
categories = ['alt.atheism', 'talk.religion.misc',
'comp.graphics', 'sci.space']
newsgroups_train = fetch_20newsgroups(subset='train',
categories=categories)
# newsgroups_train = fetch_20newsgroups(subset='train',
# categories=categories)
vectorizer = TfidfVectorizer()
vectors = vectorizer.fit_transform(newsgroups_train.data)
vectors.shape
# +
# vectorizer = TfidfVectorizer()
# vectors = vectorizer.fit_transform(ng_atheism.data)
# vectors.shape
# -
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
newsgroups_test = fetch_20newsgroups(subset='test',
categories=categories)
vectors_test = vectorizer.transform(newsgroups_test.data)
clf = MultinomialNB(alpha=.01)
clf.fit(vectors, newsgroups_train.target)
pred = clf.predict(vectors_test)
metrics.f1_score(newsgroups_test.target, pred, average='macro')
import numpy as np
def show_top10(classifier, vectorizer, categories):
    """Print the ten highest-weighted features for each category."""
    feature_names = np.asarray(vectorizer.get_feature_names())
    for idx, category in enumerate(categories):
        strongest = np.argsort(classifier.coef_[idx])[-10:]
        print("%s: %s" % (category, " ".join(feature_names[strongest])))
# +
import numpy as np
# Debug variant: shadows the earlier show_top10 and prints each category's full
# coefficient argsort instead of the joined top-10 feature names.
def show_top10(classifier, vectorizer, categories):
    feature_names = np.asarray(vectorizer.get_feature_names())
    for i, category in enumerate(categories):
        print(np.argsort(classifier.coef_[i]))
        # top10 = np.argsort(classifier.coef_[i])[-10:]
        # print("%s: %s" % (category, " ".join(feature_names[top10])))
show_top10(clf, vectorizer, newsgroups_train.target_names)
# -
| assets/all_html/2019_10_22_WK4_ASYNC.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [gl-env]
# language: python
# name: Python [gl-env]
# ---
# # Load GraphLab Create
# NOTE(review): this notebook targets Python 2 / GraphLab Create.
import graphlab
# ## Basic settings
# limit the number of pylambda worker processes to 8
graphlab.set_runtime_config('GRAPHLAB_DEFAULT_NUM_PYLAMBDA_WORKERS', 8)
#set canvas to open inline
graphlab.canvas.set_target('ipynb')
# # Load the Amazon baby data
products = graphlab.SFrame('amazon_baby.gl/')
products.head()
selected_words = ['awesome', 'great', 'fantastic', 'amazing', 'love', 'horrible', 'bad', 'terrible', 'awful', 'wow', 'hate']
# # Feature engineering
# word_count: dict of token -> occurrence count per review
products['word_count'] = graphlab.text_analytics.count_words(products['review'])
products.head()
# ## 1. Use .apply() to build a new feature with the counts for each of the selected_words
# Initialize one count column per selected word.
for key in selected_words:
    products[key] = 0
products.head()
products[0]['word_count'].keys()
def key_count(dict, key):
    """Return the count stored under `key`, or 0 if the key is absent.

    Note: the first parameter shadows the builtin `dict`; the name is kept
    for backward compatibility with existing callers.
    """
    return dict.get(key, 0)
# Fill the per-word count columns from the word_count dicts.
for key in selected_words:
    products[key] = products['word_count'].apply(lambda x: key_count(x, key))
products.head()
products.tail()
products['awesome'].show(view='Categorical')
# ### Using the .sum() method on each of the new columns you created, answer the following questions
# Out of the selected_words, which one is most used in the dataset? Which one is least used? Save these results to answer the quiz at the end.
for key in selected_words:
    print key + ' : ' + str(products[key].sum())
len(selected_words)
# ignore all 3* reviews
products = products[products['rating'] != 3]
# positive sentiment = 4* or 5* reviews
products['sentiment'] = products['rating'] >=4
# Recompute word counts on the filtered data.
for key in selected_words:
    products[key] = products['word_count'].apply(lambda x: key_count(x, key))
for key in selected_words:
    print key + ' : ' + str(products[key].sum())
# ## 2. Create a new sentiment analysis model using only the selected_words as features
train_data,test_data = products.random_split(.8, seed=0)
selected_words_model = graphlab.logistic_classifier.create(train_data,
                                                           target='sentiment',
                                                           features=selected_words,
                                                           validation_set=test_data)
# ### Coefficients
# Using this approach, sort the learned coefficients according to the ‘value’ column using .sort(). Out of the 11 words in selected_words, which one got the most positive weight? Which one got the most negative weight? Do these values make sense for you? Save these results to answer the quiz at the end.
selected_words_model['coefficients'].sort('value', ascending=False).print_rows(12,5)
# ## 3. Comparing the accuracy of different sentiment analysis model
# Baseline model using ALL word counts as features.
sentiment_model = graphlab.logistic_classifier.create(train_data,
                                                      target='sentiment',
                                                      features=['word_count'],
                                                      validation_set=test_data)
sentiment_model.evaluate(test_data, metric='roc_curve')
sentiment_model.show(view='Evaluation')
sentiment_model.evaluate(test_data)
selected_words_model.evaluate(test_data)
selected_words_model.show(view='Evaluation')
# What is the accuracy of the selected_words_model on the test_data? What was the accuracy of the sentiment_model that we learned using all the word counts in the IPython Notebook above from the lectures? What is the accuracy majority class classifier on this task? How do you compare the different learned models with the baseline approach where we are just predicting the majority class? Save these results to answer the quiz at the end.
test_data['sentiment'].show(view='Categorical')
# ## 4. Interpreting the difference in performance between the models
diaper_champ_reviews = products[products['name'] == 'Baby Trend Diaper Champ']
len(diaper_champ_reviews)
diaper_champ_reviews.head()
diaper_champ_reviews['predicted_sentiment'] = sentiment_model.predict(diaper_champ_reviews, output_type='probability')
# ### What is the ‘predicted_sentiment’ for the most positive review for ‘Baby Trend Diaper Champ’ according to the sentiment_model from the IPython Notebook from lecture?
# Save this result to answer the quiz at the end.
diaper_champ_reviews = diaper_champ_reviews.sort('predicted_sentiment', ascending=False)
diaper_champ_reviews.head()
# Now use the selected_words_model you learned using just the selected_words to predict the sentiment most positive review you found above. Save this result to answer the quiz at the end.
selected_words_model.predict(diaper_champ_reviews[0:1], output_type='probability')
# Why is the predicted_sentiment for the most positive review found using the model with all word counts (sentiment_model) much more positive than the one using only the selected_words (selected_words_model)? Hint: examine the text of this review, the extracted word counts for all words, and the word counts for each of the selected_words, and you will see what each model used to make its prediction. Save this result to answer the quiz at the end.
diaper_champ_reviews[0]['review']
diaper_champ_reviews[0]['word_count']
diaper_champ_reviews[0]
# # That's all folks!
| ml-foundations/week-3/Assignment - Week 3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 12 - Enhancing Log Plots With Plot Fills
#
# **Created By:** <NAME>
# **Link to Article:**
#
# Matplotlib is a great library to work with in Python and it is one that I always go back to time and time again to work with well logs. Due to its high degree of flexibility it can be tricky to get started with it at first, but once you have mastered the basics it can become a powerful tool for data visualization.
#
# When working with well log data it can be common to apply color fills to the data to help quickly identify areas of interest. For example, identifying lithologies or hydrocarbon bearing intervals. Most of the time when I have been searching on the web for ways to achieve a color fill the articles point to filling between a line and the x-axis on a plot. There are significantly fewer results showing how to apply shading to well log plots, which generally have their longest axis along the y dimension, or to the y-axis in general.
#
# This article forms part of my Python & Petrophysics series. Details of which can be found here.
#
# In this article, I am going to work through four different examples of how we can enhance the look of well log data using simple fills. These include:
# - A simple color fill from a curve to the edge of a plot / track
# - A color fill from a curve to both edges of a plot / track
# - A variable fill from a curve to the edge of a plot / track
# - A fill between two curves (density and neutron porosity) that changes when they cross over
# ## Applying Fills Using matplotlib
# ### Setting up The Libraries and Loading Data
# To begin, we will import a number of common libraries before we start working with the actual data. For this article we will be using pandas, matplotlib and numpy. These three libraries allow us to load, work with and visualise our data. Additionally, the data that is commonly used to store and transfer data is .las files. For this we will use the excellent lasio library to load this data. You can find more about this in my previous post.
import pandas as pd
import matplotlib.pyplot as plt
import lasio
import numpy as np
# ### Importing & Viewing LAS Data
# The dataset we are using comes from the publicly available Equinor Volve Field dataset released in 2018. The file used in this tutorial is from well 15/9- 19A which contains a nice set of well log data.
# To begin loading in our las file, we can use the following method from lasio:
# Load the LAS well-log file and convert it to a pandas DataFrame.
las = lasio.read('Data/15-9-19_SR_COMP.LAS')
df = las.df()
# Copy the LAS depth index into a regular column so it can be used as a plot axis.
df['DEPTH'] = df.index
# To make plotting easier, we will also convert our las file to a pandas dataframe and create a new column containing our depth curve, which is based on the dataframe index.
#
# We can then find out what is contained within our dataset by calling upon the .describe() method for the dataframe like so.
# Summary statistics (count/mean/std/min/max/quartiles) for every curve in the file.
df.describe()
# This will return back a simple, but very useful summary table detailing the statistics of all the curves.
#
# As we have added the depth curve as a column to the dataframe we can easily get the min and max values of our data. Note that this may not necessarily be the full extent of all the curves.
# ### Plotting Our Data With A Simple Fill
# Now that we have our data loaded and have confirmed we have the curves we want, we can begin plotting our data. For this article I am going to plot directly from the dataframe using the .plot() method.
#
# In the code below, you will see that I am specifying a number of arguments:
# - x & y axis
# - c specifies the color of the line
# - lw specifies the line width
# - legend is used to turn on or off a legend. Useful to have on with multiple curves/lines
# - figsize specifies the size of our figure in inches
#
# The remaining sections of code allow the setting of the axes limits (ylim and xlim). Note that as we are plotting depth on our y-axis we have to flip the numbers around so that the deepest depth is the first number and the shallowest depth is the second number.
# +
# Gamma-ray curve on its own; the y-axis is reversed so depth increases downwards.
ax = df.plot(x='GR', y='DEPTH', c='black', lw=0.5, legend=False, figsize=(4, 8))
ax.set_ylim(4500, 3500)
ax.set_xlim(0, 150)
ax.set_title('Plot Without Any Fill')
plt.show()
# -
# We can enhance this plot a little bit further by adding a simple fill extending from the left edge of the plot to the curve value. This is achieved by using .fill_betweenx().
#
# To use this function we need to pass the y value (DEPTH), the curve being shaded to (GR) and the value we are shading from the GR curve to (0). We can then easily specify the color of the fill by using the facecolor argument.
# +
# Same GR plot, now with a green fill from the curve back to the left edge (0 API).
ax = df.plot(x='GR', y='DEPTH', c='black', lw=0.5, legend=False, figsize=(7, 10))
ax.fill_betweenx(df['DEPTH'], df['GR'], 0, facecolor='green')
ax.set_ylim(4500, 3500)
ax.set_xlim(0, 150)
ax.set_title('Plot With a Single Colour Fill to Y-Axis')
plt.show()
# -
# We can go one step further and shade the opposite way by duplicating the line and changing the value to shade to along with the color:
# +
# Two opposing fills: green from the curve down to 0, yellow from the curve up to 150.
ax = df.plot(x='GR', y='DEPTH', c='black', lw=0.5, legend=False, figsize=(7, 10))
ax.fill_betweenx(df['DEPTH'], df['GR'], 0, facecolor='green')
ax.fill_betweenx(df['DEPTH'], df['GR'], 150, facecolor='yellow')
ax.set_ylim(4500, 3500)
ax.set_xlim(0, 150)
ax.set_title('Plot With a Double Colour Fill')
plt.show()
# -
# ### Plotting Our Data With A Variable Fill
# We can take our plot to the next level by applying a variable fill between the gamma ray curve and the y-axis. You will notice that the code below has expanded significantly compared to the code above.
#
# We first have to identify how many colors we will split our shading into. This is done by assigning our x-axis values to a variable and working out the absolute difference between them using span = abs(left_col_value - right_col_value). This gives us our range of values.
#
# We then grab a color map of our choosing using from a wide list of colormaps using cmap= plt.get_cmap('nipy_spectral'). A full list of colormaps can be found here. For this example, I have selected nipy_spectral.
#
# The next section of code looks similar to the above with the exception that the x limits are now controlled by the variables left_col_value. and right_col_value. This allows us to change the value for the limits in just one place rather than in multiple places.
#
# The final section, the for loop, loops through each of the color index values in the array that was created on line 14 and obtains a color from the color map. We then (line 26) use the fill_betweenx method to apply that color. Notice that we are now using where = curve >= index in the arguments. This allows us to shade the appropriate color when the curve value is greater than or equal to the index value.
# +
# Variable (gradient) fill: the area between the left track edge and the GR
# curve is painted as 100 thin colour bands sampled from a colormap.
x_min = 0
x_max = 150
gr_curve = df['GR']                       # curve being shaded against
value_span = abs(x_min - x_max)           # full track width in curve units
spectrum = plt.get_cmap('nipy_spectral')  # colormap the bands are sampled from
# 100 evenly spaced threshold values across the track width.
thresholds = np.arange(x_min, x_max, value_span / 100)
df.plot(x='GR', y='DEPTH', c='black', lw=0.5, legend=False, figsize=(7,10))
plt.ylim(4500, 4000)
plt.xlim(x_min, x_max)
plt.title('Plot With a Variable Fill to Y-Axis')
# Paint one band per threshold, lowest first; each higher threshold overdraws
# the previous bands only where the curve still exceeds it, giving the gradient.
for threshold in sorted(thresholds):
    band_colour = spectrum((threshold - x_min) / value_span)
    plt.fill_betweenx(df['DEPTH'], 0, gr_curve, where=gr_curve >= threshold, color=band_colour)
plt.savefig('12_3.png', dpi=100)
plt.show()
# -
# ### Applying Shading Between Two Curves With Different Scales
# Our final example illustrates how we can apply lithology shading to our density and neutron porosity curves. These two curves are often shaded depending on the crossover. When density moves to the left of neutron porosity, we could potentially have a porous reservoir rock. When the crossover occurs the opposite way, with density to the right of neutron porosity we could potentially have shale rock.
#
# Note that this is very simplified and there are a number of different reasons why these two curves crossover one another.
#
# The first step to display our density and neutron porosity data, is to add them to a plot. In this instance we have to create a figure and add multiple axes to it as opposed to using df.plot(). To allow us to plot the neutron porosity we have to add it on as an extra axis. Using ax1.twiny() we can share the depth curve between the two curves.
# +
# One track; ax2 shares the y (depth) axis with ax1 via twiny().
fig = plt.subplots(figsize=(7,10))
ax1 = plt.subplot2grid((1,1), (0,0), rowspan=1, colspan=1)
ax2 = ax1.twiny()
# Density curve (red) on the primary x-axis, standard 1.95-2.95 g/cc scale.
ax1.plot('DEN', 'DEPTH', data=df, color='red', lw=0.5)
ax1.set_xlim(1.95, 2.95)
ax1.set_xlabel('Density')
ax1.xaxis.label.set_color("red")
ax1.tick_params(axis='x', colors="red")
ax1.spines["top"].set_edgecolor("red")
# Neutron porosity (blue) on the twinned axis, conventional reversed 45 to -15 p.u. scale.
ax2.plot('NEU', 'DEPTH', data=df, color='blue', lw=0.5)
ax2.set_xlim(45, -15)
ax2.set_xlabel('Neutron')
ax2.xaxis.label.set_color("blue")
# Push the second x-axis spine above the first so the two scales do not overlap.
ax2.spines["top"].set_position(("axes", 1.08))
ax2.tick_params(axis='x', colors="blue")
ax2.spines["top"].set_edgecolor("blue")
# Shared depth window; put x ticks and labels on top for both axes.
for ax in [ax1, ax2]:
    ax.set_ylim(4400, 4300)
    ax.xaxis.set_ticks_position("top")
    ax.xaxis.set_label_position("top")
plt.show()
# -
# We can now add the shading. Which is a little bit more complicated than expected as the two curves have different scales. Generally neutron porosity is scaled from 45 to -15 porosity units (p.u) (0.45 to -0.15 for decimal) and density from 1.95 to 2.95 g/cc.
#
# We have to add in an extra bit of code that will scale one of the curves to the unit scale of the other.
#
# +
fig = plt.subplots(figsize=(7,10))
ax1 = plt.subplot2grid((1,1), (0,0), rowspan=1, colspan=1)
ax2 = ax1.twiny()
# Density (red) on its own 1.95-2.95 g/cc scale.
ax1.plot('DEN', 'DEPTH', data=df, color='red', lw=0.5)
ax1.set_xlim(1.95, 2.95)
ax1.set_xlabel('Density')
ax1.xaxis.label.set_color("red")
ax1.tick_params(axis='x', colors="red")
ax1.spines["top"].set_edgecolor("red")
# Neutron porosity (blue) on the twinned, reversed 45 to -15 p.u. scale.
ax2.plot('NEU', 'DEPTH', data=df, color='blue', lw=0.5)
ax2.set_xlim(45, -15)
ax2.set_xlabel('Neutron')
ax2.xaxis.label.set_color("blue")
ax2.spines["top"].set_position(("axes", 1.08))
ax2.tick_params(axis='x', colors="blue")
ax2.spines["top"].set_edgecolor("blue")
x1=df['DEN']
x2=df['NEU']
x = np.array(ax1.get_xlim())
z = np.array(ax2.get_xlim())
# Linearly map the neutron values from their own axis range (z) onto the density
# axis range (x), so both curves live in ax1's coordinate system and can be filled.
nz=((x2-np.max(z))/(np.min(z)-np.max(z)))*(np.max(x)-np.min(x))+np.min(x)
# Crossover shading: green where DEN >= rescaled NEU, yellow where DEN <= rescaled NEU.
ax1.fill_betweenx(df['DEPTH'], x1, nz, where=x1>=nz, interpolate=True, color='green')
ax1.fill_betweenx(df['DEPTH'], x1, nz, where=x1<=nz, interpolate=True, color='yellow')
for ax in [ax1, ax2]:
    ax.set_ylim(4400, 4300)
    ax.xaxis.set_ticks_position("top")
    ax.xaxis.set_label_position("top")
# -
# This allows us to easily identify where potential reservoir sections are. To confirm what these sections are, further analysis would need to be carried out. You should always look at the other logging curves when doing an interpretation to aid your understanding and interpretation.
#
# ## Summary
# In summary, matplotlib is a powerful data visualization tool when working with well log data. We can easily display our logs on tracks and fill in between the lines to aid visualization and interpretation of our data. In this article I have covered how to apply a fixed color fill and a variable gradient fill between our curve and the edge of the plot and also how to fill between two curves that are on different scales.
| 12 - Enhancing Log Plots With Plot Fills.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
# Read the whitespace-delimited lecturer dataset into a DataFrame.
lecturerData = pd.read_csv('/home/atrides/Desktop/R/statistics_with_Python/03_Python_Enviroment/Data_Files/Lecturer Data.dat', sep='\s+')
lecturerData
# Recode the numeric job codes into readable labels (1 -> lecturer, 2 -> students).
lecturerData['job'] = lecturerData['job'].replace({1:'lecturer', 2:'students'})
lecturerData
# saving data
lecturerData.to_csv('final.csv')
# manipulating Data: select only the personality-related columns.
lecturerData[['friends', 'income', 'neurotic']]
# lecturer only (row filter on the recoded job column)
lecturerData[lecturerData['job']=='lecturer']
# personality variables but only for people who drink more than 10 units of alcohol
lecturerData[lecturerData['alcohol']>10][["friends", "alcohol", "neurotic"]]
# Dataframe to matrix/array
np.array(lecturerData[lecturerData['alcohol']>10][["friends", "alcohol", "neurotic"]])
| Python/statistics_with_Python/03_Python_Enviroment/Markdown_notebook/04_readingData.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Visualizing the Graph and Training Curves Using TensorBoard
#
# So now we have a computation graph that trains a Linear Regression model using Mini-batch Gradient Descent, and we are saving checkpoints at regular intervals. Sounds sophisticated, doesn’t it?
#
# However, we are still relying on the `print()` function to visualize progress during training.
#
# There is a better way: enter `TensorBoard`. If you feed it some training stats, it will display nice interactive visualizations of these stats in your web browser (e.g., learning curves). You can also provide it the graph’s definition and it will give you a great interface to browse through it.
#
# This is very useful to identify errors in the graph, to find bottlenecks, and so on.
# The first step is to tweak your program a bit so it writes the graph definition and some training stats—for example, the training error (MSE)—to a log directory that TensorBoard will read from.
#
# You need to **use a different log directory every time you run your program, or else TensorBoard will merge stats from different runs, which will mess up the visualizations**.
#
# The simplest solution for this is to include a time‐stamp in the log directory name. Add the following code at the beginning of the program:
# +
from datetime import datetime

# Timestamped run directory: TensorBoard merges stats from runs that share a
# directory, so each run must log to a fresh folder.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "./tf_logs"
logdir = f"{root_logdir}/run-{now}"
# +
import tensorflow as tf
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import fetch_california_housing
# To plot pretty figures
# %matplotlib inline
import matplotlib
import matplotlib.pyplot as plt
plt.rcParams['axes.labelsize'] = 14
plt.rcParams['xtick.labelsize'] = 12
plt.rcParams['ytick.labelsize'] = 12
# Where to save the figures
PROJECT_ROOT_DIR = "."
CHAPTER_ID = "tensorflow"
def save_fig(fig_id, tight_layout=True):
    """Save the current matplotlib figure as images/<CHAPTER_ID>/<fig_id>.png at 300 dpi.

    fig_id: base file name (without extension).
    tight_layout: apply plt.tight_layout() before saving.
    """
    # `os` is not imported in this notebook's import cell, so import it locally
    # to keep the function self-contained.
    import os
    path = os.path.join(PROJECT_ROOT_DIR, "images", CHAPTER_ID, fig_id + ".png")
    # Create the target directory if missing; savefig would otherwise fail.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    print("Saving figure", fig_id)
    if tight_layout:
        plt.tight_layout()
    plt.savefig(path, format='png', dpi=300)
# to make this notebook's output stable across runs
def reset_graph(seed= 2018):
    """Clear the default TF graph and reseed the TF and NumPy RNGs for reproducibility."""
    tf.reset_default_graph()
    tf.set_random_seed(seed)
    np.random.seed(seed)
# Fetch the California housing dataset and standardise the features.
# A bias column of ones is prepended for the intercept term.
housing = fetch_california_housing()
m,n = housing.data.shape  # m samples, n features
housing_data_plus_bias = np.c_[np.ones((m,1)), housing.data]
scaler = StandardScaler()
scaled_housing_data = scaler.fit_transform(housing.data)
scaled_housing_data_plus_bias = np.c_[np.ones((m, 1)), scaled_housing_data]
# +
reset_graph()
# Training hyper-parameters.
n_epochs = 200
batch_size = 500
learning_rate = 0.01
n_batches = int(np.ceil(m/batch_size))
# Placeholders fed one mini-batch at a time; X includes the bias column (n+1).
X = tf.placeholder(tf.float32, shape=(None, n+1), name = 'X')
y = tf.placeholder(tf.float32, shape=(None,1), name = 'y')
# Model weights, initialised uniformly in [-1, 1).
theta = tf.Variable(tf.random_uniform([n+1, 1], -1.0, 1.0), name = 'theta')
y_pred = tf.matmul(X, theta, name = 'predictions')
error = y_pred - y
mse = tf.reduce_mean(tf.square(error), name = 'mse')
optimizer = tf.train.GradientDescentOptimizer(learning_rate = learning_rate)
#optimizer = tf.train.MomentumOptimizer(learning_rate = learning_rate, momentum = 0.9)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
saver = tf.train.Saver()
mse_summary = tf.summary.scalar('MSE', mse) # <--- here is the scalar summary node
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph()) # <-- here is the writer
# -
# The first line creates a node in the graph that will evaluate the MSE value and write it to a TensorBoard-compatible binary log string called a summary. The second line creates a `FileWriter` that you will use to write summaries to logfiles in the log directory.
#
# Next you need to update the execution phase to evaluate the `mse_summary` node regularly during training (e.g., every 10 mini-batches). This will output a summary that you can then write to the events file using the file_writer. Here is the updated code:
def fetch_batch(epoch, batch_index, batch_size):
    """Return one random mini-batch (X_batch, y_batch) for the given training step.

    The seed is derived from (epoch, batch_index) so the sampled indices are
    reproducible. A local RandomState is used so the *global* NumPy RNG is not
    reseeded as a side effect; RandomState(seed).randint yields the exact same
    values as np.random.seed(seed) followed by np.random.randint.
    """
    rng = np.random.RandomState(epoch * n_batches + batch_index)
    indices = rng.randint(m, size=batch_size)  # sampling with replacement
    X_batch = scaled_housing_data_plus_bias[indices]
    y_batch = housing.target.reshape(-1, 1)[indices]
    return X_batch, y_batch
# +
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        if epoch % 100 == 0:
            # Checkpoint periodically and report the full-dataset MSE.
            save_path = saver.save(sess, "./temp/my_model.ckpt")
            print("Epoch", epoch, "MSE =", mse.eval(feed_dict = {X:scaled_housing_data_plus_bias, y:housing.target.reshape(-1, 1)}))
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            if batch_index % 10 == 0:
                # Every 10 mini-batches, write an MSE summary for TensorBoard.
                summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch})
                step = epoch * n_batches + batch_index
                file_writer.add_summary(summary_str, step)
            sess.run(training_op, feed_dict = {X:X_batch, y:y_batch})
        #y_pred_value = y_pred.eval(feed_dict = {X:scaled_housing_data_plus_bias, y:housing.target.reshape(-1, 1)})
    best_theta = theta.eval()
    save_path = saver.save(sess, "./temp/my_model_final.ckpt")
file_writer.close()
# -
# Now run this program: it will create the log directory and write an events file in this directory, containing both the graph definition and the MSE values. Open up a shell and go to your working directory, then type ls -l tf_logs/run* to list the contents of the log directory:
#
# ```bash
# [Data_Science_Python] c.cui $:ls -l tf_logs/run*
# total 128
# -rw-r--r-- 1 caihaocui staff 62207 Apr 26 19:03 events.out.tfevents.1524733371.192-168-1-103.tpgi.com.au
# ```
#
# If you run the program a second time, you should see a second directory in the tf_logs/ directory:
#
# ```bash
# [Data_Science_Python] c.cui $:ls -l tf_logs/run*
# tf_logs/run-20180426090242:
# total 128
# -rw-r--r-- 1 caihaocui staff 62207 Apr 26 19:03 events.out.tfevents.1524733371.192-168-1-103.tpgi.com.au
# tf_logs/run-20180426090550:
# total 168
# -rw-r--r-- 1 caihaocui staff 83218 Apr 26 19:06 events.out.tfevents.1524733551.192-168-1-103.tpgi.com.au
# ```
# Great! Now it’s time to fire up the TensorBoard server. You need to activate your virtualenv environment if you created one, then start the server by running the tensorboard command, pointing it to the root log directory. This starts the TensorBoard web server, listening on port 6006 (which is “goog” written upside down):Next open a browser and go to http://0.0.0.0:6006/ (or http://localhost:6006/).
#
# Welcome to TensorBoard! In the Events tab you should see MSE on the right.
#
# ```bash
# [Data_Science_Python] c.cui $:tensorboard --logdir=tf_logs
#
# ```
# ## Name Scopes
# When dealing with more complex models such as neural networks, the graph can easily become cluttered with thousands of nodes. To avoid this, you can create name scopes to group related nodes. For example, let’s modify the previous code to define the error and mse ops within a name scope called "loss":
# +
reset_graph()
# Fresh timestamped log directory for this run.
now = datetime.utcnow().strftime("%Y%m%d%H%M%S")
root_logdir = "tf_logs"
logdir = "{}/run-{}/".format(root_logdir, now)
X = tf.placeholder(tf.float32, shape=(None, n + 1), name="X")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y")
theta = tf.Variable(tf.random_uniform([n + 1, 1], -1.0, 1.0, seed=42), name="theta")
y_pred = tf.matmul(X, theta, name="predictions")
# Group the loss ops under a "loss" name scope so they collapse into a single
# node in TensorBoard's graph view.
with tf.name_scope("loss") as scope: # <-- Name Scope
    error = y_pred - y
    mse = tf.reduce_mean(tf.square(error), name="mse")
# +
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
training_op = optimizer.minimize(mse)
init = tf.global_variables_initializer()
mse_summary = tf.summary.scalar('MSE', mse)
file_writer = tf.summary.FileWriter(logdir, tf.get_default_graph())
# +
n_epochs = 200
batch_size = 500
n_batches = int(np.ceil(m / batch_size))
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(n_epochs):
        for batch_index in range(n_batches):
            X_batch, y_batch = fetch_batch(epoch, batch_index, batch_size)
            if batch_index % 10 == 0:
                # Log an MSE summary every 10 mini-batches.
                summary_str = mse_summary.eval(feed_dict={X: X_batch, y: y_batch})
                step = epoch * n_batches + batch_index
                file_writer.add_summary(summary_str, step)
            sess.run(training_op, feed_dict={X: X_batch, y: y_batch})
    best_theta = theta.eval()
file_writer.flush()
file_writer.close()
print("Best theta:")
print(best_theta)
# -
# Ops created inside the scope carry it as a name prefix (e.g. "loss/mse").
print(error.op.name)
print(mse.op.name)
# In TensorBoard, the mse and error nodes now appear inside the loss namespace, which appears collapsed by default.
# ## inside Jupyter
# If you want to take a peek at the graph directly within Jupyter, you can use the show_graph() function
# +
from IPython.display import clear_output, Image, display, HTML

def strip_consts(graph_def, max_const_size=32):
    """Strip large constant values from graph_def so the visualization stays small."""
    strip_def = tf.GraphDef()
    for n0 in graph_def.node:
        n = strip_def.node.add()
        n.MergeFrom(n0)
        if n.op == 'Const':
            tensor = n.attr['value'].tensor
            size = len(tensor.tensor_content)
            if size > max_const_size:
                tensor.tensor_content = b"<stripped %d bytes>"%size
    return strip_def

def show_graph(graph_def, max_const_size=32):
    """Visualize a TensorFlow graph inline in Jupyter via TensorBoard's web component."""
    if hasattr(graph_def, 'as_graph_def'):
        graph_def = graph_def.as_graph_def()
    strip_def = strip_consts(graph_def, max_const_size=max_const_size)
    code = """
        <script>
          function load() {{
            document.getElementById("{id}").pbtxt = {data};
          }}
        </script>
        <link rel="import" href="https://tensorboard.appspot.com/tf-graph-basic.build.html" onload=load()>
        <div style="height:600px">
          <tf-graph-basic id="{id}"></tf-graph-basic>
        </div>
    """.format(data=repr(str(strip_def)), id='graph'+str(np.random.rand()))
    # BUG FIX: the quotes inside `code` must be HTML-escaped as &quot; so the
    # document can be embedded in the iframe's srcdoc attribute; the previous
    # replace('"', '"') was a no-op.
    iframe = """
        <iframe seamless style="width:1200px;height:620px;border:0" srcdoc="{}"></iframe>
    """.format(code.replace('"', '&quot;'))
    display(HTML(iframe))
# -
# Render the current default graph inline in the notebook.
show_graph(tf.get_default_graph())
# # Modularity
# Suppose you want to create a graph that adds the output of two rectified linear units (ReLU). A ReLU computes a linear function of the inputs, and outputs the result if it is positive, and 0 otherwise, $h(x) = max\{w*x+b, 0\}$.
# +
# An Ugly Flat Code
reset_graph()
n_features = 3
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
# Two ReLU units built entirely by hand -- note the duplicated structure.
w1 = tf.Variable(tf.random_normal((n_features, 1)), name="weights1")
w2 = tf.Variable(tf.random_normal((n_features, 1)), name="weights2")
b1 = tf.Variable(0.0, name="bias1")
b2 = tf.Variable(0.0, name="bias2")
z1 = tf.add(tf.matmul(X, w1), b1, name="z1")
z2 = tf.add(tf.matmul(X, w2), b2, name="z2")
relu1 = tf.maximum(z1, 0., name="relu1")
#relu2 = tf.maximum(z1, 0., name="relu2") # Oops, cut&paste error! Did you spot it? z1 should be z2.
relu2 = tf.maximum(z2, 0., name="relu2") # fix the wrong copy/paste
output = tf.add(relu1, relu2, name="output")
show_graph(tf.get_default_graph())
# -
# Such repetitive code is hard to maintain and error-prone (in fact, this code contains a cut-and-paste error; did you spot it?). It would become even worse if you wanted to add a few more ReLUs.
#
# Fortunately, TensorFlow lets you stay DRY (Don’t Repeat Yourself): simply create a function to build a ReLU. The following code creates five ReLUs and outputs their sum (note that add_n() creates an operation that will com‐ pute the sum of a list of tensors):
#
# Much better, using a function to build the ReLUs:
# +
reset_graph()
def relu(X):
    """Build one ReLU unit: max(X.w + b, 0)."""
    w_shape = (int(X.get_shape()[1]),1)
    w = tf.Variable(tf.random_normal(w_shape), name='weights')
    b = tf.Variable(0.0, name="bias")
    z = tf.add(tf.matmul(X,w), b, name="z")
    return tf.maximum(z, 0., name="relu")
n_features = 3
X = tf.placeholder(tf.float32, shape=(None, n_features), name ="X")
# Five independent ReLUs summed together.
relus = [relu(X) for i in range(5)]
output = tf.add_n(relus, name = "output")
file_writer = tf.summary.FileWriter("logs/relu1", tf.get_default_graph())
show_graph(tf.get_default_graph())
file_writer.close()
# -
# Using name scopes, you can make the graph much clearer. Simply move all the con‐ tent of the `relu()` function inside a name scope.
# +
reset_graph()
def relu(X):
    """Same ReLU builder, but grouped under a 'relu' name scope for a cleaner graph."""
    with tf.name_scope("relu"):
        w_shape = (int(X.get_shape()[1]),1)
        w = tf.Variable(tf.random_normal(w_shape), name='weights')
        b = tf.Variable(0.0, name="bias")
        z = tf.add(tf.matmul(X,w), b, name="z")
        return tf.maximum(z, 0., name="max")
n_features = 3
X = tf.placeholder(tf.float32, shape=(None, n_features), name ="X")
relus = [relu(X) for i in range(5)]
output = tf.add_n(relus, name = "output")
file_writer = tf.summary.FileWriter("logs/relu2", tf.get_default_graph())
show_graph(tf.get_default_graph())
file_writer.close()
# -
# ## Sharing Variables
# If you want to share a variable between various components of your graph, one simple option is to create it first, then pass it as a parameter to the functions that need it.
# For example, suppose you want to control the ReLU threshold (currently hardcoded to 0) using a shared threshold variable for all ReLUs. You could just create that variable first, and then pass it to the `relu()` function:
# +
reset_graph()
def relu(X, threshold):
    """ReLU with an externally supplied threshold tensor, shared by all callers."""
    with tf.name_scope("relu"):
        w_shape = (int(X.get_shape()[1]), 1)                        # not shown in the book
        w = tf.Variable(tf.random_normal(w_shape), name="weights")  # not shown
        b = tf.Variable(0.0, name="bias")                           # not shown
        z = tf.add(tf.matmul(X, w), b, name="z")                    # not shown
        return tf.maximum(z, threshold, name="max")
n_features = 3
# Single shared threshold variable, created once and passed to every ReLU.
threshold = tf.Variable(0.0, name="threshold")
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
relus = [relu(X, threshold) for i in range(5)]
output = tf.add_n(relus, name="output")
show_graph(tf.get_default_graph())
# -
# This works fine: now you can control the threshold for all ReLUs using the threshold
# variable. However, if there are many shared parameters such as this one, it will be
# painful to have to pass them around as parameters all the time. Many people create a
# Python dictionary containing all the variables in their model, and pass it around to
# every function.
# +
reset_graph()
def relu(X):
    """ReLU sharing its threshold via an attribute stored on the function object."""
    with tf.name_scope("relu"):
        # Lazily create the shared variable on the first call only.
        if not hasattr(relu, "threshold"):
            relu.threshold = tf.Variable(0.0, name="threshold")
        w_shape = int(X.get_shape()[1]), 1                          # not shown in the book
        w = tf.Variable(tf.random_normal(w_shape), name="weights")  # not shown
        b = tf.Variable(0.0, name="bias")                           # not shown
        z = tf.add(tf.matmul(X, w), b, name="z")                    # not shown
        return tf.maximum(z, relu.threshold, name="max")
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
relus = [relu(X) for i in range(5)]
output = tf.add_n(relus, name="output")
show_graph(tf.get_default_graph())
# +
reset_graph()
# Create the "relu/threshold" variable once via get_variable ...
with tf.variable_scope("relu"):
    threshold = tf.get_variable("threshold", shape=(),
                                initializer=tf.constant_initializer(0.0))
# ... then two equivalent ways of reusing an existing variable:
with tf.variable_scope("relu", reuse=True):
    threshold = tf.get_variable("threshold")
with tf.variable_scope("relu") as scope:
    scope.reuse_variables()
    threshold = tf.get_variable("threshold")
reset_graph()
def relu(X):
    """ReLU that fetches the shared 'relu/threshold' variable from the scope."""
    with tf.variable_scope("relu", reuse=True):
        threshold = tf.get_variable("threshold")
        w_shape = int(X.get_shape()[1]), 1                          # not shown
        w = tf.Variable(tf.random_normal(w_shape), name="weights")  # not shown
        b = tf.Variable(0.0, name="bias")                           # not shown
        z = tf.add(tf.matmul(X, w), b, name="z")                    # not shown
        return tf.maximum(z, threshold, name="max")
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
# The shared variable must be created before relu() tries to reuse it.
with tf.variable_scope("relu"):
    threshold = tf.get_variable("threshold", shape=(),
                                initializer=tf.constant_initializer(0.0))
relus = [relu(X) for relu_index in range(5)]
output = tf.add_n(relus, name="output")
show_graph(tf.get_default_graph())
file_writer = tf.summary.FileWriter("logs/relu6", tf.get_default_graph())
file_writer.close()
# +
reset_graph()
def relu(X):
    """ReLU creating (or reusing, when the caller sets reuse) 'relu/threshold'."""
    with tf.variable_scope("relu"):
        threshold = tf.get_variable("threshold", shape=(), initializer=tf.constant_initializer(0.0))
        w_shape = (int(X.get_shape()[1]), 1)
        w = tf.Variable(tf.random_normal(w_shape), name="weights")
        b = tf.Variable(0.0, name="bias")
        z = tf.add(tf.matmul(X, w), b, name="z")
        return tf.maximum(z, threshold, name="max")
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
# First call creates the shared variable; reuse_variables() lets later calls share it.
with tf.variable_scope("", default_name="") as scope:
    first_relu = relu(X)  # create the shared variable
    scope.reuse_variables()  # then reuse it
    relus = [first_relu] + [relu(X) for i in range(4)]
output = tf.add_n(relus, name="output")
file_writer = tf.summary.FileWriter("logs/relu8", tf.get_default_graph())
file_writer.close()
show_graph(tf.get_default_graph())
# +
reset_graph()
def relu(X):
    """ReLU whose threshold comes from the caller-managed variable scope."""
    threshold = tf.get_variable("threshold", shape=(),
                                initializer=tf.constant_initializer(0.0))
    w_shape = (int(X.get_shape()[1]), 1)                        # not shown in the book
    w = tf.Variable(tf.random_normal(w_shape), name="weights")  # not shown
    b = tf.Variable(0.0, name="bias")                           # not shown
    z = tf.add(tf.matmul(X, w), b, name="z")                    # not shown
    return tf.maximum(z, threshold, name="max")
X = tf.placeholder(tf.float32, shape=(None, n_features), name="X")
relus = []
for relu_index in range(5):
    # Reuse the scope on every call after the first so all five ReLUs share one threshold.
    with tf.variable_scope("relu", reuse=(relu_index >= 1)) as scope:
        relus.append(relu(X))
output = tf.add_n(relus, name="output")
file_writer = tf.summary.FileWriter("logs/relu9", tf.get_default_graph())
file_writer.close()
show_graph(tf.get_default_graph())
# -
| 02.TensorFlow/TF1.x/TensorFlow_04_Visualizing the Graph and Training Curves Using TensorBoard.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .r
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: R
# language: R
# name: ir
# ---
# +
# Interactive inputs: cell count, expected viability (%), and epitopes per bead set.
amount_of_cells = as.integer(readline(prompt = "Enter cell count : "));
expected_viability = as.integer(readline(prompt = "Enter expected cell viability (%) : "));
epitopes_per_beadset <- as.integer(readline(prompt = "Enter epitopes per bead set : "));
# Derived bead and reagent quantities.
viablecells <- amount_of_cells * (expected_viability / 100)
total_beads_needed <- viablecells * 2  # 2 beads per viable cell
ul_of_beads_needed <- total_beads_needed * 7.5
acd28_needed = total_beads_needed * 0.12
bead_pools = total_beads_needed / 2  # beads are split into 2 equal pools
# NOTE(review): the unrounded amount uses a factor of 25 ul per pool while the
# rounded amount uses 30 ul -- confirm which factor is intended.
amount_per_monomer_needed = (25 * bead_pools) / epitopes_per_beadset
rounded_amount_per_monomer_needed = round(((30 * bead_pools) / epitopes_per_beadset), digits = -1)
total_monomer_needed = (25 * total_beads_needed)
uv_monomer_needed = (rounded_amount_per_monomer_needed / 2) * ((epitopes_per_beadset * 2))
# UVmonomer is diluted 1:40 in PBS; take + pbs add back up to the total volume.
take_uvmonomer = round(uv_monomer_needed / 40, digits = -1)
pbs_needed =(uv_monomer_needed - take_uvmonomer)
uv_monomer_per_well = uv_monomer_needed / (epitopes_per_beadset * 2)
# Report the protocol amounts to the user.
message("For ", viablecells,"M viable cells, you will need ", total_beads_needed,"M beads. Take ",
        ul_of_beads_needed,"ul of beads wash and resuspended in 60ul PBS-T. Add ", acd28_needed,
        "ul aCD28 and incubate. After washing, resuspend the beads, split in 2 equal parts and add ", amount_per_monomer_needed,"ul for each of the ", epitopes_per_beadset, " pMonomers per set of beads(total 2 sets of beads with ", epitopes_per_beadset, " epitopes each).")
message("UVmonomer needed ", uv_monomer_needed,"ul => Take ", take_uvmonomer,"ul UVmononer and mix with ",pbs_needed,"ul of PBS. Add ", uv_monomer_per_well,"ul of UVmonomer per well and match with an equal amount of peptide (diluted 1:50).")
message("ul of Beads:", ul_of_beads_needed)
message("ul of aCD28:", acd28_needed)
message("ul per pMonomer per set:", amount_per_monomer_needed)
message("UVmonomer needed: ", take_uvmonomer, "ul UVmonomer + ", pbs_needed," ul PBS add ", uv_monomer_per_well," ul per well")
# -
| beadfinalcalculator (5).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ---
#
# _You are currently looking at **version 1.0** of this notebook. To download notebooks and datafiles, as well as get help on Jupyter notebooks in the Coursera platform, visit the [Jupyter Notebook FAQ](https://www.coursera.org/learn/python-text-mining/resources/d9pwm) course resource._
#
# ---
# # Assignment 2 - Introduction to NLTK
#
# In part 1 of this assignment you will use nltk to explore the <NAME> novel <NAME>. Then in part 2 you will create a spelling recommender function that uses nltk to find words similar to the misspelling.
# ## Part 1 - Analyzing <NAME>
# +
import nltk
import pandas as pd
import numpy as np
# If you would like to work with the raw text you can use 'moby_raw'
with open('moby.txt', 'r') as f:
    moby_raw = f.read()
# If you would like to work with the novel in nltk.Text format you can use 'text1'
# Tokenise once; both views (token list and nltk.Text) are reused by the questions below.
moby_tokens = nltk.word_tokenize(moby_raw)
text1 = nltk.Text(moby_tokens)
# -
# ### Example 1
#
# How many tokens (words and punctuation symbols) are in text1?
#
# *This function should return an integer.*
# +
def example_one():
    """Total number of tokens (words and punctuation symbols) in the novel."""
    return len(nltk.word_tokenize(moby_raw)) # or alternatively len(text1)
example_one()
# -
# ### Example 2
#
# How many unique tokens (unique words and punctuation) does text1 have?
#
# *This function should return an integer.*
# +
def example_two():
    """Number of unique tokens (unique words and punctuation) in the novel."""
    return len(set(nltk.word_tokenize(moby_raw))) # or alternatively len(set(text1))
example_two()
# -
# ### Example 3
#
# After lemmatizing the verbs, how many unique tokens does text1 have?
#
# *This function should return an integer.*
# +
from nltk.stem import WordNetLemmatizer
def example_three():
    """Number of unique tokens after lemmatizing every token as a verb ('v')."""
    lemmatizer = WordNetLemmatizer()
    lemmatized = [lemmatizer.lemmatize(w,'v') for w in text1]
    return len(set(lemmatized))
example_three()
# -
# ### Question 1
#
# What is the lexical diversity of the given text input? (i.e. ratio of unique tokens to the total number of tokens)
#
# *This function should return a float.*
# +
def answer_one():
    """Lexical diversity: the ratio of unique tokens to total tokens."""
    unique = example_two()
    total = example_one()
    return unique / float(total)
answer_one()
# -
# ### Question 2
#
# What percentage of tokens is 'whale'or 'Whale'?
#
# *This function should return a float.*
# +
def answer_two():
    """Percentage of all tokens that are 'whale' or 'Whale'."""
    dist = nltk.FreqDist(text1)
    whale_count = dist['whale'] + dist['Whale']
    return whale_count / float(example_one()) * 100
answer_two()
# -
# ### Question 3
#
# What are the 20 most frequently occurring (unique) tokens in the text? What is their frequency?
#
# *This function should return a list of 20 tuples where each tuple is of the form `(token, frequency)`. The list should be sorted in descending order of frequency.*
# +
def answer_three():
    """Return the 20 most frequent tokens as (token, frequency) pairs,
    sorted in descending order of frequency.

    FreqDist.most_common(20) already yields (token, count) pairs sorted by
    descending count, replacing the manual ascending sort followed by a
    reversed slice (`[-1:-21:-1]`).
    """
    dist = nltk.FreqDist(text1)
    return dist.most_common(20)
answer_three()
# -
# ### Question 4
#
# What tokens have a length of greater than 5 and frequency of more than 150?
#
# *This function should return an alphabetically sorted list of the tokens that match the above constraints. To sort your list, use `sorted()`*
# +
def answer_four():
    """Alphabetically sorted tokens longer than 5 characters with frequency above 150."""
    dist = nltk.FreqDist(text1)
    # Iterate (token, count) pairs so each count is looked up only once.
    matches = (token for token, count in dist.items()
               if len(token) > 5 and count > 150)
    return sorted(matches)
answer_four()
# -
# ### Question 5
#
# Find the longest word in text1 and that word's length.
#
# *This function should return a tuple `(longest_word, length)`.*
# +
def answer_five():
    """Return (longest_word, length) over the unique tokens of the novel.

    Uses max(..., key=len) instead of a manual tracking loop. The original
    left `long_w` unbound (NameError) if the token set were empty; max()
    raises the clearer ValueError in that case.
    """
    longest = max(set(nltk.word_tokenize(moby_raw)), key=len)
    return (longest, len(longest))
answer_five()
# -
# ### Question 6
#
# What unique words have a frequency of more than 2000? What is their frequency?
#
# "Hint: you may want to use `isalpha()` to check if the token is a word and not punctuation."
#
# *This function should return a list of tuples of the form `(frequency, word)` sorted in descending order of frequency.*
# +
def answer_six():
    """Return (frequency, word) pairs for alphabetic words occurring more
    than 2000 times, sorted in descending order of frequency."""
    dist = nltk.FreqDist(text1)
    frequent = [(count, word) for word, count in dist.items()
                if count > 2000 and word.isalpha()]
    frequent.sort(key=lambda pair: pair[0])
    return frequent[::-1]
answer_six()
# -
# ### Question 7
#
# What is the average number of tokens per sentence?
#
# *This function should return a float.*
# +
def answer_seven():
    """Average number of tokens per sentence."""
    sentence_count = len(nltk.sent_tokenize(moby_raw))
    return example_one() / float(sentence_count)
answer_seven()
# -
# ### Question 8
#
# What are the 5 most frequent parts of speech in this text? What is their frequency?
#
# *This function should return a list of tuples of the form `(part_of_speech, frequency)` sorted in descending order of frequency.*
# +
def answer_eight():
    """Return the 5 most frequent part-of-speech tags as (tag, frequency)
    pairs, sorted in descending order of frequency.

    nltk.FreqDist replaces the hand-rolled counting dict, and most_common(5)
    replaces the manual sort-and-slice.
    """
    tags = nltk.pos_tag(moby_tokens)
    # Count only the tag of each (token, tag) pair.
    tag_dist = nltk.FreqDist(tag for _, tag in tags)
    return tag_dist.most_common(5)
answer_eight()
# -
# ## Part 2 - Spelling Recommender
#
# For this part of the assignment you will create three different spelling recommenders, that each take a list of misspelled words and recommends a correctly spelled word for every word in the list.
#
# For every misspelled word, the recommender should find the word in `correct_spellings` that has the shortest distance*, and starts with the same letter as the misspelled word, and return that word as a recommendation.
#
# *Each of the three different recommenders will use a different distance measure (outlined below).
#
# Each of the recommenders should provide recommendations for the three default words provided: `['cormulent', 'incendenece', 'validrate']`.
# +
from nltk.corpus import words
# Reference vocabulary for the recommenders: the full nltk 'words' corpus (a large English word list).
correct_spellings = words.words()
# -
# ### Question 9
#
# For this recommender, your function should provide recommendations for the three default words provided above using the following distance metric:
#
# **[Jaccard distance](https://en.wikipedia.org/wiki/Jaccard_index) on the trigrams of the two words.**
#
# *This function should return a list of length three:
# `['cormulent_reccomendation', 'incendenece_reccomendation', 'validrate_reccomendation']`.*
# +
def answer_nine(entries=['cormulent', 'incendenece', 'validrate']):
    """For each entry, recommend the dictionary word that starts with the
    same letter and has the highest Jaccard similarity on character
    trigrams (None when no candidate shares any trigram)."""
    recommendations = []
    for entry in entries:
        entry_grams = {entry[i:i + 3] for i in range(len(entry) - 2)}
        candidates = [w for w in correct_spellings if w.startswith(entry[0])]
        best_score, best = 0, None
        for candidate in candidates:
            cand_grams = {candidate[i:i + 3] for i in range(len(candidate) - 2)}
            overlap = len(entry_grams & cand_grams)
            # Jaccard similarity = |A ∩ B| / |A ∪ B|.
            score = overlap / (len(entry_grams) + len(cand_grams) - overlap)
            if score > best_score:
                best_score, best = score, candidate
        recommendations.append(best)
    return recommendations
answer_nine()
# -
# ### Question 10
#
# For this recommender, your function should provide recommendations for the three default words provided above using the following distance metric:
#
# **[Jaccard distance](https://en.wikipedia.org/wiki/Jaccard_index) on the 4-grams of the two words.**
#
# *This function should return a list of length three:
# `['cormulent_reccomendation', 'incendenece_reccomendation', 'validrate_reccomendation']`.*
# Scratch cell: preview the character 4-grams nltk.ngrams produces for a sample word.
set(nltk.ngrams('cormulent', n=4))
# +
# def answer_ten(entries=['cormulent', 'incendenece', 'validrate']):
# output = []
# for entry in entries:
# entry_n = set(nltk.ngrams(entry, n=4))
# start_word = [t for t in correct_spellings if t.startswith(entry[0])]
# j_score = 1
# best_word = None
# for word in start_word:
# word_n = set(nltk.ngrams(word, n=4))
# this_j = nltk.jaccard_distance(entry_n, word_n)
# if this_j < j_score:
# j_score = this_j
# best_word = word
# output.append(best_word)
# return output# Your answer here
# answer_ten()
# +
def answer_ten(entries=['cormulent', 'incendenece', 'validrate']):
    """For each entry, recommend the dictionary word that starts with the
    same letter and has the highest Jaccard similarity on character
    4-grams (None when no candidate shares any 4-gram)."""
    def four_grams(word):
        # All contiguous 4-character substrings of the word.
        return {word[i:i + 4] for i in range(len(word) - 3)}
    def jaccard(a, b):
        shared = len(a & b)
        return shared / (len(a) + len(b) - shared)
    results = []
    for entry in entries:
        target = four_grams(entry)
        best, best_score = None, 0
        for word in correct_spellings:
            if not word.startswith(entry[0]):
                continue
            score = jaccard(target, four_grams(word))
            if score > best_score:
                best, best_score = word, score
        results.append(best)
    return results
answer_ten()
# -
# ### Question 11
#
# For this recommender, your function should provide recommendations for the three default words provided above using the following distance metric:
#
# **[Edit distance on the two words with transpositions.](https://en.wikipedia.org/wiki/Damerau%E2%80%93Levenshtein_distance)**
#
# *This function should return a list of length three:
# `['cormulent_reccomendation', 'incendenece_reccomendation', 'validrate_reccomendation']`.*
# +
def answer_eleven(entries=['cormulent', 'incendenece', 'validrate']):
    """For each entry, recommend the same-first-letter dictionary word with
    the smallest edit distance allowing adjacent transpositions (optimal
    string alignment variant of Damerau-Levenshtein)."""
    output = []
    for entry in entries:
        candidates = [w for w in correct_spellings if w.startswith(entry[0])]
        best_word, best_dist = None, 1024
        for word in candidates:
            rows, cols = len(entry) + 1, len(word) + 1
            # dp[i][j] = edit distance between entry[:i] and word[:j].
            dp = [[0] * cols for _ in range(rows)]
            for i in range(rows):
                dp[i][0] = i
            for j in range(cols):
                dp[0][j] = j
            for i in range(1, rows):
                for j in range(1, cols):
                    sub_cost = 0 if entry[i - 1] == word[j - 1] else 1
                    dp[i][j] = min(dp[i - 1][j] + 1,          # deletion
                                   dp[i][j - 1] + 1,          # insertion
                                   dp[i - 1][j - 1] + sub_cost)  # substitution/match
                    # Transposition of two adjacent characters.
                    if (i > 1 and j > 1
                            and entry[i - 1] == word[j - 2]
                            and entry[i - 2] == word[j - 1]):
                        dp[i][j] = min(dp[i][j], dp[i - 2][j - 2] + 1)
            distance = dp[-1][-1]
            if distance < best_dist:
                best_dist, best_word = distance, word
        output.append(best_word)
    return output
answer_eleven()
# -
| Applied Text Mining in Python/Assignment+2.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Customer Churn Analysis - Predictive Modeling (Logistic Regression and Decision Tree)
# > In this blog, we will create Logistic Regression and Decision Tree Models to predict the customer churn probability based on the historical churn data having customer demographics and purchase patterns.
#
# - toc: true
# - comments: true
# - categories: [python, churn, retention, classification modeling, churn probability, sklearn, LogisticRegression, DecisionTreeClassifier]
# ## Loading the Data
# Let's import the pandas library and load the csv file having the churn data we saved in the last post on Exploratory Data Analysis - [Link](https://rahuls0959.github.io/ds-blog/python/churn/retention/classification%20modeling/exploratory%20data%20analysis/matplotlib/seaborn/2020/06/06/_Customer-Churn-Analysis_Exploratory-Data-Analysis.html)
import pandas as pd
# Load the cleaned Telco churn dataset saved at the end of the EDA post.
df = pd.read_csv('Telco-Customer-Churn-Final.csv')
# Structural overview: column dtypes and non-null counts.
df.info()
# As we know from the previous post, the dataset has 7032 customers' data and contains both numerical and categorical features. Let's now proceed with data pre-processing before fitting a machine learning model to predict churn.
# ## Data Pre-Processing
# We can see above that the columns 'customerID' and 'grouped_tenure' are not relevant in building the model; hence, we remove them from the data.
#Remove unnecessary columns from beginning of data ('customerID', 'grouped_tenure')
# Drop columns that are not usable as model features (identifier and derived tenure bucket).
df2=df.drop(columns=['customerID','grouped_tenure'])
df2.head(5)
# Next, our target variable "Churn" has its values in binary string form ("Yes", "No") which the model will not be able to understand. Let's convert it into binary numeric form ("1", "0") where 1: customer churned and 0: customer not churned
# Replace churn string values with numeric binary values (1 = churned, 0 = retained).
df2.Churn.replace({"Yes":1, "No":0}, inplace = True)
# ### Dummy Variables
# Now, we must deal with other categorical variables. It would be difficult for the model to interpret "Yes" or "No" values of the categorical variables. The dummy variables function is performed which adds new binary features with [0,1] values to overcome this issue. Let's implement the function and see how the data looks!
# Utilize pandas dummy variable function to create dummy variable series for categorical data
# One-hot encode every remaining categorical column; numeric columns pass through unchanged.
dummy_df = pd.get_dummies(df2)
dummy_df.info()
# Our new DataFrame features are above and now include dummy variables.
# ## Visualising correlations
# Before starting to build a model, it is a good practise to visualise interdependencies between the variables. This will help us to identify the variables which co-move with target variable and also with each other. We can then include only those features which add significance to the model and avoid model performance issues due to multicollinearity, etc.
#
# Reference: https://machinelearningmastery.com/how-to-use-correlation-to-understand-the-relationship-between-variables/
# ### What is correlation?
# The statistical relationship between two variables is referred to as their correlation. A correlation could be positive, meaning both variables move in the same direction, or negative, meaning that when one variable’s value increases, the other variables’ values decrease. Correlation can also be neutral or zero, meaning that the variables are unrelated.
#
# The performance of some algorithms can deteriorate if two or more variables are tightly related, called multicollinearity. An example is linear regression, where one of the offending correlated variables should be removed in order to improve the skill of the model.
#
# We may also be interested in the correlation between the input variables and the output variable in order to provide insight into which variables may or may not be relevant as input for developing a model.
# ### How to calculate correlation?
# First calculate the covariance.
# The calculation of the covariance between two variables X and Y is as follows:
#
# cov(X, Y) = (sum (x - mean(X)) * (y - mean(Y)) ) * 1/(n-1)
#
# The Pearson correlation coefficient can be used to summarize the strength of the linear relationship between two data samples.
#
# The Pearson’s correlation coefficient is calculated as the covariance of the two variables divided by the product of the standard deviation of each data sample. It is the normalization of the covariance between the two variables to give an interpretable score between -1 and 1.
#
# Pearson's correlation coefficient = covariance(X, Y) / (stdv(X) * stdv(Y))
# Let's calculate the correlations for our dataset!
# +
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings('ignore')
# %matplotlib inline
# Plot correlations between our features and our target feature, churn, as a bar plot, sort by descending order
# (DataFrame.corr() computes pairwise Pearson correlations; we keep only the 'Churn' column).
dummy_df.corr()['Churn'].sort_values(ascending = False).plot('bar', figsize = (20, 10), color = 'Navy')
plt.title('Feature Correlation w/ Churn', fontsize = 30, fontweight = 'bold')
plt.xticks(fontsize = 15, fontweight = 'bold')
plt.yticks(fontsize = 12, fontweight = 'bold',)
plt.tight_layout()
# +
import seaborn as sns
# Visualize a heatmap for our features correlation values
plt.figure(figsize = (30, 15))
# Full pairwise correlation matrix across all dummy-encoded features.
x = sns.heatmap(dummy_df.corr(), cmap = 'YlGnBu')
# -
# From our heatmap and correlation barplot, we see that monthly contract, and a lack of online security or techsupport have the strongest positive correlation with churn. 2-Year Contracts and not having internet service are the most negatively correlated with churn.
# ## Building the Model
# ### Splitting the data into Target and Input Variables
# Separate the data into target feature ('y' variable) as "Churn" and predictive features ('X' variables) as all the other features except "Churn".
# Separate the target vector (y) from the predictor matrix (X).
# Establish our target feature, churn, as our y feature
y = dummy_df.Churn.values
# Drop our target feature from our features dataframe
X = dummy_df.drop('Churn', axis = 1)
# Save dataframe column titles to list for reassigning after min max scale
cols = X.columns
# Note, we have taken all the predictors in this case. This is normally called "kitchen sink" approach. This can create issues such as multi-collinearity in case two 'x' variables are highly correlated. This can be found by looking at the p-value of the coeeficients or parameters for each predictor and removing the variables which are not significant to improve the model performance.
# ### Scaling the data
# All data is numeric now. Comparing a binary value [0,1] with continuous values will not give relevant information because they all have different units. We can normalise all the data between [0,1] using Min-Max scaling to address this issue.
# +
# Import the necessary sklearn method
from sklearn.preprocessing import MinMaxScaler
# Instantiate min-max scaling object
mm = MinMaxScaler()
# Fit and transform our feature dataframe
# (rescales every column to [0, 1] so binary and continuous features are comparable;
# fit_transform returns a bare array, hence the DataFrame wrapper)
X = pd.DataFrame(mm.fit_transform(X))
# Reassign column names so new dataframe has corresponding names
X.columns = cols
X.head()
# -
# ### Splitting data into Training and Validation Sets
# We will now split data into training and validation sets with 70% data used to build the model and rest 30% data held back to evaluate the model performance.
# +
from sklearn.model_selection import train_test_split
# Hold out 30% of the data for evaluation; a fixed random_state keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .30, random_state = 33)
# -
# ## Logistic Regression
# ### Logistic Function
# Logistic regression is widely used for binary classification problems. Logistic function (or sigmoid function) is used at the core of this algorithm.
#
# Logistic function is an S-shaped curve that can take any real-valued number and map it into a value between 0 and 1, but never exactly at those limits:
#
# 1 / (1 + e^-value)
# 
# ### Logistic Regression Equation
# Logistic regression gives the predicted output as probability of success class. Logistic regression is denoted by the equation:
#
# p(X) = e^(b0 + b1*X) / (1 + e^(b0 + b1*X))
# where,
# p(X) is the predicted output or probability of success class,
# b0 is the bias or intercept term and
# b1 is the coefficient for the input variable (X).
# Each column or variable in the input data has an associated 'b' coefficient (a constant real value) thatis learned from the training data.
#
# Logistic regression is indeed a linear method, but the predictions are transformed using the logistic function.
# Rearranging the above equation, we get:
#
# ln(p(X) / 1 – p(X)) = b0 + b1 * X
# where, left-hand side is the log-odds of the success class and right-hand side is the linear combination of inputs or predictors.
#
# Since the logistic regression output gives probabilities, we can use these probabilities to give the binary class (0 or 1) with 1 as the success class based on cutoff probability (say, 0.5) such as:
#
# 0 if p(X)<0.5 and 1 if p(X)>0.5
# ### Fitting the Model
# Let's fit the Logistic Regression Model on our training data.
# +
from sklearn.linear_model import LogisticRegression
# Instantiate a Logistic Regression model with default parameters
# (random_state fixed only so repeated runs produce the same fitted model)
logreg = LogisticRegression(random_state=33)
# Fit the model to our X and y training sets
logreg.fit(X_train, y_train)
# -
# As above, model is fit using default parameters. These parameters can be changed to improve the model accuracy.
#
# Let's start with default parameters and understood how the model performs. We can use any random state so that the model gives the same output on running the model at different iterations. Here, we have set the random state=33, however it can be any whole number.
# At this point, our model is actually completely built and can be used for predicting the outputs. Let’s take a look at evaluating our performance.
# ### Evaluating Model Performance
# #### Accuracy
# How many times the model is able to predict correctly. It is calculated as:
#
# Accuracy = # Successful predictions / Total # of datapoints
# score() returns mean accuracy; comparing train vs test reveals over-/under-fitting.
print("train accuracy: ",round(logreg.score(X_train,y_train),3))
print("test accuracy: ", round(logreg.score(X_test,y_test),3))
# Since the model accuracy is equally good on test data (held back or unseen data) as on training data (on which model is fitted), our model is performing well. Also, accuracy is ~80% which is quite good and can be optimised further by changing the parameters while fitting the model.
# #### Confusion Matrix
# It is used to depict the number of observation in each of the four classes:
#
# 1. True Positives (TP) - Model predicted customer would churn (1) and they actually do churn (1)
# 2. True Negatives (TN) - Model predicted customer wouldn't churn (0) and they actually don't churn (0)
# 3. False Positives (FP) - Model predicted customer would churn (1) and they actually don't churn (0)
# 4. False Negatives (FN) - Model predicted customer wouldn't churn (0) and they actually do churn (1)
#
# Depending on the situation, each of these have different weightages. In this case, false negatives are more important to pay attention to as it is worse for us to predict customer not churning (and hence not taking any preventive action) when actual in real life customer churns, losing the potential revenue.
# +
# Confusion matrix
from sklearn.metrics import confusion_matrix
# Pass actual test and predicted target test outcomes to function
y_hat_test = logreg.predict(X_test)
# Rows are actual classes, columns are predicted classes (0 = no churn, 1 = churn).
cnf_matrix = confusion_matrix(y_test, y_hat_test)
print('Confusion Matrix: \n', cnf_matrix)
# -
# From the above confusion matrix, we can see that: TP=1399, FP=162, TN=288, FN=261. In order to derive more insights from these absolute numbers, we can use classification report and produce more descriptive metrics.
# #### Classification Report
# Precision: How many times the customer actually churned in real life out of all the times the model predicted customer would churn.
#
# Precision = True Positives / Predicted Positives = TP/ (TP + FP)
#
# Recall: How many times the model is able to correct predict the customer would churn out of the total times customer churned in real life.
#
# Recall = True Positives / Total Positives = TP/ (TP + FN)
#
# F1 Score: a single indicator to capture both precision and recall - harmonic mean of precision and recall. Penalised model heavily if it is skewed towards precision or recall.
#
# F1 = 2(Precision * Recall)/(Precision + Recall)
#
# Precision tells us how precise the predictions are, whereas recall tells the percentage of success class captured correctly by the model.
# +
#Classification Report
from sklearn.metrics import classification_report
# Per-class precision, recall, F1 and support for the logistic regression predictions.
print(classification_report(y_test, y_hat_test))
# -
# As we can see, precision for success class (churn=1) is quite low at 64% i.e. only 64% of the customers predicted to be churned actually churn. Recall at 52% is also bit lower and means that we are able to predict only 52% of the customers who would actually churn accurately. However, accuracy is quite high at 80%. Model building is an iterative process and these metrics can be further improved by changing the model parameters.
# #### Receiver Operator Characteristic Curve (ROC Curve) and Area Under Curve (AUC)
# ROC is a visual graph between True Positive Rate (recall - TPR) and False Positive Rate (FPR).
#
# True Positive Rate = TP/ (TP+FN); False Positive Rate = FP/ (TN+FP)
#
# The AUC will give us a singular numeric metric to compare instead of a visual representation. An AUC = 1 would represent a perfect classifier, and an AUC = 0.5 represents a classifier which only has 50% precision. This metric quantifies the overall accuracy of our classifier model.
#
# Let's plot the AUC curve and find the AUC for our classifier!
# Best performing models will have an ROC curve that hugs the upper left corner of the graph. This would represent that we correctly classify the positives much more often than we incorrectly classify them. The dotted line in the graph represents a 1:1 linear relationship and is representative of a bad classifier, because the model guesses one incorrectly for every correct guess.
# +
#ROC Curve and AUC Metric
from sklearn.metrics import roc_curve,auc
import matplotlib.pyplot as plt
y_hat_test_proba = logreg.predict_proba(X_test)[:,1] # get the probabilities of the positive class
# roc_curve sweeps the decision threshold, returning FPR/TPR pairs at each cutoff.
fpr, tpr, thresholds = roc_curve(y_test, y_hat_test_proba,pos_label=1)
roc_auc= round(auc(fpr, tpr),2)
plt.plot(fpr, tpr,lw=2,label='(AUC = {})'.format(roc_auc))
plt.plot([0,1],[0,1],'k--')  # diagonal = random (no-skill) classifier reference
plt.legend(loc=4)
plt.xlabel('fpr',fontweight='bold')
plt.ylabel('tpr',fontweight='bold')
plt.title('Logistic Regression ROC curve', fontweight='bold')
plt.show()
# -
# ROC curve is plotted by calculating the TPR and FPR at different cutoff probabilities. As we can that there is a tradeoff between TPR and FPR. We can increase TPR by increasing the cutoff probability, but FPR would also continue to increase. The best model is where TPR is high and FPR is low, i.e., which stays towards the upper left corner.
#
# Notice how the test curve hugs the upper left corner and has a very strong AUC value. With such a strong model, we can now turn our eyes to tuning some model parameters/hyperparameters to slowly elevate our scores.
# ## Decision Tree
# Decision Trees are popular classification alogorithms, though they are also be used in predictive regression problems.
#
# The representation of decision tree model is a binary tree. A node of the tree represents a single input variable (X) and a split point on that variable, assuming the variable is numeric. The leaf nodes (also called terminal nodes) of the tree contain an output variable (y) which is used to make a prediction.
#
# Once created, a tree can be navigated with a new row of data following each branch with the splits until a final prediction is made.
# ### Splitting the Tree
# With more than one attribute taking part in the decision-making process, it is necessary to decide the relevance and importance of each of the attributes, thus placing the most relevant at the root node and further traversing down by splitting the nodes. As we move further down the tree, the level of impurity or uncertainty decreases, thus leading to a better classification or best split at every node. To decide the same, splitting measures such as Information Gain, Gini Index, etc. are used.
# #### Gini Index
# Gini index or Gini impurity measures the degree or probability of a particular variable being wrongly classified when it is randomly chosen.
#
# If all the elements belong to a single class, then it can be called pure. The degree of Gini index varies between 0 and 1, where 0 denotes that all elements belong to a certain class or if there exists only one class, and 1 denotes that the elements are randomly distributed across various classes. A Gini Index of 0.5 denotes equally distributed elements into some classes.
#
# Let's say we have a variable named Gender (Male and Female) each having 5 entries. 2 males out of 5 churned and 5 females out of 5 churned.
#
# Gini Index = P(Male)*{1- P(Churn|Male)^2 - P(No Churn|Male)^2} + P(Female)*{1- P(Churn|Female)^2 - P(No Churn|Female)^2}
# Gini Index = (5/10)*{1 - (2/5)^2 - (3/5)^2} + (5/10)*{1 - (5/5)^2 - (0/5)^2}
# Gini Index = 0.5*0.48 + 0.5*0 = 0.24
#
# Thus, the variable with the least gini index is placed at the root node and similarly the tree is traversed till the leaf nodes by placing other features having lower gini index above the others with higher gini index. Also, the root node is most important in classification for this reason.
# ### Building the Model
# Let's fit the decision tree model on our training data! We had already defined input and output variables and split the data into training and test sets before running Logistic Regression, so no need to repeat those steps here.
# +
from sklearn.tree import DecisionTreeClassifier
# Instantiate a Decision Tree model with default parameters
# (max_depth=3 caps tree depth to limit overfitting; random_state fixed for reproducibility)
dt = DecisionTreeClassifier(max_depth=3,random_state=33)
# Fit the model to our X and y training sets
dt.fit(X_train, y_train)
# -
# As above, model is fit using default parameters. These parameters can be changed to improve the model accuracy.
#
# Let's start with default parameters and understood how the model performs. We can use any random state so that the model gives the same output on running the model at different iterations. Here, we have set the random state=33, however it can be any whole number. Also, max depth is taken as 3 which means data will be split till three levels. If we don't choose this, there will be overfitting of the data and model performance on the test data will be poor.
# ### Evaluating Model Performance
# #### Accuracy
# Compare train vs test accuracy for the tree; a large gap would indicate overfitting.
print("train accuracy: ",round(dt.score(X_train,y_train),3))
print("test accuracy: ", round(dt.score(X_test,y_test),3))
# Model performance is good as test data accuracy is nearly same as training data accuracy.
# #### Confusion Matrix
# +
# Confusion matrix
from sklearn.metrics import confusion_matrix
# Pass actual test and predicted target test outcomes to function
# (same evaluation as for logistic regression, now on the tree's predictions)
y_hat_test = dt.predict(X_test)
cnf_matrix = confusion_matrix(y_test, y_hat_test)
print('Confusion Matrix: \n', cnf_matrix)
# -
# From the above confusion matrix, we can see that: TP= 1466, FP=95, TN=196, FN=353.
# #### Classification Report
# +
#Classification Report
from sklearn.metrics import classification_report
# Per-class precision, recall, F1 and support for the decision tree predictions.
print(classification_report(y_test, y_hat_test))
# -
# For the success class i.e.Churn=1, recall is pretty low. Churn prediction out of total cases which actually churned is only 36%. Hence, this leads to not taking proper action on other cases which churned but we predicted to be not churning. The model needs to be improved by tuning the parameters/ hyper-parameters.
# #### Receiver Operator Characteristic Curve (ROC Curve) and Area Under Curve (AUC)
# +
#ROC Curve and AUC Metric
from sklearn.metrics import roc_curve,auc
import matplotlib.pyplot as plt
y_hat_test_proba = dt.predict_proba(X_test)[:,1] # get the probabilities of the positive class
fpr, tpr, thresholds = roc_curve(y_test, y_hat_test_proba,pos_label=1)
roc_auc= round(auc(fpr, tpr),2)
plt.plot(fpr, tpr,lw=2,label='(AUC = {})'.format(roc_auc))
plt.plot([0,1],[0,1],'k--')  # diagonal = random (no-skill) classifier reference
plt.legend(loc=4)
plt.xlabel('fpr',fontweight='bold')
plt.ylabel('tpr',fontweight='bold')
plt.title('Decision Tree ROC curve', fontweight='bold')
plt.show()
# -
# Area under curve is quite good at 0.82.
# ### Visualisation of Decision Tree Model
# Scikit-Learn library provides a nice framework to see how the tree is formed and what all attributes/ features are used while splitting the tree.
# +
from sklearn.tree import plot_tree
plt.figure(figsize=(25,10))
# Leaf class labels, in the same order as the encoded target values (0, 1).
target=['No Churn', 'Churn']
a = plot_tree(dt,
feature_names=cols,
class_names=target,
filled=True,
rounded=True,
fontsize=14)
# -
# From the above tree, we can see that Contract_Month-to-month is the most important feature in deciding the customer churn. Customers with Month-to-month contract <=0.5 (i.e 0) are split on LHS and customers with month-to-month contract >0.5 (i.e. 1) are split on RHS. Gini index for this variable is 0.393. Similarly, this is done for internal nodes or branches of the trees. At the leaf nodes we can see that predicted class is shown based on which class the majority values are falling. We can see For the leaf node on the extreme right, we can see that 455 values fall in "No Churn" and 313 in "Churn", hence the node class is predicted as "No Churn". However, the probability of error is quite high here. Hence, we can improve the model performance by changing the parameters by fitting the model such as max_depth, splitting_criterion, min_samples_split, etc.
# ## Limitations
# 1. Model is built on default parameters. The model performance can be improved by changing the parameters. Cross validaion and grid search is a technique which can be used to find the parameters which gives maximum model accuracy.
# 2. All the features/ variables are used in Logistic Regression Model. The model performance can be improved by using only features which are highly correlated with churn. Also, in case two features are having higher correlation with each other, one of them can be included while building the model.
# ## Conclusion
# The model performance is quite good in the first round as we achieved ~80% accuracy. However, model building is an iterative process. We have worked on only two classification algorithms - Logistic Regression and Decision Tree - in this blog. We can also try different classification modeling algorithms such as Support Vector Machine (SVM), Random Forest, XGBoost to check if the model performance improves. I will work on these algorithms in the next series of blogs.
#
# If you have any comments or suggestions please comment below or reach out to me at - [Twitter](https://twitter.com/rahulsingla0959) or [LinkedIn](https://www.linkedin.com/in/rahul-singla1/)
# ## References
# [1] https://scikit-learn.org/stable/modules/preprocessing.html
#
# [2] https://machinelearningmastery.com/logistic-regression-for-machine-learning/
#
# [3] https://machinelearningmastery.com/implement-decision-tree-algorithm-scratch-python/
#
# [4] https://blog.quantinsti.com/gini-index/
| 2020-06-07_Customer Churn Analysis_Predictive Modeling.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PythonData
# language: python
# name: pythondata
# ---
# Dependencies
import pandas as pd
import os
# Load in file
movie_file = "Resources/movie_scores.csv"
movie_df = pd.read_csv(movie_file)
# Read and display the CSV with Pandas
movie_df = pd.read_csv(movie_file)
movie_df.head()
# List all the columns in the table
movie_df.columns
# We only want IMDb data, so create a new table that takes the Film and all the columns relating to IMDB
movieIMBd_df = movie_df[['FILM','IMDB','IMDB_norm_round','IMDB_norm_round','IMDB_user_vote_count']]
movieIMBd_df.head()
# We only like good movies, so find those that scored over 7, and ignore the norm rating
good_movies = movieIMBd_df.loc[movieIMBd_df['IMDB'] > 7, ['FILM','IMDB','IMDB_user_vote_count']]
good_movies.head()
# Find less popular movies--i.e., those with fewer than 20K votes
unkown_movies_df = good_movies.loc[good_movies['IMDB_user_vote_count'] < 20000, :]
unkown_movies_df.head()
| good_movies_unsolved.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from math import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from QuantLib import *
from PyFin.Math.Distributions import CumulativeNormalDistribution
plt.style.use('fivethirtyeight')
# -
# # 1. Functions
# -------------------
def bs_theoretical_price(payoff, spot, ttm, volatility, rf_rate, finance_rate=None):
    """Black-Scholes price of `payoff` via QuantLib's blackFormula.

    The forward is grown at `finance_rate` while discounting uses `rf_rate`.
    Bug fix: test `finance_rate is None` explicitly — the original truthiness
    check (`if not finance_rate`) also overwrote a legitimate financing rate
    of exactly 0.0 with the risk-free rate.
    """
    if finance_rate is None:
        finance_rate = rf_rate
    forward = spot * exp(finance_rate * ttm)       # forward price at expiry
    std_dev = volatility * sqrt(ttm)               # total std dev over the horizon
    discount = exp(-rf_rate * ttm)                 # risk-free discount factor
    return blackFormula(payoff.optionType(), payoff.strike(), forward, std_dev, discount)
# +
# Standard-normal CDF used by the analytic delta below.
_dist = CumulativeNormalDistribution()
def _exercise(payoff, spot):
    # Scalar payoff evaluation; vectorized over spot arrays via frompyfunc below.
    return payoff(spot)
_exercise = np.frompyfunc(_exercise, 2, 1)
def _create_path(rsg, ln_spot, drift, diffusion, delta_t):
    """Draw one geometric-Brownian spot path starting at exp(ln_spot).

    Returns an array of time_steps + 1 spot levels (initial spot included).
    """
    gaussians = np.array(rsg.nextSequence().value())
    log_increments = delta_t * drift + np.sqrt(delta_t) * diffusion * gaussians
    cumulative_logs = ln_spot + np.cumsum(log_increments)
    return np.exp(np.concatenate(([ln_spot], cumulative_logs)))
def _bs_delta(option_type, finance_rate, volatility, ttm, spot, strike):
    """Black-Scholes delta of one option (call in [0, 1], put in [-1, 0])."""
    log_moneyness = log(spot / strike)
    total_drift = (finance_rate + 0.5 * (volatility ** 2)) * ttm
    d1 = (log_moneyness + total_drift) / volatility / sqrt(ttm)
    delta_call = _dist(d1)
    if option_type == Option.Put:
        return delta_call - 1.
    elif option_type == Option.Call:
        return delta_call
# Vectorize so it broadcasts over arrays of maturities / spots.
_bs_delta = np.frompyfunc(_bs_delta, 6, 1)
def _hedging_on_path(payoff, ttm, time_grids, spot_path, volatility, inflations, rf_rate, finance_rate, trading_cost):
    # Discounted P&L of delta-hedging the option along one simulated spot path.
    # time_grids is descending (ttm -> 0), so this difference is the step size.
    delta_t = time_grids[0] - time_grids[1]
    # Delta at the start of each rebalancing interval (spot_path[:-1]).
    deltas = _bs_delta(payoff.optionType(), finance_rate, volatility, time_grids, spot_path[:-1], payoff.strike())
    borrows = spot_path[:-1] * deltas                      # cash borrowed to carry the hedge
    finance_cost = borrows * finance_rate * delta_t
    stock_pnl = deltas * (spot_path[1:] - spot_path[:-1])  # P&L of the stock position per interval
    # Proportional cost on each re-hedge trade; the first trade sets up the full delta.
    trading_slipge = np.abs(np.concatenate(([deltas[0]], np.diff(deltas)))) * spot_path[:-1] * trading_cost
    # NOTE(review): `inflations` presumably compounds each interval's net P&L to
    # maturity at the financing rate — confirm against its construction in hedge_cost.
    hedging_pnl = ((stock_pnl - finance_cost - trading_slipge) * inflations).sum()
    exercise_pnl = payoff(spot_path[-1])                   # payout owed at expiry
    total_cost = hedging_pnl - exercise_pnl
    # Discount the terminal P&L back to the valuation date.
    return exp(-rf_rate * ttm) * total_cost
class HedgeAnalysor(object):
    """Monte-Carlo analysis of the cost of delta-hedging a vanilla option.

    The hedger prices/re-hedges with the implied `volatility` while the
    simulated paths evolve with `realized_vol`; `hedge_cost` returns the
    distribution of discounted hedging costs across simulated paths.
    """

    def __init__(self,
                 payoff,
                 trading_cost=0.):
        self.payoff = payoff
        self.trading_cost = trading_cost  # proportional cost per unit of stock traded

    @staticmethod
    def _prepare_parameters(rf_rate, finance_rate, underlying_risk_return):
        """Default the financing rate and the real-world drift to the risk-free rate."""
        if finance_rate is None:
            finance_rate = rf_rate
        if underlying_risk_return is None:
            underlying_risk_return = rf_rate
        return finance_rate, underlying_risk_return

    def exercise(self, spots):
        """Evaluate the payoff on an array of terminal spot levels."""
        return _exercise(self.payoff, spots)

    def _hedge_path(self, rsg, ttm, time_grids, ln_spot, volatility, drift, diffusion, delta_t, inflations, rf_rate, finance_rate):
        """Simulate one spot path and return its discounted hedging P&L."""
        spot_path = _create_path(rsg, ln_spot, drift, diffusion, delta_t)
        return _hedging_on_path(self.payoff, ttm, time_grids, spot_path, volatility, inflations, rf_rate, finance_rate, self.trading_cost)

    def hedge_cost(self,
                   rf_rate,
                   volatility,
                   realized_vol,
                   ttm,
                   finance_rate=None,
                   underlying_risk_return=None,
                   spot=1.,
                   time_steps=50,
                   simulations=100,
                   seed=20):
        """Return per-path hedging costs (positive = cost to the hedger).

        Gaussian increments come from a seeded Mersenne-Twister sequence, so
        results are reproducible for a given `seed`.
        """
        rng = MersenneTwisterUniformRng(seed)
        rsg = MersenneTwisterUniformRsg(dimensionality=time_steps,
                                        rng=rng)
        rsg = InvCumulativeMersenneTwisterGaussianRsg(rsg)
        finance_rate, underlying_risk_return = self._prepare_parameters(rf_rate,
                                                                        finance_rate,
                                                                        underlying_risk_return)
        print("risk free: {0:.02f}%".format(rf_rate*100))
        print('finance rate: {0:.02f}%'.format(finance_rate*100))
        print('underlying risk return: {0:.02f}%'.format(underlying_risk_return*100))
        # Bug fix: the two vol lines previously printed the raw fraction (e.g.
        # "0.30%" for a 30% vol); scale by 100 like every other rate above.
        print('implied vol: {0:.02f}%'.format(volatility*100))
        print('realized vol: {0:.02f}%'.format(realized_vol*100))
        print('number of simulations: {0}'.format(simulations))
        print('time to maturity: {0} yrs.'.format(ttm))
        print('time steps: {0}'.format(time_steps))
        print('trading cost: {0:0.4f}%'.format(self.trading_cost*100))
        print('payoff: type={0}, k={1}'.format(self.payoff.optionType(), self.payoff.strike()))
        ln_spot = log(spot)
        delta_t = ttm / time_steps
        # Paths evolve under the real-world drift and the *realized* vol.
        drift = underlying_risk_return - 0.5 * (realized_vol ** 2)
        diffusion = realized_vol
        # Descending grid of remaining maturities: ttm, ttm - dt, ..., dt.
        time_grids = np.linspace(ttm, 0, num=time_steps, endpoint=False)
        inflations = np.exp(finance_rate * (time_grids - delta_t))
        hedging_cost_batch = np.zeros(simulations)
        for i in range(simulations):
            hedging_cost = self._hedge_path(rsg,
                                            ttm,
                                            time_grids,
                                            ln_spot,
                                            volatility,
                                            drift,
                                            diffusion,
                                            delta_t,
                                            inflations,
                                            rf_rate,
                                            finance_rate)
            hedging_cost_batch[i] = hedging_cost
        # _hedging_on_path returns P&L; negate so positive numbers are costs.
        return -hedging_cost_batch
# +
# Baseline scenario: frictionless hedging (zero trading cost).
rf_rate = 0.04
finance_rate = 0.06
underlying_risk_return = -0.15
strike = 1.
volatility = 0.30          # implied vol used for pricing and deltas
realized_vol = 0.30        # vol actually realized by the simulated paths
ttm = 0.25
spot = 1.
trading_cost = 0.0
simulations = 50000
time_steps = int(ttm * 250)  # daily rebalancing, 250 trading days/year
payoff = PlainVanillaPayoff(Option.Call, strike)
hf = HedgeAnalysor(payoff, trading_cost)
# +
# %%time
# Simulate the hedging cost distribution and the Black-Scholes reference price.
res = hf.hedge_cost(rf_rate=rf_rate,
                    volatility=volatility,
                    realized_vol=realized_vol,
                    ttm=ttm,
                    spot=spot,
                    finance_rate=finance_rate,
                    underlying_risk_return=underlying_risk_return,
                    simulations=simulations,
                    time_steps=time_steps)
bs_price = bs_theoretical_price(payoff, spot, ttm, volatility, rf_rate, finance_rate)
# +
# Histogram of hedging costs with the theoretical price and key percentiles.
fig, axes = plt.subplots(1, 1, figsize=(12, 6), sharex=True)
axes = [axes]
for i, ax in enumerate(axes):
    ax.hist(res, bins=50)
    ax.axvline(x=bs_price, color='red', linestyle='dashed', label='Theoretical: {0:.02f}%'.format(bs_price*100))
    ax.axvline(x=res.mean(), color='green', linestyle='dashed', label='Hedging Mean')
    ax.axvline(x=np.percentile(res, 1), color='yellow', linestyle='dashed', label='Hedging per. 1%')
    ax.axvline(x=np.percentile(res, 5), color='black', linestyle='dashed', label='Hedging per. 5%')
    ax.axvline(x=np.percentile(res, 95), color='black', linestyle='dashed', label='Hedging per. 95%')
    ax.axvline(x=np.percentile(res, 99), color='yellow', linestyle='dashed', label='Hedging per. 99%')
    ax.set_title("Hedging v.s. Theoretical (tc = {0}%, r_vol = {1:0.2f}%, k={2})".format(trading_cost*100,
                                                                                         realized_vol*100,
                                                                                         strike))
    ax.legend()
# -
# produce the table
index = ['理论', '对冲(平均)', '对冲(分位数 1%)', '对冲(分位数 5%)', '对冲(分位数 95%)', '对冲(分位数 99%)']
values = np.array([bs_price, res.mean(), np.percentile(res, 1), np.percentile(res, 5), np.percentile(res, 95), np.percentile(res, 99)])
rel_values = values / values[0] - 1.  # relative deviation from the theoretical price
df = pd.DataFrame(data={'成本': values, '相对': rel_values}, index=index)
df
# # 2. Base Parameters
# ----------------------
# +
# Base parameters for the scenario analysis; same as the frictionless run
# except for a non-zero trading cost.
rf_rate = rf_rate                        # no-op reassignments kept from notebook flow
finance_rate = finance_rate
underlying_risk_return = underlying_risk_return
strike = 1.
volatility = 0.30
realized_vol = 0.30
ttm = 0.25
spot = 1.
trading_cost = 0.0015                    # 0.15% proportional trading cost
simulations = 50000
time_steps = int(ttm * 250)
payoff = PlainVanillaPayoff(Option.Call, strike)
hf = HedgeAnalysor(payoff, trading_cost=trading_cost)
# -
# # 3. Scenario Analysis
# ---------------
# ## 2.1 Strike Scenarios
# -------------
strike_scenarios =[0.9, 1.0, 1.1]
# +
# One hedging simulation and one theoretical price per strike scenario.
simulations_res = []
bs_prices_res = []
for i, this_strike in enumerate(strike_scenarios):
    print("\nScenarios {0} ......".format(i+1))
    this_payoff = PlainVanillaPayoff(Option.Call, this_strike)
    this_bs_price = bs_theoretical_price(this_payoff, spot, ttm, volatility, rf_rate, finance_rate)
    hf = HedgeAnalysor(this_payoff, trading_cost)
    path_res = hf.hedge_cost(rf_rate=rf_rate,
                             volatility=volatility,
                             realized_vol=realized_vol,
                             time_steps=time_steps,
                             ttm=ttm,
                             finance_rate=finance_rate,
                             underlying_risk_return=underlying_risk_return,
                             spot=spot,
                             simulations=simulations)
    simulations_res.append(path_res)
    bs_prices_res.append(this_bs_price)
# +
# One histogram per strike scenario, annotated with theoretical price and percentiles.
fig, axes = plt.subplots(len(strike_scenarios), 1, figsize=(12, 6 * len(strike_scenarios)), sharex=True)
if not hasattr(axes, '__iter__'):
    axes = [axes]
for i, ax in enumerate(axes):
    res = simulations_res[i]
    ax.hist(res, bins=50)
    # Bug fix: the label previously formatted the raw price (e.g. "0.04%")
    # instead of scaling by 100 like every other figure in this notebook.
    ax.axvline(x=bs_prices_res[i], color='red', linestyle='dashed', label='Theoretical: {0:.02f}%'.format(bs_prices_res[i]*100))
    ax.axvline(x=res.mean(), color='green', linestyle='dashed', label='Hedging Mean')
    ax.axvline(x=np.percentile(res, 1), color='yellow', linestyle='dashed', label='Hedging per. 1%')
    ax.axvline(x=np.percentile(res, 5), color='black', linestyle='dashed', label='Hedging per. 5%')
    ax.axvline(x=np.percentile(res, 95), color='black', linestyle='dashed', label='Hedging per. 95%')
    ax.axvline(x=np.percentile(res, 99), color='yellow', linestyle='dashed', label='Hedging per. 99%')
    ax.set_title("Hedging v.s. Theoretical (tc = {0:.2f}%, r_vol = {1:0.2f}%, k={2})".format(trading_cost*100,
                                                                                             realized_vol*100,
                                                                                             strike_scenarios[i]))
    ax.legend()
# +
# produce the table (rows: theoretical / mean / percentiles; columns: strikes)
index = ['理论', '对冲(平均)', '对冲(分位数 1%)', '对冲(分位数 5%)', '对冲(分位数 95%)', '对冲(分位数 99%)']
col_names = ['{0:.0f}%'.format(t*100) for t in strike_scenarios]
values = np.zeros((len(index), len(col_names)))
for j, res in enumerate(simulations_res):
    this_values = np.array([bs_prices_res[j], res.mean(), np.percentile(res, 1), np.percentile(res, 5), np.percentile(res, 95), np.percentile(res, 99)])
    values[:, j] = this_values
df = pd.DataFrame(data=values, columns=col_names, index=index)
df
# -
# ## 2.2 Trading Cost Scenarios
# ---
trading_cost_scenarios =[0.0010, 0.0015, 0.0020]
# +
# Same payoff throughout, so a single theoretical price applies to all scenarios.
simulations_res = []
bs_price = bs_theoretical_price(payoff, spot, ttm, volatility, rf_rate, finance_rate)
for i, this_trading_cost in enumerate(trading_cost_scenarios):
    print("\nScenarios {0} ......".format(i+1))
    hf = HedgeAnalysor(payoff, this_trading_cost)
    path_res = hf.hedge_cost(rf_rate=rf_rate,
                             volatility=volatility,
                             realized_vol=realized_vol,
                             time_steps=time_steps,
                             ttm=ttm,
                             finance_rate=finance_rate,
                             underlying_risk_return=underlying_risk_return,
                             spot=spot,
                             simulations=simulations)
    simulations_res.append(path_res)
# +
# One histogram per trading-cost scenario.
fig, axes = plt.subplots(len(trading_cost_scenarios), 1, figsize=(12, 6 * len(trading_cost_scenarios)), sharex=True)
if not hasattr(axes, '__iter__'):
    axes = [axes]
for i, ax in enumerate(axes):
    res = simulations_res[i]
    ax.hist(res, bins=50)
    ax.axvline(x=bs_price, color='red', linestyle='dashed', label='Theoretical: {0:.02f}%'.format(bs_price*100))
    ax.axvline(x=res.mean(), color='green', linestyle='dashed', label='Hedging Mean')
    ax.axvline(x=np.percentile(res, 1), color='yellow', linestyle='dashed', label='Hedging per. 1%')
    ax.axvline(x=np.percentile(res, 5), color='black', linestyle='dashed', label='Hedging per. 5%')
    ax.axvline(x=np.percentile(res, 95), color='black', linestyle='dashed', label='Hedging per. 95%')
    ax.axvline(x=np.percentile(res, 99), color='yellow', linestyle='dashed', label='Hedging per. 99%')
    ax.set_title("Hedging v.s. Theoretical (tc = {0:.2f}%, r_vol = {1:0.2f}%, k={2})".format(trading_cost_scenarios[i]*100,
                                                                                             realized_vol*100,
                                                                                             strike))
    ax.legend()
# +
# produce the table (columns: trading-cost scenarios)
index = ['理论', '对冲(平均)', '对冲(分位数 1%)', '对冲(分位数 5%)', '对冲(分位数 95%)', '对冲(分位数 99%)']
col_names = ['{0:.2f}%'.format(t*100) for t in trading_cost_scenarios]
values = np.zeros((len(index), len(col_names)))
for j, res in enumerate(simulations_res):
    this_values = np.array([bs_price, res.mean(), np.percentile(res, 1), np.percentile(res, 5), np.percentile(res, 95), np.percentile(res, 99)])
    values[:, j] = this_values
df = pd.DataFrame(data=values, columns=col_names, index=index)
df
# -
# ## 2.3 Volatility Scenario
# -------------
volatility_scenarios = [0.20, 0.30, 0.40]
# +
# Realized-vol scenarios: hedging still uses the fixed implied vol, so the
# theoretical price is computed once outside the loop.
simulations_res = []
bs_price = bs_theoretical_price(payoff, spot, ttm, volatility, rf_rate, finance_rate)
hf = HedgeAnalysor(payoff, trading_cost)
for i, this_volatility in enumerate(volatility_scenarios):
    print("\nScenarios {0} ......".format(i+1))
    path_res = hf.hedge_cost(rf_rate=rf_rate,
                             volatility=volatility,
                             realized_vol=this_volatility,
                             time_steps=time_steps,
                             ttm=ttm,
                             finance_rate=finance_rate,
                             underlying_risk_return=underlying_risk_return,
                             spot=spot,
                             simulations=simulations)
    simulations_res.append(path_res)
# +
# One histogram per realized-vol scenario.
fig, axes = plt.subplots(len(volatility_scenarios), 1, figsize=(12, 6 * len(volatility_scenarios)), sharex=True)
if not hasattr(axes, '__iter__'):
    axes = [axes]
for i, ax in enumerate(axes):
    res = simulations_res[i]
    ax.hist(res, bins=50)
    # Bug fix: label typo 'Theoretica' -> 'Theoretical' (matches the other figures).
    ax.axvline(x=bs_price, color='red', linestyle='dashed', label='Theoretical: {0:.02f}%'.format(bs_price*100))
    ax.axvline(x=res.mean(), color='green', linestyle='dashed', label='Hedging Mean')
    ax.axvline(x=np.percentile(res, 1), color='yellow', linestyle='dashed', label='Hedging per. 1%')
    ax.axvline(x=np.percentile(res, 5), color='black', linestyle='dashed', label='Hedging per. 5%')
    ax.axvline(x=np.percentile(res, 95), color='black', linestyle='dashed', label='Hedging per. 95%')
    ax.axvline(x=np.percentile(res, 99), color='yellow', linestyle='dashed', label='Hedging per. 99%')
    ax.set_title("Hedging v.s. Theoretical (tc = {0:.2f}%, r_vol = {1:0.2f}%, k={2})".format(trading_cost*100,
                                                                                             volatility_scenarios[i]*100,
                                                                                             strike))
    ax.legend()
# +
# produce the table (columns: realized-vol scenarios; theoretical price is
# constant because it depends only on the implied vol)
index = ['理论', '对冲(平均)', '对冲(分位数 1%)', '对冲(分位数 5%)', '对冲(分位数 95%)', '对冲(分位数 99%)']
col_names = ['{0:.2f}%'.format(v*100) for v in volatility_scenarios]
values = np.zeros((len(index), len(col_names)))
for j, res in enumerate(simulations_res):
    this_values = np.array([bs_price, res.mean(), np.percentile(res, 1), np.percentile(res, 5), np.percentile(res, 95), np.percentile(res, 99)])
    values[:, j] = this_values
df = pd.DataFrame(data=values, columns=col_names, index=index)
df
# -
| examples/python/options/dynamical hedging.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/nin-ed/Vertically-Partitioned-Split-Learning/blob/master/Vertically_Partitioned_SplitNN.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="iUbZ9MjlEAlz" colab_type="text"
#
# + [markdown] id="H14AmGzhEB9h" colab_type="text"
# # Tutorial - Vertically Partitioned Split Learning
#
# For demonstration of this structure, MNIST model has been split vertically according to the integer assigned to the variable 'partition'.
#
# Images have a dimension of [28 x 28]. We will divide the dataset into some batches, in this case we divide it in 64. Then image dimensions will be resolved to [64 x 784] where row denotes the image and column denotes features of that image. Then we will split these 784 features to act like we have separate datasets containing unique features of an image across columns.
#
#
# + id="rmMpPyhmEWJj" colab_type="code" outputId="f242506c-1c75-4859-cf18-3dee7d3cab4d" colab={"base_uri": "https://localhost:8080/", "height": 81}
import syft, torch
from torch import nn, optim
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
# + [markdown] id="8jcd1auoE1hp" colab_type="text"
# Create hook and virtual workers - alice and bob
# + id="DSdmw9cHE4CR" colab_type="code" colab={}
# Hook PyTorch so tensors gain the PySyft send/move API, then create the two
# virtual workers: alice holds the partitioned features, bob holds the labels.
hook = syft.TorchHook(torch)
alice = syft.VirtualWorker(hook, id='alice')
bob = syft.VirtualWorker(hook, id='bob')
# + [markdown] id="8sP632wmE7Cx" colab_type="text"
# # Creating a class SplitNN
#
# Create a class SplitNN containing all the functionalities.
# + id="3IHmJKurE6Q8" colab_type="code" colab={}
class SplitNN(nn.Module):
    """Split neural network over vertically-partitioned data.

    `models` holds the bottom models (one per feature partition) followed by
    the top model that owns the labels; `optimizers` is parallel to `models`.
    """
    def __init__(self, models, optimizers, partition):
        super().__init__()
        self.models = models
        self.optimizers = optimizers
        self.partition = partition           # number of bottom (feature) partitions
        self.output = [None] * (partition)   # latest bottom-model outputs
    def zero_grads(self):
        """Clear gradients on every model's optimizer."""
        for opt in self.optimizers:
            opt.zero_grad()
    # Here x is a list having a batch of different partitioned datasets.
    def forward(self, x):
        for i in range(len(x)):
            self.output[i] = self.models[i](x[i])
        # Concatenating the output of various structures in bottom part (alice's location)
        total_out = torch.cat(tuple(self.output[i] for i in range(len(self.output))), dim=1)
        # Detach so autograd stops at the split; the gradient is propagated to
        # the bottom models manually in self.backward().
        if self.output[-1].location == self.models[-1].location:
            second_layer_inp = total_out.detach().requires_grad_()
        else:
            second_layer_inp = total_out.detach().move(self.models[-1].location).requires_grad_()
        self.second_layer_inp = second_layer_inp
        pred = self.models[-1](second_layer_inp)
        return pred
    def backward(self):
        """Propagate the top model's input gradient back into each bottom model."""
        second_layer_inp = self.second_layer_inp
        if self.output[-1].location == second_layer_inp.location:
            grad = second_layer_inp.grad.copy()
        else:
            grad = second_layer_inp.grad.copy().move(self.output[-1].location)
        # Bug fix: use self.partition instead of the module-level `partition`
        # global the original silently depended on.
        # NOTE(review): the slice width still comes from the module-level
        # `hidden_sizes` — assumes every bottom model outputs hidden_sizes[1]
        # features (true for models built by create_models); confirm if reused.
        i = 0
        while i < self.partition-1:
            self.output[i].backward(grad[:, hidden_sizes[1]*i : hidden_sizes[1]*(i+1)])
            i += 1
        # Last partition takes all remaining gradient columns.
        self.output[i].backward(grad[:, hidden_sizes[1]*i : ])
    def step(self):
        """Apply one optimizer step on every model."""
        for opt in self.optimizers:
            opt.step()
# + [markdown] id="AIFIJmvwFIpq" colab_type="text"
# Here function create_models, creates models according to the partitions of the model in the bottom part (alice's model).
#
# First we iterate 1 less than the partition size because of their same model structure. Since we divided the data in batches, last part may or may not have the size of the batch. So for this we calculate the remaining size of the dataset, and accordingly create the model.
#
# Then a model on the bob's machine is created having labels.
# + id="whMro6Z1FMLj" colab_type="code" colab={}
def create_models(partition, input_size, hidden_sizes, output_size):
    """Build the vertically-split model list.

    Returns `partition` bottom models (one per feature slice) followed by the
    single top model that consumes their concatenated hidden outputs.
    """
    # Features per slice. Fix: pure integer arithmetic instead of
    # int(input_size/partition * (partition-1)), which rounds through a float.
    base = input_size // partition
    models = list()
    # The first partition-1 slices all receive `base` input features.
    for _ in range(partition - 1):
        models.append(nn.Sequential(nn.Linear(base, hidden_sizes[0]),
                                    nn.ReLU(),
                                    nn.Linear(hidden_sizes[0], hidden_sizes[1]),
                                    nn.ReLU()))
    # The last slice absorbs the remainder when input_size is not divisible.
    rem = input_size - base * (partition - 1)
    models.append(nn.Sequential(nn.Linear(rem, hidden_sizes[0]),
                                nn.ReLU(),
                                nn.Linear(hidden_sizes[0], hidden_sizes[1]),
                                nn.ReLU()))
    # Top model (label holder's side): consumes the concatenated hidden outputs.
    models.append(nn.Sequential(nn.Linear(hidden_sizes[1]*partition, hidden_sizes[2]),
                                nn.ReLU(),
                                nn.Linear(hidden_sizes[2], output_size),
                                nn.LogSoftmax(dim=1)))
    return models
# + [markdown] id="cWJfvE3GFOqY" colab_type="text"
# Assign an integer to the partition variable indicating number of partitions available.
# + id="R9mquoxyFQ8S" colab_type="code" colab={}
partition = 3  # number of vertical feature partitions
# + [markdown] id="tJdbcXdsFURy" colab_type="text"
# Load the dataset.
# + id="ov2HOL6VFXxg" colab_type="code" outputId="7ae3831d-5a56-4ded-aec1-6f7312d0aa20" colab={"base_uri": "https://localhost:8080/", "height": 408, "referenced_widgets": ["615c17bed6714d3cbddc1567e3662994", "014e66393d4941c5999290cbdc998480", "<KEY>", "4b48f7959b9a4744831c0eca7441bdec", "9995ec1e59fd458e92f636a8a4a487cf", "<KEY>", "<KEY>", "<KEY>", "116a94edc55d4967a47b6e5b6709abfa", "f83ba30751c54a0685d47ad026cc81d9", "<KEY>", "3a43ed496986425d8d02d9ed58b1c335", "d92c1468c45a416d890834b554e1ed9e", "<KEY>", "93d434dfed4d44c1af43e4dad900fb55", "7a2be1f7aed74d38a4690c0806bee793", "<KEY>", "06b37e408c684d9f86f0e848bd30cc16", "<KEY>", "c8ad3635a3584b7ca7d33ddb0b2ac9a2", "<KEY>", "538ba1739c364d2bb88489440e817c7c", "<KEY>", "<KEY>", "<KEY>", "<KEY>", "aa638ae6917d4e708e540769922d6a5e", "<KEY>", "6395fb1a70f1469eaf79f143cedfbedc", "f116c4e20ed84a55b264c920733618f0", "cfbfef66e1844d528e5efa0609a6785c", "6ddf89f9787b4c80aec1fe8b8e023e3b"]}
# Normalize MNIST images to mean 0.5 / std 0.5 and batch them.
transform = transforms.Compose([transforms.ToTensor(),
                              transforms.Normalize((0.5,), (0.5,))])
trainset = datasets.MNIST('/content/sample_data/mnist', download=True, train=True, transform=transform)
trainloader = DataLoader(trainset, batch_size=64, shuffle=True)
# + [markdown] id="pgT5xDUtFksM" colab_type="text"
# Initialize the sizes.
# + id="QfAldJzjFq6Z" colab_type="code" colab={}
input_size = 784             # 28 x 28 flattened pixels
hidden_sizes = [128, 256, 512]
output_size = 10             # digit classes
# + [markdown] id="ZoBrA77uFr1A" colab_type="text"
# Partition the data as needed to imitate that we have different datasets having different features for our image.
# + id="fQXY2wEZF9Tq" colab_type="code" colab={}
# 3 lists to contain 3 segmented datasets.
image_set1 = list()
image_set2 = list()
image_set3 = list()
labels = list()
# Segment the datalength in number of partitions
# (the last slice keeps the remainder: 261 + 261 + 262 = 784 columns)
distr = int(input_size/partition)
for image, label in trainloader:
    """Here we will set the image to [64 x 784] and split columns so that each
      list contains unique features for each batch of images which are
      arranged row-wise."""
    image = image.view(image.shape[0], -1)
    image_set1.append(image[:, 0:distr])
    image_set2.append(image[:, distr:distr*2])
    image_set3.append(image[:, distr*2:])
    labels.append(label)
# + [markdown] id="syokshGTGIfg" colab_type="text"
# Here we assign output from create_models to a variable models.
#
# Now, according to these models, we are creating different optimizers for different model.
# + id="TdQWjSw_GKxq" colab_type="code" colab={}
# Build one model per feature partition plus the top model, each with its own SGD optimizer.
models = create_models(partition, input_size, hidden_sizes, output_size)
optimizers = [optim.SGD(model.parameters(), lr=0.01) for model in models]
# + [markdown] id="8wsT-GJbGNp7" colab_type="text"
# Now append the model locations. All the partitioned models are situated in alice's machine and the model with labels in bob's machine.
#
# After this, send each model to its location.
# + id="wzivQFinGRHK" colab_type="code" colab={}
# Bottom models live on alice; the label-side top model lives on bob.
model_locations = list()
for i in range(partition):
    model_locations.append(alice)
model_locations.append(bob)
for model, location in zip(models, model_locations):
    model.send(location)
# + [markdown] id="GVT4pzB8GWVL" colab_type="text"
# Create a object for class SplitNN.
# + id="Qxbt9VbjGYQ7" colab_type="code" colab={}
splitNN = SplitNN(models, optimizers, partition)
# + [markdown] id="MMl88cGrGaGo" colab_type="text"
# Define a train function.
# + id="Rt70k3upGbvt" colab_type="code" colab={}
def train(x, target, splitnn):
    """Run one optimization step of the split network and return the batch loss."""
    splitnn.zero_grads()
    prediction = splitnn.forward(x)
    # Negative log-likelihood matches the LogSoftmax output of the top model.
    loss = nn.NLLLoss()(prediction, target)
    loss.backward()
    splitnn.backward()
    splitnn.step()
    return loss
# + [markdown] id="BOYMedbVGddf" colab_type="text"
# Since we have 2 partitions, we will run the lists in parallel which is made by splitting the dataset from trainloader. Send them to their respective machines i.e. images in alice's machine and labels in bob's machine.
# + id="isJctZN_Gfi-" colab_type="code" outputId="c14ba29e-ba84-4eb9-d2e2-f364ba8767f4" colab={"base_uri": "https://localhost:8080/", "height": 364}
epochs = 20
for i in range(epochs):
    total_loss = 0
    for x1, x2, x3, y in zip(image_set1, image_set2, image_set3, labels):
        # Send the feature partitions to the data holder (alice) and the
        # labels to the label holder (bob) before the split forward pass.
        x1, x2 = x1.send(models[0].location), x2.send(models[0].location)
        x3 = x3.send(models[0].location)
        y = y.send(models[-1].location)
        loss = train([x1, x2, x3], y, splitNN)
        total_loss += loss.get()
    else:
        # NOTE(review): for/else always runs this branch here (no break above);
        # a plain statement after the loop would be equivalent.
        print(f"Epoch: {i+1}... Training Loss: {total_loss/len(image_set1)}")
# + id="45I2kyXtG3kB" colab_type="code" colab={}
| Vertically_Partitioned_SplitNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # <NAME>:
# Estamos en plena competición para ganar el concurso de tiro con arco en Sherwood. Con nuestro arco y flechas disparamos sobre una diana e intentamos acertar lo más cerca posible del centro.
#
# El centro de la diana viene representado por los valores (0, 0) en los ejes de coordenadas.
#
# ## Objetivos:
# * estructuras de datos: listas, conjuntos, tuplas
# * operadores lógicos: if-elif-else
# * bucle: while/for
# * cálculo de mínimo (sorting opcional)
#
# ## Descripción:
# En el espacio de 2 dimensiones un punto se puede definir por un par de valores que corresponden a la coordenada horizontal (x) y a la vertical (y). El espacio puede quedar dividido en 4 zonas (cuadrantes): Q1, Q2, Q3, Q4. Cuyo punto de unión único es el punto (0, 0).
#
# Si un punto se encuentra en Q1 tanto su coordenada x como la y son positivas. Te dejo un enlace a wikipedia para que te familiarices con estos cuadrantes.
#
# https://es.wikipedia.org/wiki/Coordenadas_cartesianas
#
# https://es.wikipedia.org/wiki/Distancia_euclidiana
#
# 
#
# ## Tareas
# 1. <NAME> es famoso por acertar a una flecha con otra flecha. ¿Lo ha conseguido?
# 2. Calcula cuántos flechazos han caido en cada cuadrante.
# 3. Halla el punto más cercano al centro. Calcula su distancia al centro
# 4. Si la diana tiene un radio de 9, calcula el número de flechas que hay que recoger al bosque.
# +
puntos = [(4,5), (-0,2), (4,7), (1,-3), (3,-2), (4,5),
          (3,2), (5,7), (-5,7), (2,2), (-4,5), (0,-2),
          (-4,7), (-1,3), (-3,2), (-4,-5), (-3,2),
          (5,7), (5,7), (2,2), (9, 9), (-8, -9)]

# 1. Robin Hood es famoso por acertar a una flecha con otra flecha. ¿Lo ha conseguido?
# Bug fix: the original compared each arrow only with the previous one, so it
# missed repeated points that are not consecutive (e.g. (4,5) at positions 0
# and 5). Track every point seen so far and report any repeat.
vistos = set()       # landing points seen so far
repetidos = set()    # points hit by more than one arrow
for punto in puntos:
    if punto in vistos:
        repetidos.add(punto)
        print(f"Lo ha conseguido! en {punto}")
    else:
        vistos.add(punto)
# 2. Calcula cuántos flechazos han caido en cada cuadrante.
q1, q2, q3, q4 = 0, 0, 0, 0
x, y = 0, 1  # tuple indices for the x / y coordinates
for i in range(0, len(puntos)):
    if puntos[i][x] > 0 and puntos[i][y] > 0:
        q1 += 1
    elif puntos[i][x] < 0 and puntos[i][y] < 0:
        q3 += 1
    elif puntos[i][x] > 0 and puntos[i][y] < 0:
        q4 += 1
    else:
        # NOTE(review): points lying on an axis (x == 0 or y == 0) also fall
        # into this branch and are counted in Q2 — confirm that is intended.
        q2 += 1
print(f"I:{q1} II:{q2} III:{q3} IV:{q4}")
# 3. Halla el punto más cercano al centro. Calcula su distancia al centro
# Definir una función que calcula la distancia al centro puede servir de ayuda.
from math import sqrt

def distancia_euclidiana(p1, p2):
    """Return the Euclidean distance between two 2-D points given as (x, y) tuples."""
    dx = p2[0] - p1[0]
    dy = p2[1] - p1[1]
    return sqrt(dx**2 + dy**2)
centro = (0,0)
# Distance of every arrow from the bullseye, in the same order as `puntos`.
distancias = []
for n in puntos:
    distancias.append(distancia_euclidiana(centro, n))
min_distancia = min(distancias)
# Report every arrow that ties for the minimum distance.
for i in range(0, len(distancias)):
    if min_distancia == distancias[i]:
        print(f"La flecha más cercana al centro fue {puntos[i]}")
# 4. Si la diana tiene un radio de 9, calcula el número de flechas que hay que recoger al bosque.
flechas_out = 0
for n in puntos:
    # NOTE(review): a distance of exactly 9 counts as outside (>=) — confirm
    # whether an arrow on the rim should be treated as a hit instead.
    if distancia_euclidiana(centro, n) >= 9:
        flechas_out += 1
print(f"Hay que recoger {flechas_out} flechas")
| 01_PREWORK/week03/pra/04-robin-hood/your-solution-here/robin-hood-ESP.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: PySpark
# language: python
# name: pysparkkernel
# ---
spark
# Load the raw node embeddings from HDFS; infer column types from the CSV header.
df = spark.read.csv("hdfs:///Projects/paysim/Resources/embeddings_features.csv",inferSchema=True,header=True)
feature_names = df.columns
# NOTE(review): every column is renamed to "_<name>c" — presumably to avoid
# clashes when joining with the node-features columns below; confirm the convention.
feature_names = ["_" + s + "c" for s in feature_names]
df = df.toDF(*feature_names)
feature_names = df.columns
print(feature_names)
# Restore the first column's name so it can serve as the join key.
feature_names[0]= 'id'
df = df.toDF(*feature_names)
# +
from hops import featurestore
# Register the embeddings as a new feature-group version (statistics disabled
# to keep the write fast).
featurestore.create_featuregroup(
    df,
    "embeddings_features",
    description="embeddings features",
    descriptive_statistics=False,
    feature_correlation=False,
    feature_histograms=False,
    cluster_analysis=False,
    featurestore=featurestore.project_featurestore(),
    featuregroup_version= featurestore.get_latest_featuregroup_version("embeddings_features") + 1
)
# -
# +
from hops import featurestore
# Fetch the latest node-features group to join with the embeddings.
nodes = featurestore.get_featuregroup("node_features", featuregroup_version= featurestore.get_latest_featuregroup_version("node_features"))
nodes.show()
# -
nodes.printSchema()
# Join on the shared 'id' key; drop 'type' to avoid a duplicate column.
embeddings_td = df.join(nodes.drop('type'),on='id')
embeddings_td.count()
from pyspark.sql import functions as func
from pyspark.sql.types import IntegerType
# Training labels must be integers for downstream model training.
embeddings_td = embeddings_td.withColumn("label" , func.col("label").cast(IntegerType()))
embeddings_td.printSchema()
# Materialize the joined frame as a new training-dataset version.
featurestore.create_training_dataset(
    embeddings_td, "embeddings_training_dataset",
    descriptive_statistics=False,
    feature_correlation=False,
    feature_histograms=False,
    cluster_analysis=False,
    featurestore=featurestore.project_featurestore(),
    training_dataset_version=featurestore.get_latest_training_dataset_version("embeddings_training_dataset") + 1
)
| embeddigs_to_feature.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (Data Science)
# language: python
# name: python3__SAGEMAKER_INTERNAL__arn:aws:sagemaker:us-east-1:081325390199:image/datascience-1.0
# ---
# # Explore The Data
# <img src="img/explore-data-ml.png" width="60%" align="left">
# # Amazon Customer Reviews Dataset
#
# https://s3.amazonaws.com/amazon-reviews-pds/readme.html
#
# ### Dataset Columns:
#
# - `marketplace`: 2-letter country code (in this case all "US").
# - `customer_id`: Random identifier that can be used to aggregate reviews written by a single author.
# - `review_id`: A unique ID for the review.
# - `product_id`: The Amazon Standard Identification Number (ASIN). `http://www.amazon.com/dp/<ASIN>` links to the product's detail page.
# - `product_parent`: The parent of that ASIN. Multiple ASINs (color or format variations of the same product) can roll up into a single parent.
# - `product_title`: Title description of the product.
# - `product_category`: Broad product category that can be used to group reviews (in this case digital videos).
# - `star_rating`: The review's rating (1 to 5 stars).
# - `helpful_votes`: Number of helpful votes for the review.
# - `total_votes`: Number of total votes the review received.
# - `vine`: Was the review written as part of the [Vine](https://www.amazon.com/gp/vine/help) program?
# - `verified_purchase`: Was the review from a verified purchase?
# - `review_headline`: The title of the review itself.
# - `review_body`: The text of the review.
# - `review_date`: The date the review was written.
#
#
# # Release Resources
# + language="html"
#
# <p><b>Shutting down your kernel for this notebook to release resources.</b></p>
# <button class="sm-command-button" data-commandlinker-command="kernelmenu:shutdown" style="display:none;">Shutdown Kernel</button>
#
# <script>
# try {
# els = document.getElementsByClassName("sm-command-button");
# els[0].click();
# }
# catch(err) {
# // NoOp
# }
# </script>
# + language="javascript"
#
# try {
# Jupyter.notebook.save_checkpoint();
# Jupyter.notebook.session.delete();
# }
# catch(err) {
# // NoOp
# }
| 05_explore/00_Overview.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/khalinvidamo1-1/CPEN-21A---BSCPE-1-1/blob/main/Midterm_Exam_.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="Pjy7DgzkhmAl"
# ###Midterm Exam
# + [markdown] id="5V2yEvAVlrz9"
# ###PROBLEM STATEMENT 1
# + colab={"base_uri": "https://localhost:8080/"} id="xHUTNHbBhTcv" outputId="b35bc071-88fa-47aa-eb46-bc86f3cb7d34"
Name= "<NAME>"
Stud_Num= "202101935"
Age= "18"
Birth= "03/16/03"
Add= "Tagaytay City"
Course="Bachelor of Science in Computer Engineering 1-1"
gwa= "93"
# Assemble the personal-information card with an f-string instead of chained "+".
Information = (
    f"Hi! My name is {Name} and here is my personal information:\n"
    f"Student Number:{Stud_Num}\n"
    f"{Age} years old, birthdate {Birth}\n"
    f"From {Add}\n"
    f"Currently taking {Course}\n"
    f"With GWA of {gwa} last semester"
)
print(Information)
# + [markdown] id="65GJhekhlk5w"
# ###POBLEM STATEMENT 2
# + id="m2EtjGuIlyEs" colab={"base_uri": "https://localhost:8080/"} outputId="5c5fed73-ba4e-4066-945f-50a1e76ea0a6"
n = 4
answ = "Y"
# Evaluate each boolean expression (expressions unchanged from the exercise);
# only the output section is restructured into a single loop.
a = (2 < n) and (n < 6)
b = (2 < n) or (n < 6)
c = not (2 < n) or (n < 6)
d = not (n < 6)
e = (answ == "Y") or (answ == "y")
f = (answ == "Y") or (answ == "y")
g = not (answ == "y")
h = (2 < n) and (n == 5 + 1) or (answ == "No")
i = ((n == 2) and (n == 7)) or (answ == "Y")
j = (n == +2) and ((n == 7) or (answ == "Y"))
for result in (a, b, c, d, e, f, g, h, i, j):
    print(result)
# + [markdown] id="QJYu0B0Ol0Pv"
# ###PROBLEM STATEMENT 3
# + id="gftP0Yl2l2fs" colab={"base_uri": "https://localhost:8080/"} outputId="bcb9736f-9532-43a5-b7cb-3d57fb4a84fa"
x = int(2)
y = (-3)
w = (7)
z = (-10)
# Same arithmetic expressions as the exercise; results printed in one loop.
a = x / y
b = w / y / x
c = z / y % x
d = x % -y * w
e = x % y
f = z % w - y / x * 5 + 5
g = 9 - x % (2 + y)
h = z // w
i = (2 + y) ** 2
j = w / x * 2
for value in (a, b, c, d, e, f, g, h, i, j):
    print(value)
| Midterm_Exam_.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# ## Extra plotting exercise
#
# Here we'll build on what was done beforehand and create a two panel plot that overlays two sets of information per map. We'll also look at one extra functionality about UVCDAT, which makes it very useful to use.
# ## cdscan
#
# Sometimes the same variable you want to analyze is distributed across multiple datasets, each NetCDF file with a different timestamp. Loading the data from each dataset might seem like a chore. With cdscan, you can quickly create an .xml file which points to the appropriate data in your netcdf files. You can then treat the .xml file as if it is one NetCDF file and use cdms2.open to open and access the data.
#
# cdscan is a functionality of UVCDAT that you can call outside of the python environment if you have the your UVCDAT environment activated.
# https://uvcdat.llnl.gov/documentation/cdms/cdms_7.html
#
# In the week3 directory you will notice that there are two climate model data with tas (surface air temperature). Let's create an xml file that combines the data from the two datasets.
#
# First open a new terminal windown and activate the UVCDAT environment. Then run the following command.
#
# cdscan -x tas_Amon_GFDL-CM3_amip_r1i1p1_197901-198812.xml tas*.nc
#
# This should create the .xml file tas_Amon_GFDL-CM3_amip_r1i1p1_197901-198812.xml
import cdms2
import cdutil
# Open the cdscan-generated XML as if it were a single NetCDF dataset,
# then read surface air temperature ('tas') restricted to 1980-1985.
f_in=cdms2.open('tas_Amon_GFDL-CM3_amip_r1i1p1_197901-198812.xml')
tas=f_in('tas',time=('1980-01-01','1985-12-31'))
# +
# Test out different attributes here
# E.g. tas.shape
# -
# ## Plotting (multi-panel and contour overlay)
#
# Now let's take climatologies of DJF and JJA and take the difference, while also overlaying contours annual mean values.
#
# First, let's take the climatologies:
# Calculate the DJF and JJA climatologies here. Consult previous notebook for how...
tas_DJF=
tas_JJA=
JJA_minus_DJF_tas=tas_JJA-tas_DJF # This gives us the difference
JJA_minus_DJF.id='tas_difference' # Name the variable something meaningful
# Calculate the Annual mean climatology here.
tas_YEAR=
# Now let's make the two panel plot. Top plot will be of annual mean surface temperature and then the bottom plot will be of JJA minus DJF, with an overlay of annual mean surface temperature:
# +
# Two-panel VCS plot: top panel is the annual-mean surface temperature
# (isofill); bottom panel is JJA minus DJF with annual-mean isolines overlaid.
import vcs
import EzTemplate #for multi-panel plotting
x=vcs.init()
M=EzTemplate.Multi(rows=2,columns=1)
#M.x.setbgoutputdimensions(1700,1212,units="pixels") #you can set the output dimensions
x.drawlogooff()
x.setcolormap('bl_to_darkred')
# Shared layout for both panels: margins, spacing, and a per-panel legend.
M.legend.direction='vertical'
M.margins.top=0.05
M.margins.bottom=0.05
M.margins.left=0.25
M.margins.right=0.25
M.spacing.vertical=0.1
t=M.get(legend='local')
# First panel: annual-mean temperature, filled contours in Kelvin.
aa1=x.createisofill()
levels=[-1e20,253,258,263,268,273,278,283,288,293,298,303,1e20]
aa1.levels=levels
cols1=vcs.getcolors(levels, range(11,239, 1))
#cols1=vcs.getcolors(levels, range(239, 11, -1)) #you can flip the colormap this way
aa1.fillareacolors=cols1
# ext_1/ext_2 draw arrow extensions for values beyond the first/last level.
aa1.ext_1='y'
aa1.ext_2='y'
x.plot(tas_YEAR,t,aa1) #plot tas_YEAR in template t, and isofill plot aa1
#start second panel
t=M.get(legend='local')
aa2=x.createisofill()
levels=[-1e20,-24,-20,-16,-12,-8,-4,-2,0,2,4,8,12,16,20,24,1e20]
cols1=vcs.getcolors(levels, range(11,239, 1))
#cols1=vcs.getcolors(levels, range(239, 11, -1)) #you can flip the colormap this way
aa2.levels=levels
aa2.fillareacolors =cols1
aa2.ext_1='y'
aa2.ext_2='y'
x.plot(JJA_minus_DJF_tas,t,aa2)
#now overlay contours
aa1b=x.createisoline()
levelsb=[253,263,273,283,293] #contour every 10K
aa1b.levels=levelsb
aa1b.label = 'y' #add labels to contours
# NOTE(review): t2=t binds the SAME template object (not a copy), so the
# priority changes below also affect t; harmless here since t is not reused.
t2=t #copy the same template
t2.min.priority=0 #but turn off many of the labels so that they don't overwrite on top of each other
t2.max.priority=0
t2.mean.priority=0
t2.dataname.priority=0
t2.title.priority=0
t2.xlabel1.priority=0
t2.ylabel1.priority=0
t2.units.priority=0
x.plot(tas_YEAR,t2,aa1b)
# -
| materials/week3/Extra_Functionalities_Plotting.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import pandas as pd
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Input
from keras.layers import Dropout
from keras.callbacks import EarlyStopping
from keras.applications.inception_v3 import InceptionV3
from keras.applications.mobilenet import MobileNet
from keras.layers import merge, Conv2D, MaxPooling2D, GlobalAveragePooling1D, Flatten, GlobalAveragePooling2D, ZeroPadding2D
from keras.models import load_model, Model
from keras.layers.normalization import BatchNormalization
from keras.callbacks import History ,ModelCheckpoint
from keras.layers import Activation, LeakyReLU, GlobalMaxPooling2D
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.models import load_model
import collections
import sys
import pickle
import os
import matplotlib.pyplot as plt
import skimage
import skimage.io
from sklearn import preprocessing
from squeezenet import SqueezeNet
# %matplotlib inline
# %env CUDA_VISIBLE_DEVICES=1
# -
# Labels file: each line appears to be "<image-id> <class-label>"; keep the
# second column only — assumes a single space separator (TODO confirm).
with open("../dlcv_final_2_dataset/train_id.txt","r") as f:
    train_y = f.readlines()
train_y = np.array([line.strip().split(" ")[1] for line in train_y])
# Fit the binarizer on the training labels so inverse_transform below can map
# prediction columns back to the original class names.
lb = preprocessing.LabelBinarizer()
train_y_onehot = lb.fit_transform(train_y)
model = load_model('../models/weights.89-0.59.hdf5')
# Rescale pixel values to [0, 1] — presumably matching training preprocessing.
test_X = np.load("../features/test_X.npy")/255
prediction = model.predict(test_X)
predict_y = np.argmax(prediction, axis=1)
predict_y[0]
# inverse_transform picks the class with the highest score per row.
test_y=lb.inverse_transform(prediction)
print(model.summary())
# load SampleSubmission and write the predictions as the answer column.
submission = pd.read_csv("../SampleSubmission.csv")
submission["ans"] = test_y
submission.to_csv("../test_output.csv",index=False)
| final/dev_notebook/inference.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
#
# # Multiple Linear Regression
#
#
# #### Multiple Linear Regression is a statistical technique that we use to predict the outcome of a quantity (dependent variable) by using more than one causations/features (independent variables) which affect the outcome . Here the notebook takes an example of study time and play time as independent variables to predict the final grade of students.
#
# The general equation for multiple linear regression may look like this
#
# $${Y} {=} {\sum_{i=1}^{n}} {\beta}_{i} X_{i} + {\beta}_{0} {=} {\beta}_{1} X_{1} + {\beta}_{2} X_{2} + {\beta}_{3} X_{3} +....{\beta}_{n} X_{n} + {\beta}_{0}$$
#
#
#
#
#
#
# Here:
#
# ${\beta}$ represents the coefficients of the independent variables, otherwise known as weights.
#
# ${n}$ represents the number of independent variables.
#
# ${X}_{i}$ independent variables.
#
# ${\beta}_{0}$ is the y-intercept also known as bias(Bias refers to a global offset not explained by the predictor variable.)
#
# Note that -
#
# Sometimes a residual term ${\epsilon}$ is included in the above equation, to account for the deviation that remains when the model cannot fit the data with 100% accuracy.
# ## Assumptions in a multiple linear regression:
#
# 1.**Linearity between the dependent and independent variables**
# This means that each independent variable $X_i$ must only have a **linear relation** with dependent variable ${Y}$ .
#
#
# 2.**Homoscedasticity should be present**
# Homoscedasticity means that the spread of the ${Y}$ values about their mean stays roughly constant as we move along any $X_i$; i.e., the residuals have the same finite variance everywhere rather than fanning out.
#
# $${\sigma^2 = \frac{\sum_{i=1}^{n}(x_i - \mu)^2} {n}}$$
#
#
# 3.**Multivariate Normality**
# Multivariate normality means when the difference between predicted values and actual values(called residual or errors) and are normally distributed throught the distribution.
#
#
# 4.**Absence of multicollinearity**
# The independent variables should not be having a high correlation amongst themselves, ie each one should be truly independent of the fluctuation in the other variable, otherwise it might deviate from a linear model.
#
#
# 5.**Independence of errors or residuals**
# The residual for each data point must be independent of other residual. There must not lie any hidden pattern between the residuals, because if there is, then it implies the model is not able to capture the relation perfectly due to a missing variable, multicollinearity or the relation itself is not linear.
# ## Algortihm steps:
# 1. Checking for all the assumptions being satisfied on the dataset on which we wish to apply a linear regression analysis.
#
# 2. Formulate an equation in terms of number of features(independent variables) and the predicted label(dependent variable).
#
# 3. Trying to find the best possible weights or coefficients of the feature variables and the bias by either using-
#
# i. Ordinary least squares method(Used here)
#
# ii.Gradient descent algorithm
#
# **Ordinary Least Squares method**
# As stated earlier, a residual is the distance of a data point from the best fit. Our goal is to minimise the sum of squared residuals, that is, to find the weights which minimise the value of
# $${\sum_{i= 1}^{n}(y_i - \hat{y})^2}$$
#
# where $\hat{y}$ represents the actual label value and $y_i$ represents the predicted label value.
#
# The simple formula we use here to attain the minimum value of squared residual sum is given in the matrix form by-:
# $$ {\theta} = (X^{T}X)^{-1} X^{T} Y$$.
#
# To learn more about the derivation of the matrix form of the formula, one can refer to learn about the normal equations which are used to arrive at the same.
#
#
# We can later measure how well our model fits to the data- by ${R^2}$ metric.
#
# $${R^2} = 1-\frac{\sum_{i= 1}^{n} (y_i - \hat{y})^2} {\sum_{i= 1}^{n} (y_i - \bar{y})^2}$$
# ### Shape of the different matrices which will be used in our equations
#
# The shape of these matrices here is given by "p x q".
# For the following matrices let us assume that:
#
# m: Number of training examples.
#
# n: Number of features.
#
# <b>
#
# Shape of X: m x (n + 1)
#
# Shape of $X^{T}$: (n + 1) x m
#
# Shape of $X^{T}X$ : (n + 1) x m * m x (n + 1) = (n + 1) x (n + 1)
#
# Shape of $(X^{T}X)^{-1} X^{T}$ : (n + 1) x (n + 1) * (n + 1) x m = (n + 1) x m
#
# Shape of ${\theta}$ = (n + 1) x m * m x 1 = (n + 1) x 1
#
#
# Note : Theta here is the matrix which will store our final weights and bias.
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D #Used to plot in a 3D place
# %matplotlib inline
# **Basics of matrix manipulation**
#
# What np.matrix does is that when you define a list as np.matrix, it allows us to find the inverse and transpose of that matrix and store the same, which can be called as X.I as ${X^{-1}}$ and X.T as ${X^{T}}$.
# +
# Toy data: each row of X is [x_0 (bias input, always 1), study time, play time];
# Y holds the corresponding final grades.
X = np.matrix([[1,6,2], #x_0, study time, play time
               [1,7,4],
               [1,3,2],
               [1,1,2],
               [1,6,3]]) #multilinear data for example
Y = np.matrix([[70],
               [72],
               [50],
               [45],
               [73]]) #multilinear labels
# -
# **Finding the transpose of X and Y and storing them in XT and YT**
X.T,Y.T
XT = X.T
YT = Y.T
# **To find the dot product of two matrices (numpy arrays)**
np.dot(XT,X)
# **Finding inverse of ${X^{T}.X}$ and storing in XTX_inv**
#
# (`.I` is the matrix-inverse attribute that np.matrix provides.)
XTX_inv = np.dot(XT,X).I
XTX_inv
# **Now this is the essential step, here we try to find the weights or the coefficients of our independent variables and the bias or the y-intercept, here stored in matrix theta (${\theta}$) (Theta0,Theta1,...)**
# In short this is basically a one line equation to implement ordinary least squares for finding the best weights and bias to formulate our final equation.
#
# $$ {\theta} = (X^{T}X)^{-1} X^{T} Y$$
theta = np.dot(XTX_inv , np.dot(XT,Y))
# #### Here first value is bias rest all are weights.
theta
# #### Yhat = X.theta gives the predicted Y for every training row.
# #### (With 5 points and 3 parameters the fit is least-squares, not exact.)
y_hat = np.dot(X,theta)
y_hat
# **Whenever we plot graphs in simple linear regression we end up with a line which is used to visualise the distribution, however with more number of independent variable(IV) or features, the dimensionality increases, here we used two IVs which resulted in a plane, any higher number of IV will result in greater dimensions which are not easy to visualise. However they behave in the same fashion as with 1,2 or 3 IV.**
#
# **Now we visualise the plane generated for our example of multilinear regression**
#
# Refer from mpl_toolkit documentation for implementation of below code.
# Build the 3-D axes explicitly: Figure.gca(projection='3d') was deprecated in
# Matplotlib 3.4 and removed in 3.7; add_subplot is the supported equivalent.
plt3d = plt.figure(figsize=(15,12)).add_subplot(projection = '3d')
X1 = np.array(X[:,1]) #Array of X1 features (study time), shape (5, 1)
X2 = np.array(X[:,2]) #Array of X2 features (play time), shape (5, 1)
# min()/max() iterate over the (5,1) arrays row by row, so [0] extracts the
# scalar endpoint for the integer range.
xx , yy = np.meshgrid(range(min(X1)[0],max(X1)[0]),
                      range(min(X2)[0],max(X2)[0])) #Creates a grid over the observed feature ranges
plt3d.scatter(np.array(X[:,1]),np.array(X[:,2]),
              np.array(Y[:,0]),color = 'blue',s=100) #Scattering of points (X1,X2,Y)
plt3d.plot_surface(xx,yy,
                   np.array(theta[0,0]+theta[1,0]*xx
                            +theta[2,0]*yy),color = 'cyan') #Fitted plane z = b0 + b1*x1 + b2*x2
plt3d.view_init(-140,120)
plt.xlabel('X1')
plt.ylabel('X2')
plt.show()
# ### Multivariate Regression Algorithm implementation in separate functions to be called as per need.
# We have now created functions to turn any dataset into our matrix format so that we can apply our equations on it
#
def make_X_mat(dataset):
    """Prepend a column of ones (the bias/intercept input) to *dataset*.

    Given an (m, n) array, returns an (m, n + 1) array whose first
    column is all ones, matching the x_0 convention used above.
    """
    bias_column = np.ones((dataset.shape[0], 1))
    return np.append(bias_column, dataset, axis=1)
# Ordinary-least-squares solver: returns the bias-and-weights matrix theta.
def get_theta(X,Y):
    """Solve the normal equations: theta = (X^T X)^{-1} X^T Y.

    X and Y are expected to be np.matrix instances — ``.I`` is the
    matrix-inverse attribute, which plain ndarrays do not provide.
    """
    gram_inverse = np.dot(X.T, X).I
    return np.dot(gram_inverse, np.dot(X.T, Y))
# Produces predictions for a prepared feature matrix and fitted theta.
def predict(X,theta):
    """Return the matrix product X . theta — one predicted label per row of X."""
    return np.dot(X, theta)
| notebooks/Multilinear-Regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Multiclass classification/ Phân loại nhiều lớp với DNN
# ## Import motdules
# %matplotlib inline
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import SGD, Adam
from tensorflow.keras.callbacks import EarlyStopping
# ## The Iris dataset
# Load the Iris table: four numeric features plus a 'species' string column.
df = pd.read_csv('../data/iris.csv')
import seaborn as sns
sns.pairplot(df, hue="species")
df.head()
X = df.drop('species', axis=1)
X.head()
# ## Re-encode y (translated from Vietnamese: "Mã hoá lại y")
target_names = df['species'].unique()
target_names
# Map each species name to an integer index (0, 1, 2).
target_dict = {n:i for i, n in enumerate(target_names)}
target_dict
y = df['species'].map(target_dict)
y.head(10)
from tensorflow.keras.utils import to_categorical
# One-hot encode the integer labels for categorical cross-entropy.
y_cat = to_categorical(y)
y_cat[:10]
# ### Train test split
X_train, X_test, y_train, y_test = train_test_split(X.values, y_cat,
                                                    test_size=0.2,
                                                    random_state=1)
# ## Build the model (translated from Vietnamese: "Tạo model")
# 
# 4 -> 3 -> 3 network; the final softmax layer emits one probability per class.
model = Sequential()
model.add(Dense(4, input_shape=(4,), activation='sigmoid'))
model.add(Dense(3, activation='tanh'))
model.add(Dense(3, activation='softmax'))
model.compile(Adam(learning_rate=0.1),
              loss='categorical_crossentropy',
              metrics=['accuracy'])
model.summary()
model.fit(X_train, y_train, epochs=40, validation_split=0.1)
y_pred = model.predict(X_test)
y_pred[:5]
# Convert one-hot targets / probability rows back to class indices for metrics.
y_test_class = np.argmax(y_test, axis=1)
y_pred_class = np.argmax(y_pred, axis=1)
from sklearn.metrics import classification_report
print(classification_report(y_test_class, y_pred_class))
from sklearn.metrics import accuracy_score, confusion_matrix
confusion_matrix(y_test_class, y_pred_class)
from sklearn.metrics import log_loss
# log_loss takes the predicted probabilities, not the argmax classes.
log_loss(y_test_class, y_pred)
loss, accuracy = model.evaluate(X_test, y_test)
print("Loss: " + str(loss));
print("Accuracy: " + str(accuracy));
# ## Bài tập
# ## 1. Multiclass classification sử dụng DNN với bộ dataset fetal_health.csv, output là fetal_health có giá trị 0, 1, 2
# ### Gợi ý: Mạng gồm tối thiểu 2 lớp ẩn, lớp đầu 64 neuron, lớp thứ hai có 32 neuron tối thiểu
# ### Hãy tinh chỉnh, tăng số lớp, số neuron để xem kết quả có cải thiện không
| Code/DNN - Multiclass Classification.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# <a href="https://qworld.net" target="_blank" align="left"><img src="../qworld/images/header.jpg" align="left"></a>
# $ \newcommand{\bra}[1]{\langle #1|} $
# $ \newcommand{\ket}[1]{|#1\rangle} $
# $ \newcommand{\braket}[2]{\langle #1|#2\rangle} $
# $ \newcommand{\dot}[2]{ #1 \cdot #2} $
# $ \newcommand{\biginner}[2]{\left\langle #1,#2\right\rangle} $
# $ \newcommand{\mymatrix}[2]{\left( \begin{array}{#1} #2\end{array} \right)} $
# $ \newcommand{\myvector}[1]{\mymatrix{c}{#1}} $
# $ \newcommand{\myrvector}[1]{\mymatrix{r}{#1}} $
# $ \newcommand{\mypar}[1]{\left( #1 \right)} $
# $ \newcommand{\mybigpar}[1]{ \Big( #1 \Big)} $
# $ \newcommand{\sqrttwo}{\frac{1}{\sqrt{2}}} $
# $ \newcommand{\dsqrttwo}{\dfrac{1}{\sqrt{2}}} $
# $ \newcommand{\onehalf}{\frac{1}{2}} $
# $ \newcommand{\donehalf}{\dfrac{1}{2}} $
# $ \newcommand{\hadamard}{ \mymatrix{rr}{ \sqrttwo & \sqrttwo \\ \sqrttwo & -\sqrttwo }} $
# $ \newcommand{\vzero}{\myvector{1\\0}} $
# $ \newcommand{\vone}{\myvector{0\\1}} $
# $ \newcommand{\stateplus}{\myvector{ \sqrttwo \\ \sqrttwo } } $
# $ \newcommand{\stateminus}{ \myrvector{ \sqrttwo \\ -\sqrttwo } } $
# $ \newcommand{\myarray}[2]{ \begin{array}{#1}#2\end{array}} $
# $ \newcommand{\X}{ \mymatrix{cc}{0 & 1 \\ 1 & 0} } $
# $ \newcommand{\I}{ \mymatrix{rr}{1 & 0 \\ 0 & 1} } $
# $ \newcommand{\Z}{ \mymatrix{rr}{1 & 0 \\ 0 & -1} } $
# $ \newcommand{\Htwo}{ \mymatrix{rrrr}{ \frac{1}{2} & \frac{1}{2} & \frac{1}{2} & \frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & \frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} \\ \frac{1}{2} & -\frac{1}{2} & -\frac{1}{2} & \frac{1}{2} } } $
# $ \newcommand{\CNOT}{ \mymatrix{cccc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 0 & 1 \\ 0 & 0 & 1 & 0} } $
# $ \newcommand{\norm}[1]{ \left\lVert #1 \right\rVert } $
# $ \newcommand{\pstate}[1]{ \lceil \mspace{-1mu} #1 \mspace{-1.5mu} \rfloor } $
# $ \newcommand{\greenbit}[1] {\mathbf{{\color{green}#1}}} $
# $ \newcommand{\bluebit}[1] {\mathbf{{\color{blue}#1}}} $
# $ \newcommand{\redbit}[1] {\mathbf{{\color{red}#1}}} $
# $ \newcommand{\brownbit}[1] {\mathbf{{\color{brown}#1}}} $
# $ \newcommand{\blackbit}[1] {\mathbf{{\color{black}#1}}} $
# <font style="font-size:28px;" align="left"><b> <font color="blue"> Solutions for </font>Operators on Multiple Bits </b></font>
# <br>
# _prepared by <NAME>_
# <br><br>
# <a id="task1"></a>
# <h3> Task 1</h3>
#
# We have two bits. What is $ (4 \times 4) $-dimensional matrix representation of the probabilistic operator $ M = \mymatrix{c}{ 0.2 & 0.7 \\ 0.8 & 0.3 } $ applied to the first bit?
# <h3>Solution</h3>
#
# We assume that the identity operator is applied to the second bit:
#
# $$ M \otimes I = \mymatrix{rr}{ 0.2 & 0.7 \\ 0.8 & 0.3 } \otimes \I = \mymatrix{rrrr}{ 0.2 & 0 & 0.7 & 0 \\
# 0 & 0.2 & 0 & 0.7 \\ 0.8 & 0 & 0.3 & 0 \\ 0 & 0.8 & 0 & 0.3} $$.
# <a id="task2"></a>
# <h3> Task 2</h3>
#
# We have three bits. What is $ (8 \times 8) $-dimensional matrix representation of the probabilistic operator $ M = \mymatrix{c}{ 0.9 & 0.4 \\ 0.1 & 0.6 } $ applied to the second bit?
# <h3>Solution</h3>
#
# We assume that the identity operators are applied to the first and third bits:
# $ I \otimes M \otimes I = \I \otimes \mymatrix{rr}{ 0.9 & 0.4 \\ 0.1 & 0.6 } \otimes \I $.
#
# Tensor product is associative and so it does not matter from which pair we start. We first calculate the tensor product of the second and third matrices:
#
# $$ I \otimes \mypar{ M \otimes I } = \I \otimes \mymatrix{rrrr}{ 0.9 & 0 & 0.4 & 0 \\ 0 & 0.9 & 0 & 0.4 \\ 0.1 & 0 & 0.6 & 0 \\ 0 & 0.1 & 0 & 0.6} = \mymatrix{rrrr|rrrrr}{0.9 & 0 & 0.4 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0.9 & 0 & 0.4 & 0 & 0 & 0 & 0 \\ 0.1 & 0 & 0.6 & 0 & 0 & 0 & 0 & 0 \\ 0 & 0.1 & 0 & 0.6 & 0 & 0 & 0 & 0
# \\ \hline 0 & 0 & 0 & 0 & 0.9 & 0 & 0.4 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0.9 & 0 & 0.4 \\ 0 & 0 & 0 & 0 & 0.1 & 0 & 0.6 & 0 \\ 0 & 0 & 0 & 0 & 0 & 0.1 & 0 & 0.6} $$
# <a id="task4"></a>
# <h3> Task 4 </h3>
#
# We have three bits and the probabilistic operator
# $ M = \mymatrix{rrrr}{0.05 & 0 & 0.70 & 0.60 \\ 0.45 & 0.50 & 0.20 & 0.25 \\ 0.20 & 0.35 & 0.10 & 0 \\ 0.30 & 0.15 & 0 & 0.15 } $
# to the first and third bits.
#
# What is the corresponding the $(8 \times 8)$-dimensional matrix applied to the whole system?
#
# *You may solve this task by using python.*
# <h3>Solution</h3>
#
# We use python to construct the new matrix.
# +
# The given probabilistic operator acting on the first and third bits.
M = [
    [0.05, 0, 0.70, 0.60],
    [0.45, 0.50, 0.20, 0.25],
    [0.20, 0.35, 0.10, 0],
    [0.30, 0.15, 0, 0.15]
]

print("Matrix M is")
for row in M:
    print(row)
print()

# K is the lifted (8 x 8) operator on all three bits, initially all zeros.
K = [[0 for _ in range(8)] for _ in range(8)]

# Each transition of M between two-bit states (first & third bits) lifts to
# two transitions of K — one for each value of the untouched middle bit.
two_bit_states = ['00', '01', '10', '11']
for col in two_bit_states:
    for row in two_bit_states:
        prob = M[int(col, 2)][int(row, 2)]
        for middle in ('0', '1'):
            newcol = col[0] + middle + col[1]
            newrow = row[0] + middle + row[1]
            K[int(newcol, 2)][int(newrow, 2)] = prob

print("Matrix K is")
for row in K:
    print(row)
# -
# $$
# M = \mymatrix{rrrr}{0.05 & 0 & 0.70 & 0.60 \\ 0.45 & 0.50 & 0.20 & 0.25 \\ 0.20 & 0.35 & 0.10 & 0 \\ 0.30 & 0.15 & 0 & 0.15 }
# \Rightarrow
# K = \mymatrix{rrrrrrrr}{
# 0.05 & 0 & 0 & 0 & 0.70 & 0.60 & 0 & 0\\
# 0.45 & 0.50 & 0 & 0 & 0.20 & 0.25 & 0 & 0 \\
# 0 & 0 & 0.05 & 0 & 0 & 0 & 0.7 & 0.6 \\
# 0 & 0 & 0.45 & 0.50 & 0 & 0 & 0.20 & 0.25 \\
# 0.20 & 0.35 & 0 & 0 & 0.1 & 0 & 0 & 0 \\
# 0.30 & 0.15 & 0 & 0 & 0 & 0.15 & 0 & 0 \\
# 0 & 0 & 0.20 & 0.35 & 0 & 0 & 0.10 & 0 \\
# 0 & 0 & 0.30 & 0.15 & 0 & 0 & 0 & 0.15
# }
# $$
# <a id="task5"></a>
# <h3> Task 5</h3>
#
# Let $ M = \mymatrix{cc}{0.7 & 0.4 \\ 0.3 & 0.6} $ be a single bit operator. What is the matrix form of the controlled-$M$ operator where the first bit is the target bit and the second bit is the control bit.
# <h3>Solution</h3>
#
# When the second bit is zero, the state of the first bit does not change. We can write this as
# * $ 00 \xrightarrow{1} 00 $ and
# * $ 10 \xrightarrow{1} 10 $,
#
# So, we have the first and third columns as $ \myvector{ 1 \\ 0 \\ 0 \\0 } $ and $ \myvector{0 \\ 0 \\ 1 \\ 0} $, respectively.
#
# When the second bit is one, the operator $ M $ is applied to the first bit. We can write this as
# * $ \pstate{ \bluebit{0} \redbit{1} } \rightarrow 0.7 \pstate{ \bluebit{0} \redbit{1} } + 0.3 \pstate{ \bluebit{1} \redbit{1} } $, and
# * $ \pstate{ \bluebit{1} \redbit{1} } \rightarrow 0.4 \pstate{ \bluebit{0} \redbit{1} } + 0.6 \pstate{ \bluebit{1} \redbit{1} } $.
#
# Thus, we also have the second and fourth columns as $ \myvector{ 0 \\ 0.7 \\ 0 \\ 0.3 } $ and $ \myvector{0 \\ 0.4 \\ 0 \\ 0.6} $.
#
# Therefore, the overall matrix is
# $ \mymatrix{cccc}{ 1 & 0 & 0 & 0 \\ 0 & 0.7 & 0 & 0.4 \\ 0 & 0 & 1 & 0 \\ 0 & 0.3 & 0 & 0.6 }. $
# <a id="task6"></a>
# <h3> Task 6</h3>
#
# Verify that $ C_0M = (X \otimes I) \cdot (CM) \cdot ( X \otimes I ) = \mymatrix{c|c}{ M & \mathbf{0} \\ \hline \mathbf{0} & I } $.
# <h3>Solution</h3>
#
# We start with $ X \otimes I $, which is equal to $ \X \otimes \I = \mymatrix{cc|cc}{ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ \hline 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 } = \mymatrix{c|c}{ \mathbf{0} & I \\ \hline I & \mathbf{0} } $.
#
# $$
# C_0M = (X \otimes I) \cdot (CM) \cdot ( X \otimes I ) =
# \mymatrix{c|c}{ \mathbf{0} & I \\ \hline I & \mathbf{0} } \mymatrix{c|c}{ I & \mathbf{0} \\ \hline \mathbf{0} & M } \mymatrix{c|c}{ \mathbf{0} & I \\ \hline I & \mathbf{0} }
# $$
#
# This multiplication can be easily done by seeing the sub-matrices as the entries of $ (2 \times 2) $-matrices (*[block matrix multiplication](https://en.wikipedia.org/wiki/Block_matrix)*).
#
# The multiplication of the first two matrices are
# $
# \mymatrix{c|c}{ \mathbf{0} & I \\ \hline I & \mathbf{0} } \mymatrix{c|c}{ I & \mathbf{0} \\ \hline \mathbf{0} & M }
# = \mymatrix{c|c}{ \mathbf{0} & M \\ \hline I & \mathbf{0} }.
# $
#
# Then, its multiplication with the third matrix is $ \mymatrix{c|c}{ \mathbf{0} & M \\ \hline I & \mathbf{0} } \mymatrix{c|c}{ \mathbf{0} & I \\ \hline I & \mathbf{0} } = \mymatrix{c|c}{ M & \mathbf{0} \\ \hline \mathbf{0} & I } $.
#
#
# Alternatively, we define $ M $ as $ \mymatrix{cc}{a & b \\ c & d} $, and then verify the result by doing all multiplications explicitly.
#
# $$ C_0M = (X \otimes I) \cdot (CM) \cdot ( X \otimes I ) = \mymatrix{cc|cc}{ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ \hline 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 } \cdot \mymatrix{cc|cc}{1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ \hline 0 & 0 & a & b \\ 0 & 0 & c & d} \cdot \mymatrix{cc|cc}{ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ \hline 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 } =
# $$
#
# $$
# \mymatrix{cc|cc}{ 0 & 0 & a & b \\ 0 & 0 & c & d \\ \hline 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 } \mymatrix{cc|cc}{ 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 \\ \hline 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 } =
# \mymatrix{cc|cc}{ a & b & 0 & 0 \\ c & d & 0 & 0 \\ \hline 0 & 0 & 1 & 0 \\ 0 & 0 & 0 & 1 } =
# \mymatrix{c|c}{ M & \mathbf{0} \\ \hline \mathbf{0} & I }.
# $$
| classical-systems/CS40_Operators_on_Multiple_Bits_Solutions.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/mo-9/img_segmentation/blob/main/deeplab.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="lcNI3tVWdTyj" colab={"base_uri": "https://localhost:8080/"} outputId="299256cc-b733-4cda-aa0b-8bca9217a0f1"
import tensorflow as tf
print(tf.__version__)
# I/O libraries
import os
from io import BytesIO
import tarfile
import tempfile
from six.moves import urllib
# Helper libraries
import matplotlib
from matplotlib import gridspec
from matplotlib import pyplot as plt
import numpy as np
from PIL import Image
import cv2 as cv
from tqdm import tqdm
import IPython
from sklearn.metrics import confusion_matrix
from tabulate import tabulate
# Comment this out if you want to see Deprecation warnings
import warnings
warnings.simplefilter("ignore", DeprecationWarning)
# + id="o_i79Wm9I3Vg"
class DeepLabModel(object):
    """Class to load deeplab model and run inference."""

    FROZEN_GRAPH_NAME = 'frozen_inference_graph'

    def __init__(self, tarball_path):
        """Creates and loads pretrained deeplab model.

        Args:
            tarball_path: Path to a .tar.gz archive that contains a frozen
                inference graph (as shipped by the DeepLab model zoo).

        Raises:
            RuntimeError: If no frozen graph is found inside the archive.
        """
        self.graph = tf.Graph()
        graph_def = None
        # Extract frozen graph from tar archive.
        tar_file = tarfile.open(tarball_path)
        for tar_info in tar_file.getmembers():
            if self.FROZEN_GRAPH_NAME in os.path.basename(tar_info.name):
                file_handle = tar_file.extractfile(tar_info)
                graph_def = tf.compat.v1.GraphDef.FromString(file_handle.read())
                break
        tar_file.close()
        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')
        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')
        self.sess = tf.compat.v1.Session(graph=self.graph)

    def run(self, image, INPUT_TENSOR_NAME = 'ImageTensor:0', OUTPUT_TENSOR_NAME = 'SemanticPredictions:0'):
        """Runs inference on a single image.
        Args:
            image: A PIL.Image object, raw input image.
            INPUT_TENSOR_NAME: The name of input tensor, default to ImageTensor.
            OUTPUT_TENSOR_NAME: The name of output tensor, default to SemanticPredictions.
        Returns:
            resized_image: RGB image resized from original input image.
            seg_map: Segmentation map of `resized_image`.
        """
        width, height = image.size
        target_size = (2049,1025) # size of Cityscapes images
        # FIX: Image.ANTIALIAS was removed in Pillow 10; Image.LANCZOS is the
        # same resampling filter under its current name.
        resized_image = image.convert('RGB').resize(target_size, Image.LANCZOS)
        batch_seg_map = self.sess.run(
            OUTPUT_TENSOR_NAME,
            feed_dict={INPUT_TENSOR_NAME: [np.asarray(resized_image)]})
        seg_map = batch_seg_map[0]  # expected batch size = 1
        if len(seg_map.shape) == 2:
            seg_map = np.expand_dims(seg_map,-1)  # cv.resize needs a channel dimension
        # Resize the prediction back to the original image size; nearest keeps labels intact.
        seg_map = cv.resize(seg_map, (width,height), interpolation=cv.INTER_NEAREST)
        return seg_map
# + id="6HTBF1KFJqAX"
def create_label_colormap():
    """Creates a label colormap used in Cityscapes segmentation benchmark.

    Returns:
        A Colormap for visualizing segmentation results.
    """
    # One RGB row per class, in the same order as LABEL_NAMES below;
    # the final black row is the 'void' label.
    cityscapes_palette = [
        [128, 64, 128],   # road
        [244, 35, 232],   # sidewalk
        [70, 70, 70],     # building
        [102, 102, 156],  # wall
        [190, 153, 153],  # fence
        [153, 153, 153],  # pole
        [250, 170, 30],   # traffic light
        [220, 220, 0],    # traffic sign
        [107, 142, 35],   # vegetation
        [152, 251, 152],  # terrain
        [70, 130, 180],   # sky
        [220, 20, 60],    # person
        [255, 0, 0],      # rider
        [0, 0, 142],      # car
        [0, 0, 70],       # truck
        [0, 60, 100],     # bus
        [0, 80, 100],     # train
        [0, 0, 230],      # motorcycle
        [119, 11, 32],    # bicycle
        [0, 0, 0],        # void
    ]
    return np.array(cityscapes_palette, dtype=np.uint8)
def label_to_color_image(label):
    """Adds color defined by the dataset colormap to the label.

    Args:
        label: A 2D array with integer type, storing the segmentation label.

    Returns:
        result: An array where each element of the input label has been
            replaced by the RGB color indexed by it in the colormap.

    Raises:
        ValueError: If label is not of rank 2 or its value is larger than the
            colormap's maximum entry.
    """
    if label.ndim != 2:
        raise ValueError('Expect 2-D input label')
    palette = create_label_colormap()
    highest_label = np.max(label)
    if highest_label >= len(palette):
        raise ValueError('label value too large.')
    # NumPy fancy indexing maps every label id to its RGB triple in one step.
    return palette[label]
def vis_segmentation(image, seg_map):
    """Visualizes input image, segmentation map and overlay view.

    Relies on the module-level FULL_COLOR_MAP and LABEL_NAMES constants
    for the legend strip.
    """
    plt.figure(figsize=(20, 4))
    # Three equally-wide panels plus a narrow strip for the legend.
    grid_spec = gridspec.GridSpec(1, 4, width_ratios=[6, 6, 6, 1])
    plt.subplot(grid_spec[0])
    plt.imshow(image)
    plt.axis('off')
    plt.title('input image')
    plt.subplot(grid_spec[1])
    seg_image = label_to_color_image(seg_map).astype(np.uint8)
    plt.imshow(seg_image)
    plt.axis('off')
    plt.title('segmentation map')
    # Overlay: semi-transparent colorized labels on top of the input image.
    plt.subplot(grid_spec[2])
    plt.imshow(image)
    plt.imshow(seg_image, alpha=0.7)
    plt.axis('off')
    plt.title('segmentation overlay')
    # Legend: show only colors for the labels actually present in seg_map.
    unique_labels = np.unique(seg_map)
    ax = plt.subplot(grid_spec[3])
    plt.imshow(FULL_COLOR_MAP[unique_labels].astype(np.uint8), interpolation='nearest')
    ax.yaxis.tick_right()
    plt.yticks(range(len(unique_labels)), LABEL_NAMES[unique_labels])
    plt.xticks([], [])
    ax.tick_params(width=0.0)
    plt.grid('off')
    plt.show()
# Cityscapes class names; index i corresponds to row i of the colormap.
LABEL_NAMES = np.asarray([
    'road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light',
    'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck',
    'bus', 'train', 'motorcycle', 'bicycle', 'void'])
# Column vector [0..19] run through the colormap yields one color per class;
# used by vis_segmentation to draw the legend strip.
FULL_LABEL_MAP = np.arange(len(LABEL_NAMES)).reshape(len(LABEL_NAMES), 1)
FULL_COLOR_MAP = label_to_color_image(FULL_LABEL_MAP)
# + colab={"base_uri": "https://localhost:8080/"} id="9eytn_dBJ_Oq" outputId="e7538914-d4d8-476b-f55a-a54b23e14d42"
# Pick the backbone: MobileNetV2 (smaller/faster) or Xception-65 (commented out).
MODEL_NAME = 'mobilenetv2_coco_cityscapes_trainfine'
#MODEL_NAME = 'xception65_cityscapes_trainfine'
_DOWNLOAD_URL_PREFIX = 'http://download.tensorflow.org/models/'
_MODEL_URLS = {
    'mobilenetv2_coco_cityscapes_trainfine':
        'deeplabv3_mnv2_cityscapes_train_2018_02_05.tar.gz',
    'xception65_cityscapes_trainfine':
        'deeplabv3_cityscapes_train_2018_02_06.tar.gz',
}
_TARBALL_NAME = 'deeplab_model.tar.gz'
# Download the pretrained Cityscapes checkpoint into a temp dir and load it.
model_dir = tempfile.mkdtemp()
tf.compat.v1.gfile.MakeDirs(model_dir)
download_path = os.path.join(model_dir, _TARBALL_NAME)
print('downloading model, this might take a while...')
urllib.request.urlretrieve(_DOWNLOAD_URL_PREFIX + _MODEL_URLS[MODEL_NAME], download_path)
print('download completed! loading DeepLab model...')
MODEL = DeepLabModel(download_path)
print('model loaded successfully!')
# + colab={"base_uri": "https://localhost:8080/", "height": 233} id="fCEK5CBCKHIB" outputId="e3d3d8c3-9a8e-4f3d-cad9-cb62ac553962"
SAMPLE_IMAGE = '/content/drive/MyDrive/24099144_160766414670145_1899733346_n.jpg'
def run_visualization(SAMPLE_IMAGE):
    """Inferences DeepLab model and visualizes result.

    NOTE: the parameter shadows the module-level SAMPLE_IMAGE above, and the
    global MODEL must already be loaded before this is called.
    """
    original_im = Image.open(SAMPLE_IMAGE)
    seg_map = MODEL.run(original_im)
    vis_segmentation(original_im, seg_map)
run_visualization(SAMPLE_IMAGE)
# + id="1GK-DKahQU8g"
| deeplab.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.2 64-bit
# metadata:
# interpreter:
# hash: 31f2aee4e71d21fbe5cf8b01ff0e069b9275f58929596ceb00d14d90e3e16cd6
# name: python3
# ---
# # Top Charts Exploratory Data Analysis
# ## Loading Dependencies
import pandas as pd
from collections import Counter
import altair as alt
import nltk
import regex as re
# ## Loading in Data
# Load the cleaned top-songs table (one row per track, genres as a string).
df = pd.read_csv('cleaned_data/all_top_songs_with_genres_nolist.csv')
# preview of dataframe
df.head()
# ## Cleaning Up List of Genres
# cleaning up the genres column on copy of dataframe
df_ = df.copy()
df_['genre'] = df_['genre'].str.split(", ")
# add all values to a list to generate a unique list of values
genres_list = []
for value in df_['genre']:  # iterate values directly; the enumerate index was unused
    genres_list.extend(value)
# ### Adding in Columns for genres
# One-hot genre flags: build a boolean mask per genre keyword, then cast
# each mask to a 0/1 integer column.
_genre_patterns = [
    ('pop', 'pop'),
    ('rb', 'r-b'),
    ('rap', 'rap'),
    ('rock', 'rock'),
    ('non-music', 'non-music'),
    ('country', 'country'),
    # NOTE(review): 'm' matches almost any genre string — looks like a
    # placeholder; this column is recomputed properly further below.
    ('no_genre', 'm'),
]
for _col, _pat in _genre_patterns:
    # `== True` maps NaN (missing genre) to False before the int cast
    df_[_col] = (df.genre.str.contains(_pat) == True).astype(int)
df_.head()
### Saving to CSV
df_.to_csv('cleaned_data/OHE_all_top_songs.csv', index=False)
# inspect which artists were tagged non-music
df_[df_['non-music'] == 1]['artist']
# drop non-music because those rows all either have another genre or are missing a genre
df_ = df_.drop(columns=['non-music'])
# no_genre = 1 when none of the genre flag columns (positions 6-10) is set;
# a vectorized column sum replaces the original row-by-row Python loop,
# which was O(n) iloc lookups.
df_['no_genre'] = (df_.iloc[:, 6:11].sum(axis=1) == 0).astype(int)
# ## Visualizations
# raw frequency of every genre tag seen in the data
genre_frequencies = dict(Counter(genres_list))
genre_frequencies
# pivot the single-row record into a (genres, counts) table
genre_frequencies_df = pd.DataFrame.from_records([genre_frequencies])
genre_frequencies_df = genre_frequencies_df.rename(index={0:'counts'}).T.reset_index().rename(columns={'index':'genres'})
# keep only the five main genres for plotting/export
genre_frequencies_df = genre_frequencies_df[genre_frequencies_df['genres'].isin(['r-b', 'pop', 'rap', 'rock', 'country'])]
genre_frequencies_df.to_csv('cleaned_data/genre_song_counts.csv', index = False)
# +
# Bar chart of genre counts with the count value drawn above each bar.
bars = alt.Chart(data=genre_frequencies_df).mark_bar().encode(
    x= 'genres',
    y = 'counts',
    color = 'genres'
)
text = bars.mark_text(
    align='center',
    # baseline='top',
    dy=-10  # shift the labels 10px above the bar tops
).encode(
    text='counts:Q',
)
(bars + text).properties(height=500, width = 400,title = "Frequency of Genres on Top 200 Charts").configure_range(
    category={'scheme': 'tableau10'}
)
# -
# There seems to be data labeled as non-music, which is strange because nothing should be labeled non-music. If another genre is also listed, remove the non-music label.
# # Keyword Extraction of all Genres
### Importing More Dependencies
from resources.word_extraction.text_cleaning import lem_stem_text
from resources.word_extraction.stopwords import remove_stopw, get_stopwords
from resources.analyze import find_keywords, find_instances
# Strip punctuation. The pattern is a regex, so pass regex=True explicitly:
# pandas >= 2.0 defaults Series.str.replace to literal matching, which would
# silently turn this into a no-op. A raw string avoids the '\w' escape warning.
df_['cleaned_lyrics'] = df_['lyrics'].str.replace(r'[^\w\s]', '', regex=True)
# remove the 'missing lyrics' placeholder (plain literal match)
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].str.replace('missing lyrics', '', regex=False)
# drop stopwords, then lemmatize/stem each lyric string
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].apply(remove_stopw)
df_['cleaned_lyrics'] = df_['cleaned_lyrics'].apply(lem_stem_text)
# tokenize each cleaned lyric string into a list of words
df_['cleaned_lyrics'] = df_.cleaned_lyrics.str.strip().str.split(' ')
df_
## getting a list of all lemmed and stemmed keywords without stopwords
# flatten the per-song token lists into one big word list
lyrics_wordlist = df_['cleaned_lyrics'].tolist()
words_list = [word for tokens in lyrics_wordlist for word in tokens]
len(words_list)
# Creating a DataFrame of the Word Counts
lyric_word_frequencies = pd.DataFrame.from_dict(Counter(words_list), orient = 'index').reset_index()
lyric_word_frequencies = lyric_word_frequencies.rename(columns={'index':'word', 0:'count'})
# most frequent words first
lyric_word_frequencies = lyric_word_frequencies.sort_values(by = "count", ascending = False)
lyric_word_frequencies
lyric_word_frequencies.head(20)
lyric_word_frequencies.to_csv('cleaned_data/lyric_word_frequencies.csv', index = False)
top_100 = lyric_word_frequencies[:100]
top_100
# ## Top Words by Genre
pd.Series(genres_list).unique()
# per-genre sub-frames selected via the one-hot flag columns built earlier
pop = df_[df_['pop'] == 1]
rb = df_[df_['rb'] == 1]
rap = df_[df_['rap'] == 1]
rock = df_[df_['rock'] == 1]
country = df_[df_['country'] == 1]
m = df_[df_['no_genre'] == 1]
def top_lyrics(df, dfname):
'''Function to find the top lyric unigrams based on a df containing lyrics'''
## getting a list of all lemmed and stemmed keywords without stopwords
lyrics_wordlist = df['cleaned_lyrics'].tolist()
words_list = []
for i in lyrics_wordlist:
words_list.extend(i)
len(words_list)
# Creating a DataFrame of the Word Counts
lyric_word_frequencies = pd.DataFrame.from_dict(Counter(words_list), orient = 'index').reset_index()
lyric_word_frequencies = lyric_word_frequencies.rename(columns={'index':'word', 0:'count'})
lyric_word_frequencies = lyric_word_frequencies.sort_values(by = "count", ascending = False)
lyric_word_frequencies['genre'] = dfname
return lyric_word_frequencies
rb_lyrics = top_lyrics(rb, 'r-b')[:15]
rb_lyrics
pop_lyrics = top_lyrics(pop, 'pop')[:15]
country_lyrics = top_lyrics(country, 'country')[:15]
rock_lyrics = top_lyrics(rock, 'rock')[:15]
rap_lyrics = top_lyrics(rap, 'rap')[:15]
# stack the per-genre top-15 word tables into one long frame for export
full_lyrics = pd.concat([pop_lyrics,country_lyrics,rock_lyrics,rap_lyrics,rb_lyrics])
full_lyrics
full_lyrics.to_csv('cleaned_data/lyric_frequencies/top15_all_genres_lyric_frequencies.csv', index = False)
# ## Top Songs By Genre
# I forgot to get the top songs by genre streams so I am re importing the top 200 files and the previously created OHE (one-hot-encoded) df to create a new df with the streams
import pandas as pd
## OTHER MISC DATA CLEANING
# NOTE(review): hard-coded absolute paths tie this cell to one machine.
df1 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2017_weekly_all_locations_top200.csv')
df2 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2018_weekly_all_locations_top200.csv')
df3 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2019_weekly_all_locations_top200.csv')
df4 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2020_weekly_all_locations_top200.csv')
df = pd.concat([df1, df2, df3, df4])
# streams come in as '1,234,567' strings -> plain ints
df['streams'] = df['streams'].str.replace(",", '').astype(int)
global_df = df[df['country_chart'].str.contains("Global")]
# total streams per track across all weeks on the Global chart
global_df_total = global_df.groupby(["track", 'spotify_link']).sum().reset_index()
lyrics_df = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/OHE_all_top_songs.csv')
merged_df = pd.merge(lyrics_df, global_df_total, "inner", on = "track")
merged_df = merged_df.rename(columns={'streams': "total_streams"})
merged_df
# Per-genre top tracks by total streams.
# NOTE(review): slice lengths differ (11/11/13/13/12) although the output file
# is named "top10" — presumably to allow for duplicates; confirm intent.
pop = merged_df[merged_df['pop'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:11]
pop['genre'] = 'pop'
rb = merged_df[merged_df['rb'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:11]
rb['genre'] = 'r-b'
rap = merged_df[merged_df['rap'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:13]
rap['genre'] = 'rap'
rock = merged_df[merged_df['rock'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:13]
rock['genre'] = 'rock'
country = merged_df[merged_df['country'] == 1][['track', 'artist', 'total_streams', 'spotify_link']].reset_index(drop=True).sort_values(by=['total_streams'], ascending = False)[:12]
country['genre'] = 'country'
df_output = pd.concat([pop, rb, rap, rock, country])
df_output
df_output.iloc[59][3]
# Change all links to embed links
df_output.to_csv('../cleaned_data/top10_by_genre_all_time.csv', index = False)
# ### Creating All Topic Songs With Years
# +
import pandas as pd
df1 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2017_weekly_all_locations_top200.csv')
df1['year'] = '2017'
df2 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2018_weekly_all_locations_top200.csv')
df2['year'] = '2018'
df3 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2019_weekly_all_locations_top200.csv')
df3['year'] = '2019'
df4 = pd.read_csv('/Users/daphneyang/Desktop/5YMIDS_SP21/w209/spotify-visualizations/cleaned_data/2020_weekly_all_locations_top200.csv')
df4['year'] = '2020'
# +
df = pd.concat([df1, df2, df3, df4])
# peak weekly streams per (track, artist, chart, year)
all_locations_df_max = df.groupby(["track", 'artist','country_chart', 'year']).max().reset_index()[['track','artist',"year", 'streams', "country_chart",'spotify_link']]
all_locations_df_max
# -
all_locations_df_max.to_csv("cleaned_data/2017_2020_all_locations_max_streams.csv", index = False)
| eda_notebooks/top_charts_eda.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: venv-MPSF
# language: python
# name: venv-mpsf
# ---
import sys,os
sys.path.append('../')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
from tqdm import trange, tqdm
import random
import numpy as np
# %load_ext autoreload
# %reload_ext autoreload
# %autoreload 2
# !mkdir log
import sys,os
sys.path.append('')
from deep_rl import *
import matplotlib.pyplot as plt
import torch
from tqdm import trange, tqdm
import random
import numpy as np
from deep_rl.component.fourrooms_collect import *
from deep_rl.component.fourrooms import *
# %load_ext autoreload
# %reload_ext autoreload
# %autoreload 2
# +
# Per-layout environment settings: (cell count, DQN step budget,
# epsilon linear-decay horizon). Layouts not listed fall back to the default.
layout = 'open'
_layout_settings = {
    '3rooms': (101, 1e5, 6e4),
    '3roomsh': (101, 1e5, 6e4),
    'maze': (75, 1.5e5, 9e4),
}
cell_num, max_step_dqn, linear_schedule_dqn = _layout_settings.get(
    layout, (104, 7e4, 4e4))
# +
# Shape probe: trace a dummy 4x13x13 observation through conv/pool layers to
# find the flattened feature size needed by the fully connected layer.
t = torch.zeros([1, 4, 13, 13])
conv1 = nn.Conv2d(4, 16, 3, 1)
conv2 = nn.Conv2d(16, 32, 3, 1)
pool = nn.MaxPool2d(2, stride=2)
print(t.shape)
t = conv1(t)
print(t.shape, 'after conv1')
t = conv2(t)
print(t.shape, 'after conv2')
t = F.max_pool2d(t, kernel_size=2)
print(t.shape, 'after pooling')
t = torch.flatten(t, 1)
print(t.shape, 'after flatten')
# +
# closed-form version of the same computation:
# 32 channels * (spatial side after two 3x3 convs and a 2x2 pool)^2
dims = 32 * ((13 - (3-1)*2) // 2) ** 2
print(dims)
# layers = nn.ModuleList(
#     [layer_init(nn.Linear(dim_in, dim_out)) for dim_in, dim_out in zip(dims[:-1], dims[1:])])
# +
# from .network_utils import *
# from .network_bodies import *
# from torch.nn.parameter import Parameter
class DQNCNN(nn.Module):
    """
    Added by Surya.
    Convolutional DQN head: two 3x3 conv layers and a 2x2 max-pool followed
    by one fully connected layer mapping the flattened features to Q-values.
    """
    def __init__(self, output_dim, body=None, hidden_units=(), gate=F.relu, config=1):
        """
        output_dim -- number of actions (size of the Q-value output)
        body       -- optional feature body (currently unused in forward)
        hidden_units, gate, config -- kept for interface compatibility
        config -> type of learning on top of state abstraction
            0 - typical SR with weights sharing
            1 - learning SR without weights sharing
        """
        super(DQNCNN, self).__init__()
        self.body = body
        self.output_dim = output_dim
        # width is the side length of the (square) observation grid
        self.width = 13
        # CNN layers
        # Conv2d arguments: (in_channels, out_channels, kernel_size, stride)
        self.conv1 = nn.Conv2d(4, 64, 3, 1)
        self.conv2 = nn.Conv2d(64, 128, 3, 1)
        # Flattened size after two 3x3 convs (each shrinks width by 2) and one
        # 2x2 max-pool: 128 * ((13 - 4) // 2)^2 = 2048.
        # Fixes the original formula, which used (3-1)*4 (giving 512) and then
        # hard-coded 2048 and 4 in the Linear layer instead of using
        # self.fc_size and output_dim.
        self.fc_size = 128 * ((self.width - (3 - 1) * 2) // 2) ** 2
        self.fc = nn.Linear(self.fc_size, output_dim)
        self.gate = gate
    def forward(self, x):
        """Return Q-values of shape (batch, output_dim) for observations *x*.

        *x* may be any array-like reshapeable to (batch, 4, width, width).
        """
        phi = torch.Tensor(x)
        # reshape flat observations into (batch, channels, height, width)
        psi = phi.view(phi.size(0), 4, self.width, self.width)
        # conv stack with ReLU activations, then a 2x2 max-pool
        psi = F.relu(self.conv1(psi))
        psi = F.relu(self.conv2(psi))
        psi = F.max_pool2d(psi, 2)
        psi = torch.flatten(psi, 1)
        # linear head producing one Q-value per action
        psi = self.fc(psi)
        return psi
# +
learning_rate_dqn = 2e-3 #from 0.05 - 0.001
max_step_dqn = 1e5
linear_schedule_dqn = 6e4
def dqn_feature(**kwargs):
    """Configure, train, and return a DQN agent (deep_rl framework).

    Keyword args are merged into the Config; 'game' names the environment.
    The training loop below is an inlined equivalent of deep_rl's run_steps
    and exits by returning the agent once config.max_steps is reached.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.DEVICE = torch.device('cpu')
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, learning_rate_dqn)
    config.network_fn = lambda: DQNCNN(config.action_dim, SRIdentityBody(config.state_dim), \
                                       hidden_units=(2000,))
    # config.network_fn = lambda: VanillaNet(config.action_dim, FCBody(config.state_dim, hidden_units=(16,)))
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e5), batch_size=10)
    # epsilon-greedy schedule: decay 1.0 -> 0.1 over linear_schedule_dqn steps
    config.random_action_prob = LinearSchedule(1.0, 0.1, linear_schedule_dqn)
    config.discount = 0.9
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = max_step_dqn
    config.async_actor = False
    agent = DQNAgent(config)
    #run_steps function below
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    # agent.step()
    # plt.figure(figsize=(10,4))
    while True:
        # print(agent.actor._task.env.envs[0].goal)
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            agent.eval_episodes()
            pass
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
            break  # NOTE(review): unreachable after the return above
        agent.step()
        # plt.title('step: {}'.format(agent.total_steps), fontsize=20)
        # plt.imshow(agent.actor._task.env.envs[0].render(), cmap='Blues', )
        agent.switch_task()
    return agent
# -
dqn = dqn_feature(game='FourRoomsCollect')
# +
# sanity check: environment observation shape
env = ClctFourRooms(config=2)
env.reset()
print(env.observation().shape)
# +
# sanity check: number of valid start states in the 3roomsh layout
env = FourRoomsMatrix(layout='3roomsh')
print(len(env.init_states))
plt.figure()
plt.imshow(env.reset().reshape([13, 13]))
# +
# run one observation through an untrained DQNCNN head
task = Task('FourRoomsCollect')
# task.state_dim
cnn = DQNCNN(4, None, hidden_units=(2000,))
cnn([env.observation()])
# +
# render the environment twice side by side
env = ClctFourRooms()
env.reset()
plt.figure()
plt.subplot(2, 1, 1)
plt.imshow(env.render())
plt.subplot(2, 1, 2)
plt.imshow(env.render())
plt.colorbar()
# +
class SRNetCNN_MultiChannel(nn.Module):
    """
    Added by Surya.
    SR fully connected body network.

    Convolutional successor-representation (SR) network over a multi-channel
    grid observation. forward() returns the body features (phi), per-action
    successor features (psi), and Q-values derived from psi.
    """
    def __init__(self, output_dim, body, hidden_units=(3000,), gate=F.relu, config=0):
        """
        output_dim -- number of actions
        body       -- feature body; its feature_dim sets the input channel count
        config -> type of learning on top of state abstraction
        0 - typical SR with weights sharing
        1 - learning SR without weights sharing
        """
        super(SRNetCNN_MultiChannel, self).__init__()
        self.body = body
        self.output_dim = output_dim
        # side length of the square observation grid
        self.width = 13
        # number of input channels implied by the body's feature size
        self.init_channels = int(self.body.feature_dim / np.square(self.width))
        # CNN layers
        self.conv1 = nn.Conv2d(self.init_channels, 64, 3, 1)
        self.conv2 = nn.Conv2d(64, 128, 3, 1)
        # self.conv3 = nn.Conv2d(64, 128, 3, 1)
        # flattened size after two 3x3 convs and one 2x2 max-pool
        self.fc_size = 128 * ((self.width - (3-1)*2) // 2) ** 2
        # # FC layers
        # dims = (self.fc_size,) + hidden_units + (body.feature_dim * output_dim,)
        # # print(self.fc_size, hidden_units, body.feature_dim, output_dim)
        # # print(dims)
        # self.layers = nn.ModuleList(
        #     [layer_init(nn.Linear(dim_in, dim_out)) for dim_in, dim_out in zip(dims[:-1], dims[1:])])
        # print(self.layers)
        self.fc = nn.Linear(self.fc_size, int(self.body.feature_dim * output_dim / self.init_channels))
        self.gate = gate
        self.feature_dim = body.feature_dim * output_dim
        # NOTE(review): 169 (= 13*13) is hard-coded here and in forward();
        # presumably it should track self.width ** 2 — confirm.
        if(config == 0):
            self.psi2q = Psi2QNet(output_dim, 169)
        if(config == 1):
            self.psi2q = Psi2QNetFC(output_dim, 169)
        self.to(Config.DEVICE)
    def forward(self, x):
        """Return (phi, psi, out) for a batch of observations *x*:
        phi -- body features (only channel 1 is returned, see below);
        psi -- successor features, shape (batch, output_dim, 169);
        out -- Q-values produced by the psi2q head.
        """
        # print(x.shape, 'original shape')
        phi = self.body(tensor(x)) # shape: b x state_dim
        # print(phi.shape)
        # print(phi.shape)
        psi = phi
        # print(psi.shape, 'after body shape')
        # Convert to image
        # NOTE(review): the channel count 4 is hard-coded here; it looks like
        # it should be self.init_channels — confirm.
        psi = psi.view(phi.size(0), 4, self.width, self.width)
        # Conv layers
        psi = self.conv1(psi)
        # print(psi.shape, 'after conv1')
        psi = F.relu(psi)
        psi = self.conv2(psi)
        # print(psi.shape, 'after conv2')
        psi = F.relu(psi)
        psi = F.max_pool2d(psi, 2)
        # print(psi.shape, 'after pooling')
        psi = torch.flatten(psi, 1)
        # FC layers
        # for layer in self.layers[:-1]:
        #     psi = self.gate(layer(psi))
        # psi = self.layers[-1](psi)
        psi = self.fc(psi)
        # print(f'debug : {psi.shape}, {psi.size(0)}, {self.output_dim}, {self.body.feature_dim}')
        psi = psi.view(psi.size(0), self.output_dim, 169) # shape: b x action_dim x state_dim
        # print(f'after view shape : {psi.shape}')
        out = self.psi2q(psi)
        # keep only channel 1 of phi — assumes phi is (b, C, H, W) here,
        # which conflicts with the "b x state_dim" comment above; confirm.
        phi = phi[:, 1, :, :]
        return phi, psi, out
# +
# smoke test: push a zero batch through the SR network and inspect the
# shapes of (phi, psi, q)
task = Task('FourRoomsCollectNoTerm')
cnn = SRNetCNN_MultiChannel(4, SRIdentityBody(task.state_dim), \
                            hidden_units=(2000,), config=0)
ipt = np.zeros([10, 4, 13, 13])
a, b, c = cnn(ipt)
print(cnn)
print(a.shape)
print(b.shape)
print(c.shape)
# -
def dsr_feature_init(ref,**kwargs):
    """Configure and train a DSR agent, optionally warm-started from *ref*.

    ref    -- existing agent whose network weights initialize this one
              (None trains from scratch).
    kwargs -- merged into the deep_rl Config; 'game' names the environment,
              'style' selects the SR head variant.
    Returns the trained DSRAgent.
    """
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.async_actor = False
    # NOTE(review): torch.device('0') looks invalid — torch expects 'cpu',
    # 'cuda', 'cuda:0', or an integer index; confirm intent (likely 'cuda:0').
    config.DEVICE = torch.device('0')
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.c = 1
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, 0.002)
    # config.network_fn = lambda: SRNetCNN(config.action_dim, SRIdentityBody(config.state_dim), \
    #                                      hidden_units=(2000,), config=config.style) #CHECK
    config.network_fn = lambda: SRNet(config.action_dim, SRIdentityBody(config.state_dim), \
                                      hidden_units=(2000,), config=config.style) #CHECK
    config.replay_fn = lambda: AsyncReplay(memory_size=int(1e5), batch_size=10)
    # epsilon-greedy schedule: decay 1.0 -> 0.1 over linear_schedule_dqn steps
    config.random_action_prob = LinearSchedule(1.0, 0.1, linear_schedule_dqn)
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    # config.double_q = True
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.eval_interval = int(5e3)
    config.max_steps = max_step_dqn
    config.async_actor = False
    agent = DSRAgent(config)
    #run_steps function below
    config = agent.config
    agent_name = agent.__class__.__name__
    if(ref is not None):
        # warm start: copy whatever weights match from the reference network
        print(agent.network.load_state_dict(ref.network.state_dict(), strict=False))
    t0 = time.time()
    while True:
        if config.save_interval and not agent.total_steps % config.save_interval:
            agent.save('data/%s-%s-%d' % (agent_name, config.tag, agent.total_steps))
        if config.log_interval and not agent.total_steps % config.log_interval:
            # agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))
            t0 = time.time()
        if config.eval_interval and not agent.total_steps % config.eval_interval:
            agent.eval_episodes()
        if config.max_steps and agent.total_steps >= config.max_steps:
            return agent
            break  # NOTE(review): unreachable after the return above
        # import pdb; pdb.set_trace()
        agent.step()
        agent.switch_task()
    return agent
dsr = dsr_feature_init(ref=None, game='FourRoomsMatrix', freeze=0, style=0)
# +
max_step_avdsr = 1e5
linear_schedule_avdsr = 1e5
learning_rate_avdsr = 1e-3 #from 0.05 - 0.001
def avdsr_feature_A(**kwargs):
    """Train an average-DSR (avDSR) agent on top of the given DQN agents.

    kwargs are merged into the Config; 'agents' supplies the base policies
    and 'style' the SR head variant. Returns the trained avDSRAgent.
    """
    kwargs['tag'] = 'Training avDSR based on DQN agents'
    generate_tag(kwargs)
    kwargs.setdefault('log_level', 0)
    config = Config()
    config.merge(kwargs)
    config.task_fn = lambda: Task(config.game)
    config.eval_env = config.task_fn()
    config.c = 1
    config.optimizer_fn = lambda params: torch.optim.RMSprop(params, learning_rate_avdsr)
    # config.network_fn = lambda: SRNet(config.action_dim, SRIdentityBody(config.state_dim), \
    #                                   hidden_units=(), config=0) #CHECK
    config.network_fn = lambda: SRNetCNN_MultiChannel(config.action_dim, SRIdentityBody(config.state_dim), \
                                                      hidden_units=(2000,), config=config.style) #CHECK
    config.replay_fn = lambda: Replay(memory_size=int(3e5), batch_size=10)
    # schedule from 1 to 1: actions stay fully random (pure exploration)
    config.random_action_prob = LinearSchedule(1, 1, linear_schedule_avdsr) # CHECK
    config.discount = 0.99
    config.target_network_update_freq = 200
    config.exploration_steps = 0
    config.double_q = False
    config.sgd_update_frequency = 4
    config.gradient_clip = 5
    config.max_steps = max_step_avdsr
    config.async_actor = False
    agent = avDSRAgent(config, config.agents, style='DQN')
    #run_steps function below
    config = agent.config
    agent_name = agent.__class__.__name__
    t0 = time.time()
    # agent.network = load_agent('avdsr-A')
    while True:
        if config.log_interval and not agent.total_steps % config.log_interval:
            agent.logger.info('steps %d, %.2f steps/s' % (agent.total_steps, config.log_interval / (time.time() - t0)))
            t0 = time.time()
        if config.max_steps and agent.total_steps >= config.max_steps:
            # store_agent(agent.network, 'avdsr-A-'+game)
            return agent
            break  # NOTE(review): unreachable after the return above
        # import pdb; pdb.set_trace()
        agent.step()
        agent.switch_task()
# -
avdsr = avdsr_feature_A(game='FourRoomsMatrix', agents=[None], choice=0, style=0)
| notebooks/env_test.ipynb |
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.3.1
# language: julia
# name: julia-1.3
# ---
# + [markdown] colab_type="text" id="view-in-github"
# <a href="https://colab.research.google.com/github/GdMacmillan/ml_flux_tutorial/blob/master/ML_Flux_Tutorial.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] colab_type="text" id="55upadRUMEL-"
# # ML/FLux Tutorial
#
# by <NAME>
# -
# ## 0. Google Colaboratory
#
# This environment is meant to be used in Google Colaboratory so that the user can take advantage of Google cloud hardware accelerators. A Python 3 environment is the default for Colab notebooks, but a hack can be used to install and run a Julia Kernel.
#
# Using Colab is free, with a few constraints on how long a session can run, how many sessions can be run concurrently, and what exact GPU hardware is allocated. The full details are available in the following FAQ: https://research.google.com/colaboratory/faq.html
#
# To run this, you obviously need access to the notebook within Colab. Colab makes it very easy either to use github notebooks:
#
# - From within colab, you can `File>Open` a notebook. A tab called Github enables quick access to repositories.
#
# - Simpler, just click on the button at the top of this page.
#
#
# + [markdown] colab_type="text" id="55upadRUMEL-"
# ## 1. Setting up environment
#
# The below code will download and install dependencies so that this hack can be applied and Julia becoming available as kernel. Please execute the following cell.
# + colab={"base_uri": "https://localhost:8080/", "height": 1000} colab_type="code" id="oMSuTc3pDlHv" outputId="0b171d07-74e4-4ae6-deaa-91cbbc6fa7b5"
!curl -sSL "https://julialang-s3.julialang.org/bin/linux/x64/1.3/julia-1.3.0-linux-x86_64.tar.gz" -o julia.tar.gz
!tar -xzf julia.tar.gz -C /usr --strip-components 1
!rm -rf julia.tar.gz*
!julia -e 'using Pkg; pkg"add IJulia; add CuArrays; add CUDAnative; add CUDAdrv; add Flux; add BenchmarkTools; add MLDatasets; add ImageMagick; add ImageCore; add Plots; precompile"'
# + [markdown] colab_type="text" id="n08M7vQ3rFbN"
# Now a full Julia environment is available. To use it, two choices:
#
# Change the notebook settings: click on `Edit > Notebook Settings` and select `julia`. Even if the kernel is already selected, click on `save`.
#
# Alternatively, refresh the page with ***Ctrl-r***.
# + [markdown] colab_type="text" id="n08M7vQ3rFbN"
# ### Set up some Cuda arrays and test for GPU
#
# Now the Julia environment should be ready to go. First, lets test to make sure the GPU is being utilized. You need to have CUDA available to use CuArrays.
#
# But before that, let's make sure everything is fine:
#
# -
[1, 2, 3]
# If this is a python kernel, it will be interpreted as list. But you should instead expect a Julia array and see:
#
# ```
# 3-element Array{Int64,1}:
#  1
#  2
#  3
# ```
# Now let's check that CUDA is fine reading which version is installed:
using CUDAdrv, CUDAnative
# report the installed CUDA driver and compiler versions
print(CUDAdrv.version())
print("\n")
print(CUDAnative.version())
# We can also see which hardware is allocated:
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="42zhTaxpZqC7" outputId="3b39a9fb-6cdb-4770-c557-2cc219ecffd6"
# name of the GPU device Colab allocated to this session
CUDAdrv.name(CuDevice(0))
# -
# Let's try a few simple array operations to check:
# + colab={"base_uri": "https://localhost:8080/", "height": 51} colab_type="code" id="5aV5UN_CuMq5" outputId="dd71d114-6140-43d9-9af9-71f2a0dda8db"
using CuArrays
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="DdlwETEvpb9k" outputId="874b5be1-e0d3-48c7-f280-054cfb4cd9a7"
# toy linear model with its parameters and data on the GPU
W = cu(rand(2, 5)) # a 2×5 CuArray
b = cu(rand(2))
predict(x) = W*x .+ b
loss(x, y) = sum((predict(x) .- y).^2)
x, y = cu(rand(5)), cu(rand(2)) # Dummy data
loss(x, y) # ~ 3
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="LIbugAuFroLc" outputId="c8eab39c-7cb5-4128-b8e1-9379120be9c0"
"""
    say(num)

Print `num` from each CUDA thread along with the thread index.

(The description was previously a triple-quoted string *inside* the body,
which Julia evaluates and discards; a docstring must precede the function.)
"""
function say(num)
    @cuprintf("Thread %ld says: %ld\n",
              threadIdx().x, num)
    return
end
# + colab={} colab_type="code" id="XMTg1KjZr1f4"
@cuda threads=8 say(42)
# + [markdown] colab_type="text" id="SVlkDI_jRuS2"
# Better example: Apply function to array
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="lVYthO5Hr4Nl" outputId="9c4b1657-5971-40ad-e7b1-b4e31ab17c6f"
a = CuArray([1., 2., 3.])
b = CuArray([3., -3., 6.])
# CUDA kernel: overwrite a[i] with the squared difference (a[i]-b[i])^2,
# one thread per array element
function diff_sq(a, b)
    f = (x, y) -> (x - y)^2
    i = threadIdx().x
    a[i] = f(a[i], b[i])
    return
end
norm = x -> sqrt(sum(x))
# launch one thread per element, then reduce on the host: Euclidean distance
@cuda threads = length(a) diff_sq(a, b)
norm(a)
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="W-BW4HVgqdi_" outputId="a340fc4e-3a75-40f4-b886-fec20a9a73cf"
# NOTE(review): `|> gpu` pipes the scalar result of `sum` through `gpu`,
# not the computation onto the GPU — confirm this does what was intended.
f(x, y) = sum((x .- y).^2) |> gpu
# TODO: check if faster than the above
# + colab={} colab_type="code" id="tioMXu5DuQ7C"
using Flux: Conv, gpu
using BenchmarkTools
# + [markdown] colab_type="text" id="pw_hWS9MG7kO"
# ### Benchmark image convolution test
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="vriNU_nUr6xg" outputId="c514da2e-59a1-418d-d907-4b827be3f6a9"
# Benchmark the same convolution layer on CPU and GPU inputs.
random_image_cpu = randn(100, 100, 3, 100)
random_image_gpu = cu(randn(100, 100, 3, 100))
m = Conv((7, 7), 3 => 64)
println("""Time (s) to convolve 7x7x3 filter over random 100x100x3x100 images
        (width x height x channel x batch).""")
println("CPU (s):")
@btime m(random_image_cpu);
# Use the same 3 => 64 layer on the GPU so the two timings are comparable.
# (The original constructed a much smaller 3 => 3 layer here, which made the
# CPU/GPU comparison meaningless.)
m = Conv((7, 7), 3 => 64) |> gpu
println("GPU (s):")
@btime m(random_image_gpu);
# + [markdown] colab_type="text" id="WBR8nefdCePe"
# TODO: Perform a benchmark of julia wrapper for tensorflow: [TensorFlow.jl](https://github.com/malmaud/TensorFlow.jl)
# + [markdown] colab_type="text" id="i32J1df0KLzN"
# ## 2. Use Case: Fashion MNIST
#
# + colab={"base_uri": "https://localhost:8080/", "height": 765} colab_type="code" id="HhLu9aGNI25B" outputId="9e4467f1-bb8d-4dcf-c4b8-0a7401873bde"
using MLDatasets
FashionMNIST.download(i_accept_the_terms_of_use=true)
# load full training set
train_x, train_y = FashionMNIST.traindata();
# load full test set
test_x, test_y = FashionMNIST.testdata();
# + colab={"base_uri": "https://localhost:8080/", "height": 587} colab_type="code" id="tuMdJsFLjMBU" outputId="7f352bc1-0444-48c6-8cda-8ceef7288b4b"
using ImageCore
FashionMNIST.convert2image(FashionMNIST.traintensor(4))
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="6mTjYiUBjOQ-" outputId="084167eb-385e-4217-c6da-1c6b2e278371"
@doc(FashionMNIST.convert2image) # same as `? FashionMNIST.convert2image`
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="OLaXDqZekPv9" outputId="3877918c-59d8-48bb-897a-9caf79c99671"
using Flux, Statistics
using Flux: onehotbatch, onecold, crossentropy, throttle, params
using Statistics: mean
using Base.Iterators: partition
using Random
Random.seed!(32)
# + colab={"base_uri": "https://localhost:8080/", "height": 68} colab_type="code" id="OZ1iyJltL8is" outputId="6922836c-8201-48ec-bd15-e8eaf5696c95"
@info("Constructing MLP model...")
# CNN classifier for 28x28x1 FashionMNIST images: three Conv/BatchNorm/
# MaxPool/Dropout stages, then two dense layers and a softmax over 10 classes.
# The whole chain is moved to the GPU via `|> gpu`.
model() = Chain(
    Conv((5, 5), 1 => 64, elu, pad=(2, 2), stride=(1, 1)),
    BatchNorm(64),
    MaxPool((3, 3), pad=(2, 2), stride=(2, 2)),
    Dropout(0.25),
    Conv((5, 5), 64 => 128, elu, pad=(2, 2), stride=(1, 1)),
    BatchNorm(128),
    MaxPool((2, 2), stride=(2, 2)),
    Dropout(0.25),
    Conv((5, 5), 128 => 256, elu, pad=(2, 2), stride=(1, 1)),
    BatchNorm(256),
    MaxPool((2, 2), stride=(2, 2)),
    Dropout(0.25),
    # flatten (H, W, C, N) feature maps to (features, N) for the dense layers
    x -> reshape(x, :, size(x, 4)),
    Dense(2304, 256, elu),
    Dropout(0.5),
    Dense(256, 10),
    softmax) |> gpu
# + colab={} colab_type="code" id="0FzGsjlGkrni"
N = size(train_x)[end]
# random permute train indexes
ixs = collect(1:N)
shuffle!(ixs)
n = Int(floor(.9 * N))
# batch size
bs = 100
sz = (28, 28, 1, bs)
# 90/10 hold out split
train_split, val_split = ixs[1:n], ixs[n + 1:end]
train_data = train_x[:, :, train_split], train_y[train_split]
val_data = train_x[:, :, val_split], train_y[val_split]
test_data = test_x, test_y
"""
    make_batches(data; bs=100)

Make batches of (x, y) data; returns a vector of
`(features, onehot_labels)` tuples moved to the GPU.

The features are reshaped to `(28, 28, 1, bs)`, so the sample count is
assumed to be divisible by `bs`. (The description was previously wrapped in
triple backticks — a discarded `Cmd` literal — instead of a docstring.)
"""
function make_batches(data; bs=100)
    n = size(data[1])[end]
    sz = (28, 28, 1, bs)
    iter = [(reshape(Float32.(data[1][:, :, i]), sz), onehotbatch(data[2][i], 0:9)) for i in partition(1:n, bs)] |> gpu
end
train = make_batches(train_data)
val = make_batches(val_data)
test = make_batches(test_data);
# + [markdown] colab_type="text" id="wVDk10Cc18yE"
# ### Testing the forward pass on a single image
# + colab={"base_uri": "https://localhost:8080/", "height": 54} colab_type="code" id="74SWEXwTH0yM" outputId="5b088f2e-68d1-4101-9360-35c86f55d23d"
m = model()
# + colab={"base_uri": "https://localhost:8080/", "height": 493} colab_type="code" id="L9zHE1IecSrk" outputId="20ad5c3a-31f1-4276-c83b-a1b0b91c518e"
# grab the first training image as a 28x28x1x1 batch for a forward-pass test
img = reshape(train[1][1][:, :, :, 1], (28, 28, 1, 1))
# img = reshape(val[1][1][:, :, :, 1], (28, 28, 1, 1))
# + colab={"base_uri": "https://localhost:8080/", "height": 204} colab_type="code" id="fn4eY1ZpuyAn" outputId="a85e8c14-ed6b-42c3-c868-8dc1d6c4c070"
m(img)
# + [markdown] colab_type="text" id="tmpQzSPs59lY"
# ### Train on loss function for one Epoch
#
# Submitting a callback function can be used to interupt the execution of a training loop after each batch to compute test statistics.
#
#
# + colab={"base_uri": "https://localhost:8080/", "height": 119} colab_type="code" id="Q6m7Frq3uUkg" outputId="02d55028-ebac-456d-c735-340a83dacdd7"
eval_acc = []
batch_idx = 0
"""
    calc_metrics(data)

Compute a thresholded element-wise accuracy over `data`, push the value onto
the global `eval_acc` array, and increment the global `batch_idx` counter
(reporting it every 100 batches).

(The description was previously wrapped in triple backticks — a discarded
`Cmd` literal — instead of a docstring.)
"""
function calc_metrics(data)
    global batch_idx
    acc = 0
    for batch in data
        x, y = batch
        pred = m(x) .> 0.5
        # confusion-matrix counts from the element-wise 0/1 comparison
        tp = Float32(sum((pred .+ y) .== Int16(2)))
        fp = Float32(sum((pred .- y) .== Int16(1)))
        fn = Float32(sum((pred .- y) .== Int16(-1)))
        tn = Float32(sum((pred .+ y) .== Int16(0)))
        acc += (tp + tn) / (tp + tn + fp + fn)
    end
    acc /= length(data)
    push!(eval_acc, acc)
    if batch_idx % 100 == 0
        @show(batch_idx)
    end
    batch_idx += 1
end
# Define the loss, callback and optimizer
loss(x, y) = crossentropy(m(x), y)
evalcb = () -> calc_metrics(val)
opt = ADAM()
# Start model training
Flux.train!(loss, params(m), train, opt, cb = evalcb)
# + colab={"base_uri": "https://localhost:8080/", "height": 421} colab_type="code" id="YcGykDlh5GT0" outputId="f6dc1831-f8ab-489b-b872-a5dd807b8ff9"
using Plots
# validation accuracy recorded by the callback after each training batch
x = 1:length(train); y = eval_acc;
plot(x,y)
# + [markdown] colab_type="text" id="gwXLYNW54qid"
# ### Check the results (inference)
#
# Now that training is complete, let's see how well the model can predict on the test set...
# + colab={"base_uri": "https://localhost:8080/", "height": 34} colab_type="code" id="BuEkqB1Em68i" outputId="c2aee6f0-c3fa-4637-8071-b755f3429be6"
eval_acc = [] # need to reset to calc test accuracy. this could be done better
calc_metrics(test)
println("accuracy on test data: ", eval_acc[1])
| ML_Flux_Tutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %matplotlib inline
from dask.distributed import Client
import numpy as np
import pandas as pd
import xarray as xr
import importlib
import ELMlib
importlib.reload(ELMlib)
# -
client = Client(n_workers=2, threads_per_worker=2, memory_limit='1GB')
client
#ds = xr.open_dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
from netCDF4 import Dataset
ds = Dataset('../Data/14C_spinup_holger_fire.2x2_small.nc')
#lat, lon = ds.coords['lat'], ds.coords['lon']
lat, lon = ds['lat'][:], ds['lon'][:]
lat_indices, lon_indices = np.meshgrid(
range(len(lat)),
range(len(lon)),
indexing='ij'
)
lats, lons = np.meshgrid(lat, lon, indexing='ij')
df_pd = pd.DataFrame({
'cell_nr': range(len(lat)*len(lon)),
'lat_index': lat_indices.flatten(),
'lon_index': lon_indices.flatten(),
'lat': lats.flatten(),
'lon': lons.flatten()
})
df_pd
import dask.dataframe as dask_df
df_dask = dask_df.from_pandas(df_pd, npartitions=4)
df_dask
parameter_set = ELMlib.load_parameter_set(
ds_filename = '../Data/14C_spinup_holger_fire.2x2_small.nc',
time_shift = -198*365,
nstep = 10
)
# +
def func(line):
    """Run the ELMlib 12C model for one grid-cell row of the dask dataframe.

    Returns the 5-tuple (cell_nr, log, xs_12C_data, us_12C_data, rs_12C_data)
    produced by ``ELMlib.load_model_12C_data`` for this location.
    """
    # Build the location descriptor ELMlib expects from the dataframe row.
    location = dict(
        cell_nr=int(line.cell_nr),
        lat_index=int(line.lat_index),
        lon_index=int(line.lon_index),
    )
    # Unpack explicitly so a wrong-arity return from the library fails loudly.
    cell_nr, log, xs_12C, us_12C, rs_12C = ELMlib.load_model_12C_data(
        parameter_set, location
    )
    return cell_nr, log, xs_12C, us_12C, rs_12C
df_dask_2 = df_dask.apply(func, axis=1, meta=('A', 'object'))
# -
df_dask_2.compute()
type(df_dask_2)
df_dask_2
# +
#list(df_dask_2)
# -
pd.DataFrame(list(df_dask_2), columns=('cell_nr', 'log', 'xs_12C_data', 'us_12C_data', 'rs_12C_data'))
| notebooks/ELM_dask.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # How does the SWR vary along a line?
# Let's assume a 10 MHz source powering an antenna (of load $Z_L$) through a transmission line of length $L$. Depending on the location of the SWR-meter, what would one read?
#
# This notebook is inspired from the reference: ["Facts About SWR, Reflected Power, and Power Transfer on Real Transmission Lines with Loss"](https://www.fars.k6ya.org/docs/Facts-about-SWR-and-Loss.pdf) by <NAME> given at ARRL Pacificon Antenna Seminar in 2010.
#
# 
#
# Let's solve this question using `scikit-rf`. But first, the traditional Python imports:
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
import skrf as rf
rf.stylely()
# ## Lossless lines
# Let's start with a lossless line of propagation constant $\gamma=j\beta$ and characteristic impedance $z_0=50\Omega$ (real).
freq = rf.Frequency(10, unit='MHz', npoints=1)
# +
# load and line properties
Z_L = 75 # Ohm
Z_0 = 50 # Ohm
L = 50 # m
# propagation constant
beta = freq.w/rf.c
gamma = 1j*beta
# -
# Below we calculate the SWR of the line as a function of $z$ the line length measured from the load ($z=0$ at the load, $z=L$ at the source).
z = np.linspace(start=L, stop=0, num=301)
SWRs = rf.zl_2_swr(z0=Z_0, zl=rf.zl_2_zin(Z_0, Z_L, gamma*z))
fig, ax = plt.subplots()
ax.plot(z, SWRs, lw=2)
ax.set_xlabel('z [m]')
ax.set_ylabel('SWR')
ax.set_title('SWR along the (lossless) line')
ax.invert_xaxis()
ax.axvline(0, lw=8, color='k')
ax.axvline(L, lw=8, color='k')
ax.annotate('Load', xy=(0, 1.55), xytext=(10, 1.575),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.annotate('Source', xy=(50, 1.55), xytext=(40, 1.575),
arrowprops=dict(facecolor='black', shrink=0.05),
)
# As expected, the SWR is the same everywhere along the line as the forward and backward wave amplitudes are also the same along the line.
# ## Lossy Lines
# Let's take the previous example but this time on a lossy line. The line is defined with a propagation constant $\gamma=\alpha + j\beta$ :
alpha = 0.01 # Np/m. Here a dummy value, just for the sake of the example
gamma = alpha + 1j*beta
z = np.linspace(0, L, num=101)
SWRs = rf.zl_2_swr(z0=Z_0, zl=rf.zl_2_zin(Z_0, Z_L, gamma*z))
fig, ax = plt.subplots()
ax.plot(z, SWRs, lw=2)
ax.set_xlabel('z [m]')
ax.set_ylabel('SWR')
ax.set_title('SWR along the (lossy) line')
ax.invert_xaxis()
ax.axvline(0, lw=8, color='k')
ax.axvline(L, lw=8, color='k')
ax.annotate('Load', xy=(0, 1.15), xytext=(10, 1.2),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.annotate('Source', xy=(50, 1.4), xytext=(40, 1.5),
arrowprops=dict(facecolor='black', shrink=0.05),
)
# For a lossy line, the SWR is maximum at the load and decreases to be minimum at the source side.
#
# Let's see how the impedance varies along the line:
# +
Zins = rf.zl_2_zin(Z_0, Z_L, gamma*z)
fig, ax = plt.subplots()
ax.plot(z, np.abs(Zins/Z_0), lw=2, label='$Z/z_0$')
ax.plot(z, SWRs, lw=2, ls='--', label=r'$SWR$')
ax.plot(z, 1/SWRs, lw=2, ls='--', label=r'$1/SWR$')
ax.set_xlabel('z [m]')
ax.set_ylabel('Z (normalized to $z_0$)')
ax.set_title('Impedance along the line')
ax.invert_xaxis()
ax.axvline(0, lw=8, color='k')
ax.axvline(L, lw=8, color='k')
ax.annotate('Load', xy=(0, 1.2), xytext=(10, 1.275),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.annotate('Source', xy=(50, 0.6), xytext=(40, 0.7),
arrowprops=dict(facecolor='black', shrink=0.05),
)
ax.legend()
# -
# The previous result is due to the fact that voltages and currents vary along the transmission line:
# Source drives the line through its internal impedance Z_s = Z_0.
V_s = 1
# Input impedance seen looking toward the load at each position z.
Z_in = rf.zl_2_zin(Z_0, Z_L, gamma*z)
# Z_s = Z_0
# Voltage divider between source impedance and line input impedance.
V_in = V_s * Z_in/(Z_0 + Z_in)
# BUGFIX: the input current is I_in = V_s/(Z_s + Z_in) (equivalently
# V_in/Z_in); the previous V_in/(Z_0 + Z_in) divided by the series
# impedance twice, scaling |I| (and hence V/I) incorrectly.
I_in = V_s/(Z_0 + Z_in)
# note that here we are going from source to load
V, I = rf.voltage_current_propagation(V_in, I_in, Z_0, gamma*z)
fig, ax = plt.subplots(2,1,sharex=True)
ax[0].plot(z, np.abs(V), lw=2)
ax[1].plot(z, np.abs(I), lw=2, color='C1')
ax[1].set_xlabel('z [m]')
ax[0].set_ylabel('|V| (V)')
ax[1].set_ylabel('|I| (A)')
ax[0].set_title('Voltage')
ax[1].set_title('Current')
[a.axvline(0, lw=8, color='k') for a in ax]
[a.axvline(L, lw=8, color='k') for a in ax]
ax[1].annotate('Load', xy=(50, 0.0075), xytext=(40, 0.0075),
arrowprops=dict(facecolor='black', shrink=0.05))
ax[1].annotate('Source', xy=(0, 0.001), xytext=(10, 0.001),
arrowprops=dict(facecolor='black', shrink=0.05))
| doc/source/examples/networktheory/Transmission Lines and SWR.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # PyGraphistry Example: Graphing the Marvel Universe
# ### Plots the hero social network based on co-appearances between heroes
# **Install: `pip install "graphistry[igraph]"`**
#
# Note: `pip install igraph` is the wrong package. if installing manually, use `python-igraph`
#
# * Uses pandas, igraph, and PyGraphistry
# * Combines comic book and hero data
# * Near the end, computes clusters and, to avoid a hairball, weakens the edge weights between nodes of different clusters
#
# +
from __future__ import print_function
from io import open
import pandas as pd
import igraph # Install Igraph with pip install python-igraph
import graphistry
# To specify Graphistry account & server, use:
# graphistry.register(api=3, username='...', password='...', protocol='https', server='hub.graphistry.com')
# For more options, see https://github.com/graphistry/pygraphistry#configure
# -
# # Load heroes, comics, appearences
with open('../../data/characters.txt', encoding="latin-1") as f:
lines = f.readlines()
heroes = pd.DataFrame(
list(map(lambda x: (int(x.split(':')[0].split(' ')[1]), x.split(': ', 1)[1].split('\n')[0]), lines)),
columns=['hero_id', 'hero_name'])
print('#Heroes:', len(heroes))
heroes[:3]
with open('../../data/comics.txt', encoding="latin-1") as f:
lines = f.readlines()
comics = pd.DataFrame(
list(map(lambda x: (int(x.split(':')[0].split(' ')[1]), x.split(': ', 1)[1].split('\n')[0]), lines)),
columns=['comic_id', 'comic_name'])
print('#Comics: ', len(comics))
comics[:3]
with open('../../data/appearances.txt', encoding="latin-1") as f:
lines = f.readlines()[len(heroes) + len(comics) + 2:]
def expand (line):
    """Turn one appearance line "hero comic1 comic2 ..." into (hero, comic) pairs."""
    hero, *comics = [int(token) for token in line.split(' ')]
    return [(hero, comic) for comic in comics]
appearences = pd.DataFrame(
[item for sublist in list(map(expand, lines)) for item in sublist],
columns=['hero', 'comic'])
appearences[:3]
# #Link heroes who co-appear
# You may need to install numexpr: pip install numexpr
coappearences = \
appearences\
.merge(appearences, on='comic')\
.merge(comics, left_on='comic', right_on='comic_id')\
[['hero_x', 'hero_y']]\
.query('hero_x > hero_y')
unique_coappearences = coappearences.drop_duplicates(['hero_x', 'hero_y']).set_index(['hero_x', 'hero_y'])
unique_coappearences['counts'] = coappearences.groupby(['hero_x', 'hero_y']).size()
unique_coappearences = unique_coappearences.reset_index()
print('#edges', len(unique_coappearences))
unique_coappearences[:3]
# # Plot!
g = graphistry.bind(source='hero_x', destination='hero_y', edge_title='counts')
g.plot(unique_coappearences)
# # Label Nodes
# Here we are using two dataframes, one for edges and one for nodes
g2 = g.bind(node='hero_id', point_title='hero_name')
g2.plot(unique_coappearences, heroes)
# # Color using igraph infomap
# ### Infomap Community Detection
#Warning: slow
ig = g2.pandas2igraph(unique_coappearences, directed=False)
clusters = ig.community_infomap()
(i_edges, i_nodes) = g2.igraph2pandas(ig)
print('#clusters', str(len(list(set(clusters.membership)))))
nodes_colored = pd.DataFrame({'cluster': clusters.membership})\
.reset_index().rename(columns={'index': 'denseid'})\
.merge(i_nodes.reset_index().rename(columns={'index':'denseid'}), on='denseid')\
.merge(heroes, left_on='hero_id', right_on='hero_id')
print('#colored nodes', str(len(nodes_colored)))
nodes_colored[:3]
nodes_colored['color'] = nodes_colored.apply(lambda x: x['cluster'] % 9, axis=1)
nodes_colored.pivot_table(index=['color'], aggfunc=lambda x: len(x.unique()))
g3 = g2.bind(point_color='color', edge_weight='counts')
g3.plot(unique_coappearences, nodes_colored)
# # Restrict to biggest communities
big_clusters = nodes_colored\
.pivot_table(index=['cluster'], aggfunc=lambda x: len(x.unique()))\
.rename(columns={'hero_id': 'cluster_size'})\
.query('cluster_size > 100')\
.reset_index()[['cluster', 'cluster_size']]
print('# big clusters', len(big_clusters))
big_clusters[:3]
good_nodes = nodes_colored.merge(big_clusters, on='cluster')
print('# nodes', len(good_nodes))
good_nodes[:3]
good_edges = unique_coappearences\
.merge(good_nodes, left_on='hero_x', right_on='hero_id')\
.merge(good_nodes, left_on='hero_y', right_on='hero_id')\
[['hero_x', 'hero_y', 'counts']]
print('# edges', len(good_edges))
good_edges[:3]
g3.plot(good_edges, good_nodes)
# # Separate communities
# ### Treat intra-community edges as strong edge weights, and inter-community as weak edge weight
# +
#label edges whether they stay inside a cluster or connect nodes in different clusters
good_edges2 = good_edges\
.merge(\
good_nodes[['cluster', 'hero_id']].rename(columns={'cluster': 'cluster_x'}),\
left_on='hero_x', right_on='hero_id')\
.merge(\
good_nodes[['cluster', 'hero_id']].rename(columns={'cluster': 'cluster_y'}),\
left_on='hero_y', right_on='hero_id')
good_edges2['is_inner'] = good_edges2.apply(lambda x: x['cluster_x'] == x['cluster_y'], axis=1)
#bind to edge_weight
good_edges2['weight'] = good_edges2.apply(lambda x: 10 if x['is_inner'] else 8, axis=1)
good_edges2 = good_edges2[['hero_x', 'hero_y', 'counts', 'is_inner', 'weight']]
good_edges2[:3]
# -
# ### Plot; control the edge weight in the settings panel
g3.bind(edge_weight='weight').plot(good_edges2, good_nodes)
# # Filter by k-core shell
shells = ig.shell_index()
print('#shells', str(len(list(set(shells)))))
nodes_shelled = pd.DataFrame({'shell': shells})\
.reset_index().rename(columns={'index': 'denseid'})\
.merge(nodes_colored, on='denseid')
print('#shelled nodes', str(len(nodes_shelled)))
nodes_shelled[:3]
# ### Plot: Use the histogram tool to filter for the smaller shells
g3.plot(unique_coappearences, nodes_shelled)
| demos/more_examples/simple/MarvelTutorial.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2 (Proactive)
# language: python
# name: proactive
# ---
# %load_ext memory_profiler
# %memit
# +
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# Set below to 0 or 1 for GPU
# %env CUDA_VISIBLE_DEVICES=1
from __future__ import absolute_import, division, print_function
import json
import os
if os.path.isfile("../code/config.json"):
with open("../code/config.json", "r") as f:
config = json.load(f)
else:
print("Please run setup.py in this directory before running any .ipynb's.")
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from glob import glob
import tensorflow as tf
import time
import shutil
import cPickle as pickle
from collections import OrderedDict, defaultdict
import sys
sys.path.append("../code")
from utils.bags import *
from utils.learning import *
from multimodal_generative_model import *
from st_graph import *
from data_utils import *
from stg_node import *
from experiment_details import NUM_DATAFILES, ROWS_TO_EXTRACT, EDGE_STATE_COMBINE_METHOD, EDGE_RADIUS
config
# +
# cvd = %env CUDA_VISIBLE_DEVICES
model_dir = os.path.join(config["models_dir"], ("GPU" + cvd if cvd else "CPU") + "_checkpoints")
# shutil.rmtree(model_dir, ignore_errors=True)
sc = tf.ConfigProto(device_count={'GPU': 1}, allow_soft_placement=True, log_device_placement=False)
rc = tf.estimator.RunConfig().replace(session_config=sc, model_dir=model_dir,
save_summary_steps=100, save_checkpoints_steps=5000,
keep_checkpoint_max=5000, tf_random_seed=None)
rc.environment = None # required due to a bug in tf.contrib.learn.Experiment.train_and_evaluate
model_dir
# -
# Edit model hyperparameters if desired, e.g.:
# hps.N = 2
# hps.K = 5
hps.values()
# %memit
# # Data Loading
# +
NUM_DATAFILES = 2
ROWS_TO_EXTRACT = 100
robot_node = STGNode('<NAME>', 'HomeC')
positions_map_path = "data/positions_map.pkl"
pos_dict_path = "data/pos_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, str(ROWS_TO_EXTRACT))
if os.path.isfile(pos_dict_path):
with open(pos_dict_path, 'rb') as f:
pos_dict = pickle.load(f)
else:
pos_dict = get_pos_dict(train_files,
positions_map_path=positions_map_path,
rows_to_extract=ROWS_TO_EXTRACT)
with open(pos_dict_path, 'wb') as f:
pickle.dump(pos_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
STG = SpatioTemporalGraphCVAE(pos_dict, robot_node,
edge_radius=EDGE_RADIUS,
edge_state_combine_method=EDGE_STATE_COMBINE_METHOD)
train_data_dict_path = "data/train_data_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, ROWS_TO_EXTRACT)
if os.path.isfile(train_data_dict_path):
with open(train_data_dict_path, 'rb') as f:
train_data_dict = pickle.load(f)
else:
train_data_dict = get_data_dict(train_files,
positions_map_path=positions_map_path,
rows_to_extract=ROWS_TO_EXTRACT)
with open(train_data_dict_path, 'wb') as f:
pickle.dump(train_data_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
hps.add_hparam("nodes_standardization", train_data_dict["nodes_standardization"])
hps.add_hparam("extras_standardization", {"mean": train_data_dict["extras_mean"],
"std": train_data_dict["extras_std"]})
hps.add_hparam("labels_standardization", train_data_dict["labels_standardization"])
hps.add_hparam("pred_indices", train_data_dict["pred_indices"])
eval_data_dict_path = "data/eval_data_dict_%d_files_%s_rows.pkl" % (NUM_DATAFILES, str(ROWS_TO_EXTRACT))
if os.path.isfile(eval_data_dict_path):
with open(eval_data_dict_path, 'rb') as f:
eval_data_dict = pickle.load(f)
else:
eval_data_dict = get_data_dict(eval_files, positions_map_path=positions_map_path)
with open(eval_data_dict_path, 'wb') as f:
pickle.dump(eval_data_dict, f, protocol=pickle.HIGHEST_PROTOCOL)
train_input_function = tf.estimator.inputs.numpy_input_fn(train_data_dict["input_dict"],
y = train_data_dict["labels"],
batch_size = hps.batch_size,
num_epochs = None,
shuffle = True)
# Need all possible nodes to have been seen by the STG above, does
# that mean we feed in the all_files pos_dict in order to create
# the nodes ahead of time?
token_eval_node = None
token_eval_label_node = None
for node in eval_data_dict["input_dict"]:
if isinstance(node, STGNode):
token_eval_node = node
token_eval_label_node = convert_to_label_node(node)
break
for node in train_data_dict["input_dict"]:
if isinstance(node, STGNode):
if node not in eval_data_dict["input_dict"]:
eval_data_dict["input_dict"][node] = np.zeros_like(eval_data_dict["input_dict"][token_eval_node])
eval_data_dict["labels"][convert_to_label_node(node)] = np.zeros_like(eval_data_dict["labels"][token_eval_label_node])
eval_input_function = tf.estimator.inputs.numpy_input_fn(eval_data_dict["input_dict"],
y = eval_data_dict["labels"],
batch_size = 4,
num_epochs = 1,
shuffle = False)
# -
train_data_dict["input_dict"][robot_node].shape
print(len(eval_data_dict['input_dict']))
print(len(eval_data_dict['labels']))
STG.node_edges_and_neighbors
# %memit
# # Training
# +
# JUST RUN 'cd memory_usage_info; python training_memory_usage.py'
# -
| nba-dataset/P1. Model Creation and Training.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Accuracy of Newton-Cotes
#
# Copyright (C) 2020 <NAME>
#
# <details>
# <summary>MIT License</summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# </details>
# + jupyter={"outputs_hidden": false}
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pt
# -
# A function to make Vandermonde matrices:
#
# (Note that the ordering of this matrix matches the convention in our class but *disagrees* with `np.vander`.)
def vander(nodes, ncolumns=None):
    """Build a Vandermonde matrix whose column j holds ``nodes**j``.

    Column ordering is by increasing power (class convention), which
    disagrees with ``np.vander``'s default. Defaults to a square matrix.
    """
    if ncolumns is None:
        ncolumns = len(nodes)
    result = np.empty((len(nodes), ncolumns))
    # Fill one power column at a time via views of the transpose.
    for power, column in enumerate(result.T):
        column[:] = nodes**power
    return result
# Fix a set of nodes:
# nodes = [0.5] # Midpoint
# nodes = [0]
#nodes = [0, 1] # Trapezoidal
nodes = [0, 0.5, 1] # Simpson's
#nodes = [0, 1/3, 1]
# Find the weights for the Newton-Cotes rule for the given nodes on $[0,1]$:
# + jupyter={"outputs_hidden": false}
# Solve for quadrature weights so the rule integrates the monomial basis
# exactly on [a, b]: V^T w = rhs with rhs_k = ∫_a^b x^k dx.
(a, b) = (0, 1)
nodes = np.array(nodes)
n = len(nodes)
degs = np.arange(n)
# BUGFIX: the closing parenthesis previously sat inside the exponent,
# `b**(degs+1 - a**(degs+1))`, which only coincidentally produced the
# correct moments for (a, b) = (0, 1). The exact moment is
# (b**(k+1) - a**(k+1)) / (k+1).
rhs = 1/(degs+1)*(b**(degs+1) - a**(degs+1))
weights = la.solve(vander(nodes).T, rhs)
print(weights)
# -
# Here is a function and its definite integral from $0$ to $x$:
#
# $$\text{int_f}(x)=\int_0^x f(\xi)d\xi$$
# + jupyter={"outputs_hidden": false}
fdeg = 9

def f(x):
    """Degree-``fdeg`` polynomial f(x) = 1 - x + x**2 - ... + (-x)**fdeg."""
    terms = [(-x)**power for power in range(fdeg + 1)]
    return sum(terms)

def int_f(x):
    """Antiderivative of ``f`` with int_f(0) = 0, i.e. the integral of f from 0 to x."""
    total = 0
    for power in range(fdeg + 1):
        # Term-by-term integration of (-xi)**power over [0, x].
        total += (-1)**power * 1/(power + 1) * (x**(power + 1) - 0**(power + 1))
    return total
# -
# Plotted:
# + jupyter={"outputs_hidden": false}
plot_x = np.linspace(0, 1, 200)
pt.plot(plot_x, f(plot_x), label="f")
pt.fill_between(plot_x, 0*plot_x, f(plot_x),alpha=0.3)
pt.plot(plot_x, int_f(plot_x), label="$\int f$")
pt.grid()
pt.legend(loc="best")
# -
# This here plots the function, the interpolant, and the area under the interpolant:
# + jupyter={"outputs_hidden": false}
# fix nodes
h = 1
x = nodes * h
# find interpolant
coeffs = la.solve(vander(x), f(x))
# evaluate interpolant
plot_x = np.linspace(0, h, 200)
interpolant = vander(plot_x, len(coeffs)) @ coeffs
# plot
pt.plot(plot_x, f(plot_x), label="f")
pt.plot(plot_x, interpolant, label="Interpolant")
pt.fill_between(plot_x, 0*plot_x, interpolant, alpha=0.3, color="green")
pt.plot(x, f(x), "og")
pt.grid()
pt.legend(loc="best")
# -
# Compute the following:
#
# * The true integral as `true_val` (from `int_f`)
# * The quadrature result as `quad` (using `x` and `weights` and `h`)
# * The error as `err` (the difference of the two)
#
# (Do not be tempted to compute a relative error--that has one order lower.)
#
# Compare the error for $h=1,0.5,0.25$. What order of accuracy do you observe?
# + jupyter={"outputs_hidden": false}
#clear
errors = []
for h in [1, 0.5, 0.25, 0.125, 0.125*0.5]:
true_val = int_f(h)
quad = h * weights @ f(h * nodes)
error = abs(quad - true_val)
print(h, true_val, quad, error)
errors.append(error)
# -
# Estimate the order of accuracy:
#
# We assume that the error depends on the mesh spacings $h$ as
# $E(h)\approx C h^p$ for some unknown power $p$. Taking the $\log$
# of this approximate equality reveals a linear function in $p$:
# $$
# E(h) \approx C h^p \quad \iff \quad \log E(h) \approx \log(C) +
# p\log(h).
# $$
# You can now either do a least-squares fit for $\log C$ and $p$ from
# a few data points $(h,E(h))$ (more accurate, more robust), or you
# can use just two grid sizes $h_1$ and $h_2$, and estimate the slope:
# (less accurate, less robust)
# $$
# p \approx \frac{ \log(\frac{E(h_2)}{E(h_1)}) } {\log(\frac{h_2}{h_1})}.
# $$
# This is called the *empirical order of convergence* or EOC.
#
#
# + jupyter={"outputs_hidden": false}
for i in range(len(errors)-1):
print(np.log(errors[i+1]/errors[i])/np.log(1/2))
# -
| demos/quadrature_and_diff/Accuracy of Newton-Cotes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: medium_articles
# language: python
# name: medium_articles
# ---
# +
import logging
import os
import pathlib
import pickle
import warnings
from pathlib import Path
import catboost as cb
import joblib
import lightgbm as lgb
import matplotlib.pyplot as plt
import mlflow
import numpy as np
import pandas as pd
import seaborn as sns
import tensorflow as tf
# BUGFIX: `import tf.keras as keras` raises ModuleNotFoundError -- `tf` is an
# alias created by the line above, not an importable top-level package.
from tensorflow import keras
import xgboost as xgb
from dagshub import DAGsHubLogger
from sklearn.compose import *
from sklearn.dummy import DummyRegressor
from sklearn.ensemble import *
from sklearn.metrics import mean_squared_error
from sklearn.model_selection import *
from sklearn.pipeline import *
from sklearn.preprocessing import *
from sklearn.tree import *
warnings.filterwarnings("ignore")
logging.basicConfig(
format="%(asctime)s - %(message)s", datefmt="%d-%b-%y %H:%M:%S", level=logging.INFO
)
SEED = 1121218
# -
logger = DAGsHubLogger(
metrics_path="../metrics/metrics.csv", hparams_path="../metrics/params.yml"
)
mlflow.set_tracking_uri("https://dagshub.com/BexTuychiev/pet_pawpularity.mlflow")
def get_metadata(random_state=SEED):
    """Load the raw Pawpularity metadata and return train/test splits.

    Returns ((x_train, y_train), (x_test, y_test)), where each ``x`` drops
    the target column and each ``y`` keeps 'Pawpularity' as a one-column
    frame. A 10% hold-out split is drawn with the given random state.
    """
    metadata = pd.read_csv("../data/raw/train.csv").drop(["Id"], axis=1)
    train, test = train_test_split(metadata, random_state=random_state, test_size=0.1)

    def split_xy(frame):
        # Separate features from the single-column target frame.
        return frame.drop("Pawpularity", axis=1), frame[["Pawpularity"]]

    return split_xy(train), split_xy(test)
import dagshub
pd.read_csv("../data/raw/train.csv").shape
| notebooks/doodle.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [default]
# language: python
# name: python3
# ---
# + [markdown] deletable=true editable=true
# ## Data input for BIDS datasets
# `DataGrabber` and `SelectFiles` are great if you are dealing with generic datasets with arbitrary organization. However, if you have decided to use Brain Imaging Data Structure (BIDS) to organize your data (or got your hands on a BIDS dataset) you can take advantage of a formal structure BIDS imposes. In this short tutorial, you will learn how to do this.
# + [markdown] deletable=true editable=true
# ## `pybids` - a Python API for working with BIDS datasets
# `pybids` is a lightweight python API for querying BIDS folder structure for specific files and metadata. You can install it from PyPi:
# ```
# pip install pybids
# ```
# Please note it should be already installed in the tutorial Docker image.
# + [markdown] deletable=true editable=true
# ## The `layout` object and simple queries
# To begin working with pybids we need to initialize a layout object. We will need it to do all of our queries
# + deletable=true editable=true
from bids.layout import BIDSLayout
layout = BIDSLayout("/data/ds000114/")
# + deletable=true editable=true
# !tree -L 4 /data/ds000114/
# + [markdown] deletable=true editable=true
# Let's figure out what are the subject labels in this dataset
# + deletable=true editable=true
layout.get_subjects()
# + [markdown] deletable=true editable=true
# What modalities are included in this dataset?
# + deletable=true editable=true
layout.get_modalities()
# + [markdown] deletable=true editable=true
# Which different data types are included in this dataset?
# + deletable=true editable=true
layout.get_types(modality='func')
# + [markdown] deletable=true editable=true
# What are the different tasks included in this dataset?
# + deletable=true editable=true
layout.get_tasks()
# + [markdown] deletable=true editable=true
# We can also ask for all of the data for a particular subject and one modality.
# + deletable=true editable=true
layout.get(subject='01', modality="anat", session="test")
# + [markdown] deletable=true editable=true
# We can also ask for a specific subset of data. Note that we are using extension filter to get just the imaging data (BIDS allows both .nii and .nii.gz so we need to include both).
# + deletable=true editable=true
layout.get(subject='01', type='bold', extensions=['nii', 'nii.gz'])
# + [markdown] deletable=true editable=true
# You probably noticed that this method does not only return the file paths, but objects with relevant query fields. We can easily extract just the file paths.
# + deletable=true editable=true
layout.get(subject='01', type='bold', extensions=['nii', 'nii.gz'], return_type='file')
# + [markdown] deletable=true editable=true
# ### Exercise 1:
# List all files for the "linebisection" task for subject 02.
# + deletable=true editable=true solution2="shown" solution2_first=true
#write your solution here
# + deletable=true editable=true solution2="shown"
from bids.layout import BIDSLayout
layout = BIDSLayout("/data/ds000114/")
layout.get(subject='02', return_type='file', task="linebisection")
# + [markdown] deletable=true editable=true
# ## `BIDSDataGrabber`: Including `pybids` in your `nipype` workflow
# This is great, but what we really want is to include this into our nipype workflows. To do this, we can import `BIDSDataGrabber`, which provides an `Interface` for `BIDSLayout.get`
# + deletable=true editable=true
from nipype.interfaces.io import BIDSDataGrabber
from nipype.pipeline import Node, MapNode, Workflow
from nipype.interfaces.utility import Function
bg = Node(BIDSDataGrabber(), name='bids-grabber')
bg.inputs.base_dir = '/data/ds000114'
# + [markdown] deletable=true editable=true
# You can define static filters, that will apply to all queries, by modifying the appropriate input
# + deletable=true editable=true
bg.inputs.subject = '01'
res = bg.run()
res.outputs
# + [markdown] deletable=true editable=true
# Note that by default `BIDSDataGrabber` will fetch `nifti` files matching modality `func` and `anat`, and output them as two output fields.
#
# To define custom fields, simply define the arguments to pass to `BIDSLayout.get` as dictionary, like so:
# + deletable=true editable=true
bg.inputs.output_query = {'bolds': dict(type='bold')}
res = bg.run()
res.outputs
# + [markdown] deletable=true editable=true
# This results in a single output field `bold`, which returns all files with `type:bold` for `subject:"01"`
#
# Now, lets put it in a workflow. We are not going to analyze any data, but for demonstration purposes, we will add a couple of nodes that pretend to analyze their inputs
# + deletable=true editable=true
def printMe(paths):
    """Stand-in 'analysis' step: just announce which paths were received."""
    print("\n\nanalyzing {}\n\n".format(paths))
analyzeBOLD = Node(Function(function=printMe, input_names=["paths"],
output_names=[]), name="analyzeBOLD")
# + deletable=true editable=true
wf = Workflow(name="bids_demo")
wf.connect(bg, "bolds", analyzeBOLD, "paths")
wf.run()
# + [markdown] deletable=true editable=true
# ### Exercise 2:
# Modify the `BIDSDataGrabber` and the workflow to collect T1ws images for subject `10`.
# + deletable=true editable=true solution2="shown" solution2_first=true
# write your solution here
# + deletable=true editable=true solution2="shown"
from nipype.pipeline import Node, MapNode, Workflow
from nipype.interfaces.io import BIDSDataGrabber
ex2_BIDSDataGrabber = BIDSDataGrabber()
ex2_BIDSDataGrabber.inputs.base_dir = '/data/ds000114'
ex2_BIDSDataGrabber.inputs.subject = '10'
ex2_BIDSDataGrabber.inputs.output_query = {'T1w': dict(modality='anat')}
ex2_res = ex2_BIDSDataGrabber.run()
ex2_res.outputs
# + [markdown] deletable=true editable=true
# ## Iterating over subject labels
# In the previous example, we demonstrated how to use `pybids` to "analyze" one subject. How can we scale it for all subjects? Easy - using `iterables` (more in [Iteration/Iterables](basic_iteration.ipynb)).
# + deletable=true editable=true
bg_all = Node(BIDSDataGrabber(), name='bids-grabber')
bg_all.inputs.base_dir = '/data/ds000114'
bg_all.inputs.output_query = {'bolds': dict(type='bold')}
bg_all.iterables = ('subject', layout.get_subjects()[:2])
wf = Workflow(name="bids_demo")
wf.connect(bg_all, "bolds", analyzeBOLD, "paths")
wf.run()
# + [markdown] deletable=true editable=true
# ## Accessing additional metadata
# Querying different files is nice, but sometimes you want to access more metadata. For example `RepetitionTime`. `pybids` can help with that as well
# + deletable=true editable=true
layout.get_metadata('/data/ds000114/sub-01/ses-test/func/sub-01_ses-test_task-fingerfootlips_bold.nii.gz')
# + [markdown] deletable=true editable=true
# Can we incorporate this into our pipeline? Yes, we can! To do so, let's use a `Function` node to use `BIDSLayout` in a custom way.
# (More about MapNode in [MapNode](basic_mapnodes.ipynb))
# + deletable=true editable=true
def printMetadata(path, data_dir):
    """Print the RepetitionTime (TR) metadata for one BOLD file.

    The import lives inside the body because nipype Function nodes
    serialize the function source and execute it in a clean namespace.
    """
    from bids.layout import BIDSLayout
    # Re-index the BIDS dataset, then look up the sidecar metadata for `path`.
    layout = BIDSLayout(data_dir)
    print("\n\nanalyzing " + path + "\nTR: "+ str(layout.get_metadata(path)["RepetitionTime"]) + "\n\n")
analyzeBOLD2 = MapNode(Function(function=printMetadata, input_names=["path", "data_dir"],
output_names=[]), name="analyzeBOLD2", iterfield="path")
analyzeBOLD2.inputs.data_dir = "/data/ds000114/"
# + deletable=true editable=true
wf = Workflow(name="bids_demo")
wf.connect(bg, "bolds", analyzeBOLD2, "path")
wf.run()
# + [markdown] deletable=true editable=true
# ### Exercise 3:
# Modify the `printMetadata` function to also print `EchoTime`
# + deletable=true editable=true solution2="shown" solution2_first=true
# write your solution here
# + deletable=true editable=true solution2="shown"
from nipype.pipeline import Node, MapNode, Workflow
from nipype.interfaces.io import BIDSDataGrabber
ex3_BIDSDataGrabber = Node(BIDSDataGrabber(), name='bids-grabber')
ex3_BIDSDataGrabber.inputs.base_dir = '/data/ds000114'
ex3_BIDSDataGrabber.inputs.subject = '01'
ex3_BIDSDataGrabber.inputs.output_query = {'bolds': dict(type='bold')}
# + deletable=true editable=true solution2="shown"
# and now modify analyzeBOLD2
def printMetadata_et(path, data_dir):
    """Print both RepetitionTime and EchoTime of *path* from the dataset at *data_dir*."""
    from bids.layout import BIDSLayout
    layout = BIDSLayout(data_dir)
    meta = layout.get_metadata(path)
    print("\n\nanalyzing " + path + "\nTR: " +
          str(meta["RepetitionTime"]) +
          "\nET: " + str(meta["EchoTime"]) + "\n\n")
# Map the extended metadata printer over every grabbed BOLD file.
ex3_analyzeBOLD2 = MapNode(Function(function=printMetadata_et,
                                    input_names=["path", "data_dir"],
                                    output_names=[]),
                           name="ex3", iterfield="path")
ex3_analyzeBOLD2.inputs.data_dir = "/data/ds000114/"
# and create a new workflow
ex3_wf = Workflow(name="ex3")
ex3_wf.connect(ex3_BIDSDataGrabber, "bolds", ex3_analyzeBOLD2, "path")
ex3_wf.run()
| notebooks/basic_data_input_bids.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.5 64-bit
# metadata:
# interpreter:
# hash: 2db524e06e9f5f4ffedc911c917cb75e12dbc923643829bf417064a77eb14d37
# name: python3
# ---
# +
import pickle
import numpy
import music21
import tensorflow as tf
# -
def generate():
    """End-to-end pipeline: load training notes, rebuild the network, and write a MIDI file."""
    # Recover the note corpus the model was trained on.
    with open('data/notes', 'rb') as fh:
        notes = pickle.load(fh)
    # Sorted vocabulary fixes the note <-> integer encoding.
    pitchnames = sorted(set(notes))
    n_vocab = len(set(notes))
    net_input, net_input_norm = prepare_sequences(notes, pitchnames, n_vocab)
    trained_model = create_network(net_input_norm, n_vocab)
    predictions = generate_notes(trained_model, net_input, pitchnames, n_vocab)
    create_midi(predictions)
def prepare_sequences(notes, pitchnames, n_vocab, sequence_length=100):
    """Build sliding-window training sequences from a note list.

    Parameters
    ----------
    notes : list of str
        Full note/chord token sequence.
    pitchnames : sequence of str
        Sorted vocabulary of distinct tokens; position defines the integer encoding.
    n_vocab : int
        Vocabulary size, used to scale inputs into [0, 1).
    sequence_length : int, optional
        Length of each input window (default 100, the value previously hard-coded).

    Returns
    -------
    tuple(list[list[int]], numpy.ndarray)
        The raw integer-encoded windows and the same data reshaped to
        (n_patterns, sequence_length, 1) and divided by n_vocab for the LSTM.
    """
    # map between notes and integers and back
    note_to_int = {note: number for number, note in enumerate(pitchnames)}
    network_input = []
    output = []  # NOTE: target labels are built for parity with training but not returned
    for i in range(len(notes) - sequence_length):
        sequence_in = notes[i:i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([note_to_int[char] for char in sequence_in])
        output.append(note_to_int[sequence_out])
    n_patterns = len(network_input)
    # reshape the input into (samples, timesteps, features) for LSTM layers
    normalized_input = numpy.reshape(network_input, (n_patterns, sequence_length, 1))
    # normalize input to [0, 1)
    normalized_input = normalized_input / float(n_vocab)
    return (network_input, normalized_input)
def create_network(network_input, n_vocab):
    """Create the 3-layer LSTM generator and load pre-trained weights.

    Parameters
    ----------
    network_input : numpy.ndarray
        Normalized input of shape (n_patterns, sequence_length, 1); only its
        shape is used to size the first layer.
    n_vocab : int
        Vocabulary size — width of the softmax output layer.

    Returns
    -------
    A compiled Keras model with weights loaded from 'weights.hdf5'.
    """
    # FIX: these layer/model names were previously undefined in this file (only
    # ``tensorflow`` is imported at the top), so calling this function raised
    # NameError. Import them locally from tf.keras.
    from tensorflow.keras.models import Sequential
    from tensorflow.keras.layers import (LSTM, Dense, Dropout, Activation,
                                         BatchNormalization as BatchNorm)
    model = Sequential()
    model.add(LSTM(
        512,
        input_shape=(network_input.shape[1], network_input.shape[2]),
        recurrent_dropout=0.3,
        return_sequences=True
    ))
    model.add(LSTM(512, return_sequences=True, recurrent_dropout=0.3,))
    model.add(LSTM(512))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNorm())
    model.add(Dropout(0.3))
    model.add(Dense(n_vocab))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
    # Load the trained weights; 'weights.hdf5' must exist in the working directory.
    model.load_weights('weights.hdf5')
    return model
def generate_notes(model, network_input, pitchnames, n_vocab):
""" Generate notes from the neural network based on a sequence of notes """
# pick a random sequence from the input as a starting point for the prediction
start = numpy.random.randint(0, len(network_input)-1)
int_to_note = dict((number, note) for number, note in enumerate(pitchnames))
pattern = network_input[start]
prediction_output = []
# generate 500 notes
for note_index in range(500):
prediction_input = numpy.reshape(pattern, (1, len(pattern), 1))
prediction_input = prediction_input / float(n_vocab)
prediction = model.predict(prediction_input, verbose=0)
index = numpy.argmax(prediction)
result = int_to_note[index]
prediction_output.append(result)
pattern.append(index)
pattern = pattern[1:len(pattern)]
return prediction_output
def create_midi(prediction_output):
    """Convert predicted tokens to music21 notes/chords and write 'test_output.mid'.

    Tokens containing '.' or that are purely digits are treated as chords
    (dot-separated integer pitches); everything else is a single note name.
    """
    # FIX: ``import music21`` at the top of this file does not bind the
    # submodule names used below, so this function raised NameError.
    # Import them locally.
    from music21 import chord, instrument, note, stream
    offset = 0
    output_notes = []
    # create note and chord objects based on the values generated by the model
    for pattern in prediction_output:
        # pattern is a chord
        if ('.' in pattern) or pattern.isdigit():
            notes_in_chord = pattern.split('.')
            notes = []
            for current_note in notes_in_chord:
                new_note = note.Note(int(current_note))
                new_note.storedInstrument = instrument.Piano()
                notes.append(new_note)
            new_chord = chord.Chord(notes)
            new_chord.offset = offset
            output_notes.append(new_chord)
        # pattern is a note
        else:
            new_note = note.Note(pattern)
            new_note.offset = offset
            new_note.storedInstrument = instrument.Piano()
            output_notes.append(new_note)
        # increase offset each iteration so that notes do not stack
        offset += 0.5
    midi_stream = stream.Stream(output_notes)
    midi_stream.write('midi', fp='test_output.mid')
# Script entry point: run the full generation pipeline.
if __name__ == '__main__':
    generate()
| src/app/test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Tracking an Unknown Number of Objects
#
# While SVI can be used to learn components and assignments of a mixture model, pyro.contrib.tracking provides more efficient inference algorithms to estimate assignments. This notebook demonstrates how to use the `MarginalAssignmentPersistent` inside SVI.
# +
import math
import os
import torch
from torch.distributions import constraints
from matplotlib import pyplot
import pyro
import pyro.distributions as dist
import pyro.poutine as poutine
from pyro.contrib.tracking.assignment import MarginalAssignmentPersistent
from pyro.distributions.util import gather
from pyro.infer import SVI, TraceEnum_ELBO
from pyro.optim import Adam
# %matplotlib inline
assert pyro.__version__.startswith('0.3.4')
pyro.enable_validation(True)
smoke_test = ('CI' in os.environ)
# -
# Let's consider a model with deterministic dynamics, say sinusoids with known period but unknown phase and amplitude.
def get_dynamics(num_frames):
    """Return a [num_frames, 2] tensor of deterministic sinusoidal dynamics.

    Time advances in quarter-radian steps; column 0 is cos(t), column 1 is sin(t).
    """
    steps = torch.arange(float(num_frames)) / 4
    return torch.stack((torch.cos(steps), torch.sin(steps)), -1)
# It's tricky to define a fully generative model, so instead we'll separate our data generation process `generate_data()` from a factor graph `model()` that will be used in inference.
def generate_data(args):
    """Simulate a fixed number of sinusoidal objects plus spurious detections.

    Returns (states, positions, observations) where observations has shape
    (num_frames, max_num_detections, 2): column 0 is position, column 1 is a
    0/1 confidence flag marking real slots.
    """
    # Object model.
    num_objects = int(round(args.expected_num_objects)) # Deterministic.
    states = dist.Normal(0., 1.).sample((num_objects, 2))
    # Detection model: per-frame Bernoulli emissions plus Poisson clutter.
    emitted = dist.Bernoulli(args.emission_prob).sample((args.num_frames, num_objects))
    num_spurious = dist.Poisson(args.expected_num_spurious).sample((args.num_frames,))
    max_num_detections = int((num_spurious + emitted.sum(-1)).max())
    observations = torch.zeros(args.num_frames, max_num_detections, 1+1) # position+confidence
    positions = get_dynamics(args.num_frames).mm(states.t())
    noisy_positions = dist.Normal(positions, args.emission_noise_scale).sample()
    # Pack emitted detections first, then clutter; trailing slots stay zero
    # (confidence 0 marks them as padding).
    for t in range(args.num_frames):
        j = 0
        for i, e in enumerate(emitted[t]):
            if e:
                observations[t, j, 0] = noisy_positions[t, i]
                observations[t, j, 1] = 1
                j += 1
        n = int(num_spurious[t])
        if n:
            observations[t, j:j+n, 0] = dist.Normal(0., 1.).sample((n,))
            observations[t, j:j+n, 1] = 1
    return states, positions, observations
def model(args, observations):
    """Pyro factor graph over object existence, states, and detection assignments.

    Not fully generative: the combinatorial assignment prior is approximated
    by independent factors so that plates stay conditionally independent.
    """
    # Object-level latents: existence flags and 2-d states.
    with pyro.plate("objects", args.max_num_objects):
        exists = pyro.sample("exists",
                             dist.Bernoulli(args.expected_num_objects / args.max_num_objects))
        with poutine.mask(mask=exists.byte()):
            states = pyro.sample("states", dist.Normal(0., 1.).expand([2]).to_event(1))
            positions = get_dynamics(args.num_frames).mm(states.t())
    with pyro.plate("detections", observations.shape[1]):
        with pyro.plate("time", args.num_frames):
            # The combinatorial part of the log prob is approximated to allow independence.
            is_observed = (observations[..., -1] > 0)
            with poutine.mask(mask=is_observed):
                # Index args.max_num_objects is the "spurious" catch-all category.
                assign = pyro.sample("assign",
                                     dist.Categorical(torch.ones(args.max_num_objects + 1)))
            is_spurious = (assign == args.max_num_objects)
            is_real = is_observed & ~is_spurious
            num_observed = is_observed.float().sum(-1, True)
            pyro.sample("is_real",
                        dist.Bernoulli(args.expected_num_objects / num_observed),
                        obs=is_real.float())
            pyro.sample("is_spurious",
                        dist.Bernoulli(args.expected_num_spurious / num_observed),
                        obs=is_spurious.float())
            # The remaining continuous part is exact.
            observed_positions = observations[..., 0]
            with poutine.mask(mask=is_real):
                # Append a dummy column so the spurious index gathers a valid value.
                bogus_position = positions.new_zeros(args.num_frames, 1)
                augmented_positions = torch.cat([positions, bogus_position], -1)
                predicted_positions = gather(augmented_positions, assign, -1)
                pyro.sample("real_observations",
                            dist.Normal(predicted_positions, args.emission_noise_scale),
                            obs=observed_positions)
            with poutine.mask(mask=is_spurious):
                pyro.sample("spurious_observations", dist.Normal(0., 1.),
                            obs=observed_positions)
# This guide uses a smart assignment solver but a naive state estimator. A smarter implementation would use message passing also for state estimation, e.g. a Kalman filter-smoother.
def guide(args, observations):
    """Variational guide: mean-field Normal over states plus an exact
    marginal assignment solver for detection-to-object matching.

    Returns the MarginalAssignmentPersistent object so callers can inspect
    existence/assignment posteriors.
    """
    # Initialize states randomly from the prior.
    states_loc = pyro.param("states_loc", lambda: torch.randn(args.max_num_objects, 2))
    states_scale = pyro.param("states_scale",
                              lambda: torch.ones(states_loc.shape) * args.emission_noise_scale,
                              constraint=constraints.positive)
    positions = get_dynamics(args.num_frames).mm(states_loc.t())
    # Solve soft assignment problem.
    real_dist = dist.Normal(positions.unsqueeze(-2), args.emission_noise_scale)
    spurious_dist = dist.Normal(0., 1.)
    is_observed = (observations[..., -1] > 0)
    observed_positions = observations[..., 0].unsqueeze(-1)
    # Log-likelihood ratio of "real detection of object i" vs "clutter".
    assign_logits = (real_dist.log_prob(observed_positions) -
                     spurious_dist.log_prob(observed_positions) +
                     math.log(args.expected_num_objects * args.emission_prob /
                              args.expected_num_spurious))
    assign_logits[~is_observed] = -float('inf')
    exists_logits = torch.empty(args.max_num_objects).fill_(
        math.log(args.max_num_objects / args.expected_num_objects))
    assignment = MarginalAssignmentPersistent(exists_logits, assign_logits)
    # Sample discrete latents from the solver's marginals (enumerated in parallel).
    with pyro.plate("objects", args.max_num_objects):
        exists = pyro.sample("exists", assignment.exists_dist, infer={"enumerate": "parallel"})
        with poutine.mask(mask=exists.byte()):
            pyro.sample("states", dist.Normal(states_loc, states_scale).to_event(1))
    with pyro.plate("detections", observations.shape[1]):
        with poutine.mask(mask=is_observed):
            with pyro.plate("time", args.num_frames):
                assign = pyro.sample("assign", assignment.assign_dist, infer={"enumerate": "parallel"})
    return assignment
# We'll define a global config object to make it easy to port code to `argparse`.
# +
# Global experiment configuration (stand-in for argparse in a notebook).
args = type('Args', (object,), {}) # A fake ArgumentParser.parse_args() result.
args.num_frames = 5                # number of time steps
args.max_num_objects = 3           # truncation of the object count
args.expected_num_objects = 2.     # prior mean number of real objects
args.expected_num_spurious = 1.    # prior mean clutter detections per frame
args.emission_prob = 0.8           # probability a real object is detected per frame
args.emission_noise_scale = 0.1    # observation noise std
assert args.max_num_objects >= args.expected_num_objects
# -
# ## Generate data
# Generate a reproducible synthetic dataset and sanity-check its shapes.
pyro.set_rng_seed(0)
true_states, true_positions, observations = generate_data(args)
true_num_objects = len(true_states)
max_num_detections = observations.shape[1]
assert true_states.shape == (true_num_objects, 2)
assert true_positions.shape == (args.num_frames, true_num_objects)
assert observations.shape == (args.num_frames, max_num_detections, 1+1)
print("generated {:d} detections from {:d} objects".format(
    (observations[..., -1] > 0).long().sum(), true_num_objects))
# ## Train
def plot_solution(message=''):
    """Plot ground truth, observations, and current predicted tracks.

    Runs the guide once to get the assignment posterior; each predicted track's
    opacity reflects its existence probability.
    """
    assignment = guide(args, observations)
    states_loc = pyro.param("states_loc")
    positions = get_dynamics(args.num_frames).mm(states_loc.t())
    pyplot.figure(figsize=(12,6)).patch.set_color('white')
    pyplot.plot(true_positions.numpy(), 'k--')
    is_observed = (observations[..., -1] > 0)
    pos = observations[..., 0]
    time = torch.arange(float(args.num_frames)).unsqueeze(-1).expand_as(pos)
    pyplot.scatter(time[is_observed].view(-1).numpy(),
                   pos[is_observed].view(-1).numpy(), color='k', marker='+',
                   label='observation')
    for i in range(args.max_num_objects):
        # Fade tracks by their posterior existence probability.
        p_exist = assignment.exists_dist.probs[i].item()
        position = positions[:, i].detach().numpy()
        pyplot.plot(position, alpha=p_exist, color='C0')
    pyplot.title('Truth, observations, and predicted tracks ' + message)
    pyplot.plot([], 'k--', label='truth')
    pyplot.plot([], color='C0', label='prediction')
    pyplot.legend(loc='best')
    pyplot.xlabel('time step')
    pyplot.ylabel('position')
    pyplot.tight_layout()
# Train with SVI; TraceEnum_ELBO enumerates the discrete latents in parallel
# (max_plate_nesting=2 for the time x detections plates).
pyro.set_rng_seed(1)
pyro.clear_param_store()
plot_solution('(before training)')
infer = SVI(model, guide, Adam({"lr": 0.01}), TraceEnum_ELBO(max_plate_nesting=2))
losses = []
for epoch in range(101 if not smoke_test else 2):
    loss = infer.step(args, observations)
    if epoch % 10 == 0:
        print("epoch {: >4d} loss = {}".format(epoch, loss))
    losses.append(loss)
pyplot.plot(losses);
plot_solution('(after training)')
| tutorial/source/tracking_1d.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 4. PostProccess
# Kaggle score: 0.000
# ## Run name
# +
import time
import os
import pandas as pd
# Build a unique, timestamped run name used for output file naming.
# NOTE: 'PostProccess' spelling is kept as-is to match existing artifacts.
project_name = 'Google_LandMark_Rec'
step_name = 'PostProccess'
time_str = time.strftime("%Y%m%d_%H%M%S", time.localtime())
run_name = project_name + '_' + step_name + '_' + time_str
print('run_name: ' + run_name)
# -
# ## Project folder
# +
# Resolve the project directory layout relative to the current working directory.
cwd = os.getcwd()
input_folder = os.path.join(cwd, 'input')
output_folder = os.path.join(cwd, 'output')
model_folder = os.path.join(cwd, 'model')
feature_folder = os.path.join(cwd, 'feature')
post_pca_feature_folder = os.path.join(cwd, 'post_pca_feature')
log_folder = os.path.join(cwd, 'log')
print('input_folder: \t' + input_folder)
print('output_folder: \t' + output_folder)
print('model_folder: \t' + model_folder)
print('feature_folder: \t' + feature_folder)
print('post_pca_feature_folder: \t' + post_pca_feature_folder)
print('log_folder: \t' + log_folder)
org_train_folder = os.path.join(input_folder, 'org_train')
org_test_folder = os.path.join(input_folder, 'org_test')
train_folder = os.path.join(input_folder, 'data_train')
test_folder = os.path.join(input_folder, 'data_test')
test_sub_folder = os.path.join(test_folder, 'test')
# Only the PCA feature folder is created on demand; the rest are assumed to exist.
if not os.path.exists(post_pca_feature_folder):
    os.mkdir(post_pca_feature_folder)
    print('Create folder: %s' % post_pca_feature_folder)
# -
train_csv_file = os.path.join(input_folder, 'train.csv')
test_csv_file = os.path.join(input_folder, 'test.csv')
sample_submission_file = os.path.join(input_folder, 'sample_submission.csv')
# ## Proccess result
# Load a previously generated prediction file for post-processing.
submission_csv_file = os.path.join(output_folder, 'pred_Google_LandMark_Rec_Train-Predict_20180311_104053_10136.csv')
submission_csv = pd.read_csv(submission_csv_file)
print(submission_csv.shape)
print(submission_csv.head(2))
# +
# Normalize the 'landmarks' column: NaN rows become '', and every
# "<landmark_id> <probability>" prediction is rewritten with confidence '1'.
float_count = 0
str_count = 0
empty_str_count = 0
less_than_count = 0
larger_than_count = 0
for i in range(submission_csv.shape[0]):
    landmarks = submission_csv['landmarks'][i]
    # print(landmarks, end=' ')
    if isinstance(landmarks, float):
        # a float here is NaN (missing prediction) -> blank it out
        float_count += 1
        # FIX: use .loc instead of chained indexing, which triggers
        # SettingWithCopyWarning and may silently write to a temporary copy.
        submission_csv.loc[i, 'landmarks'] = ''
    else:
        str_count += 1
        if landmarks != '':
            proba_strs = landmarks.split(' ')
            proba = float(proba_strs[1])  # validates the "<id> <proba>" format
            submission_csv.loc[i, 'landmarks'] = '%s %s' % (proba_strs[0], '1')
            # Alternative: threshold on probability instead of forcing confidence 1.
            # if proba < 0.7:
            #     less_than_count += 1
            #     submission_csv.loc[i, 'landmarks'] = ''
            # else:
            #     larger_than_count += 1
            #     submission_csv.loc[i, 'landmarks'] = '%s %s' % (proba_strs[0], '1')
        else:
            empty_str_count += 1
    if i % 1000 == 0:
        print(i // 1000, end=' ')  # coarse progress indicator
print('\nfloat_count: \t%s' % float_count)
print('str_count: \t%s' % str_count)
print('empty_str_count: \t%s' % empty_str_count)
print('less_than_count: \t%s' % less_than_count)
print('larger_than_count: \t%s' % larger_than_count)
# -
# Inspect the cleaned predictions and write them under the new run name.
print(submission_csv.shape)
print(submission_csv.head(2))
pred_file = os.path.join(output_folder, run_name + '.csv')
print(pred_file)
submission_csv.to_csv(pred_file, index=None)
print('Done!')
| landmark-recognition-challenge/4. PostProccess.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.8.6 64-bit ('opt-qiskit')
# metadata:
# interpreter:
# hash: 9c837c7ac14898b4157c3602a52e89b1bd57ef10d6bdaa28fc65fa6f9116fa6b
# name: python3
# ---
# # Credit Risk Analysis
# ### Introduction
# This tutorial shows how quantum algorithms can be used for credit risk analysis.
# More precisely, how Quantum Amplitude Estimation (QAE) can be used to estimate risk measures with a quadratic speed-up over classical Monte Carlo simulation.
# The tutorial is based on the following papers:
#
# - [Quantum Risk Analysis. <NAME>, <NAME>.](https://www.nature.com/articles/s41534-019-0130-6) [Woerner2019]
# - [Credit Risk Analysis using Quantum Computers. Egger et al. (2019)](https://arxiv.org/abs/1907.03044) [Egger2019]
#
# A general introduction to QAE can be found in the following paper:
#
# - [Quantum Amplitude Amplification and Estimation. Gilles Brassard et al.](http://arxiv.org/abs/quant-ph/0005055)
#
# The structure of the tutorial is as follows:
#
# 1. [Problem Definition](#Problem-Definition)
# 2. [Uncertainty Model](#Uncertainty-Model)
# 3. [Expected Loss](#Expected-Loss)
# 4. [Cumulative Distribution Function](#Cumulative-Distribution-Function)
# 5. [Value at Risk](#Value-at-Risk)
# 6. [Conditional Value at Risk](#Conditional-Value-at-Risk)
# +
import numpy as np
import matplotlib.pyplot as plt
from qiskit import QuantumRegister, QuantumCircuit, Aer, execute
from qiskit.circuit.library import IntegerComparator
from qiskit.utils import QuantumInstance
from qiskit.algorithms import IterativeAmplitudeEstimation, EstimationProblem
# -
# ### Problem Definition
#
# In this tutorial we want to analyze the credit risk of a portfolio of $K$ assets.
# The default probability of every asset $k$ follows a *Gaussian Conditional Independence* model, i.e., given a value $z$ sampled from a latent random variable $Z$ following a standard normal distribution, the default probability of asset $k$ is given by
#
# $$p_k(z) = F\left( \frac{F^{-1}(p_k^0) - \sqrt{\rho_k}z}{\sqrt{1 - \rho_k}} \right) $$
#
# where $F$ denotes the cumulative distribution function of $Z$, $p_k^0$ is the default probability of asset $k$ for $z=0$ and $\rho_k$ is the sensitivity of the default probability of asset $k$ with respect to $Z$. Thus, given a concrete realization of $Z$ the individual default events are assumed to be independent from each other.
#
# We are interested in analyzing risk measures of the total loss
#
# $$ L = \sum_{k=1}^K \lambda_k X_k(Z) $$
#
# where $\lambda_k$ denotes the _loss given default_ of asset $k$, and given $Z$, $X_k(Z)$ denotes a Bernoulli variable representing the default event of asset $k$. More precisely, we are interested in the expected value $\mathbb{E}[L]$, the Value at Risk (VaR) of $L$ and the Conditional Value at Risk of $L$ (also called Expected Shortfall). Where VaR and CVaR are defined as
#
# $$ \text{VaR}_{\alpha}(L) = \inf \{ x \mid \mathbb{P}[L <= x] \geq 1 - \alpha \}$$
#
# with confidence level $\alpha \in [0, 1]$, and
#
# $$ \text{CVaR}_{\alpha}(L) = \mathbb{E}[ L \mid L \geq \text{VaR}_{\alpha}(L) ].$$
#
# For more details on the considered model, see, e.g.,<br>
# [Regulatory Capital Modeling for Credit Risk. <NAME>, <NAME>](https://arxiv.org/abs/1412.1183)
#
#
#
# The problem is defined by the following parameters:
# - number of qubits used to represent $Z$, denoted by $n_z$
# - truncation value for $Z$, denoted by $z_{\text{max}}$, i.e., Z is assumed to take $2^{n_z}$ equidistant values in $\{-z_{max}, ..., +z_{max}\}$
# - the base default probabilities for each asset $p_0^k \in (0, 1)$, $k=1, ..., K$
# - sensitivities of the default probabilities with respect to $Z$, denoted by $\rho_k \in [0, 1)$
# - loss given default for asset $k$, denoted by $\lambda_k$
# - confidence level for VaR / CVaR $\alpha \in [0, 1]$.
# set problem parameters
n_z = 2                                          # qubits representing the latent Z
z_max = 2                                        # truncation: Z in [-z_max, z_max]
z_values = np.linspace(-z_max, z_max, 2**n_z)    # discretized Z grid
p_zeros = [0.15, 0.25]                           # base default probability per asset
rhos = [0.1, 0.05]                               # sensitivity of each asset to Z
lgd = [1, 2]                                     # loss given default per asset
K = len(p_zeros)                                 # number of assets
alpha = 0.05                                     # VaR/CVaR confidence level
# ### Uncertainty Model
#
# We now construct a circuit that loads the uncertainty model. This can be achieved by creating a quantum state in a register of $n_z$ qubits that represents $Z$ following a standard normal distribution. This state is then used to control single qubit Y-rotations on a second qubit register of $K$ qubits, where a $|1\rangle$ state of qubit $k$ represents the default event of asset $k$. The resulting quantum state can be written as
#
# $$ |\Psi\rangle = \sum_{i=0}^{2^{n_z}-1} \sqrt{p_z^i} |z_i \rangle \bigotimes_{k=1}^K
# \left( \sqrt{1 - p_k(z_i)}|0\rangle + \sqrt{p_k(z_i)}|1\rangle\right),
# $$
#
# where we denote by $z_i$ the $i$-th value of the discretized and truncated $Z$ [Egger2019].
from qiskit_finance.circuit.library import GaussianConditionalIndependenceModel as GCI
# Circuit loading the Gaussian Conditional Independence model: n_z qubits for Z,
# one qubit per asset whose |1> amplitude encodes that asset's default probability.
u = GCI(n_z, z_max, p_zeros, rhos)
u.draw()
# We now use the simulator to validate the circuit that constructs $|\Psi\rangle$ and compute the corresponding exact values for
# - expected loss $\mathbb{E}[L]$
# - PDF and CDF of $L$
# - value at risk $VaR(L)$ and corresponding probability
# - conditional value at risk $CVaR(L)$
# run the circuit and analyze the results
job = execute(u, backend=Aer.get_backend('statevector_simulator'))
# +
# analyze uncertainty circuit and determine exact solutions
p_z = np.zeros(2**n_z)
p_default = np.zeros(K)
values = []
probabilities = []
num_qubits = u.num_qubits
for i, a in enumerate(job.result().get_statevector()):
    # get binary representation
    b = ('{0:0%sb}' % num_qubits).format(i)
    prob = np.abs(a)**2
    # extract value of Z and corresponding probability (Z occupies the last n_z bits)
    i_normal = int(b[-n_z:], 2)
    p_z[i_normal] += prob
    # determine overall default probability for k
    # NOTE(review): b[K - k - 1] addresses asset bits correctly only because
    # num_qubits == n_z + K here; the general index would be b[-(n_z + k + 1)].
    loss = 0
    for k in range(K):
        if b[K - k - 1] == '1':
            p_default[k] += prob
            loss += lgd[k]
    values += [loss]
    probabilities += [prob]
values = np.array(values)
probabilities = np.array(probabilities)
# Aggregate into exact expected loss, PDF, CDF, VaR, and CVaR.
expected_loss = np.dot(values, probabilities)
losses = np.sort(np.unique(values))
pdf = np.zeros(len(losses))
for i, v in enumerate(losses):
    pdf[i] += sum(probabilities[values == v])
cdf = np.cumsum(pdf)
i_var = np.argmax(cdf >= 1-alpha)
exact_var = losses[i_var]
exact_cvar = np.dot(pdf[(i_var+1):], losses[(i_var+1):])/sum(pdf[(i_var+1):])
# -
print('Expected Loss E[L]: %.4f' % expected_loss)
print('Value at Risk VaR[L]: %.4f' % exact_var)
# NOTE(review): cdf[exact_var] indexes by the loss *value*; it is valid here only
# because losses == [0, 1, 2, 3]. The general form is cdf[i_var] — confirm.
print('P[L <= VaR[L]]: %.4f' % cdf[exact_var])
print('Conditional Value at Risk CVaR[L]: %.4f' % exact_cvar)
# + tags=["nbsphinx-thumbnail"]
# plot loss PDF, expected loss, var, and cvar
plt.bar(losses, pdf)
plt.axvline(expected_loss, color='green', linestyle='--', label='E[L]')
plt.axvline(exact_var, color='orange', linestyle='--', label='VaR(L)')
plt.axvline(exact_cvar, color='red', linestyle='--', label='CVaR(L)')
plt.legend(fontsize=15)
plt.xlabel('Loss L ($)', size=15)
plt.ylabel('probability (%)', size=15)
plt.title('Loss Distribution', size=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# -
# plot results for Z (marginal of the latent variable)
plt.plot(z_values, p_z, 'o-', linewidth=3, markersize=8)
plt.grid()
plt.xlabel('Z value', size=15)
plt.ylabel('probability (%)', size=15)
plt.title('Z Distribution', size=20)
plt.xticks(size=15)
plt.yticks(size=15)
plt.show()
# plot results for default probabilities (marginal per asset)
plt.bar(range(K), p_default)
plt.xlabel('Asset', size=15)
plt.ylabel('probability (%)', size=15)
plt.title('Individual Default Probabilities', size=20)
plt.xticks(range(K), size=15)
plt.yticks(size=15)
plt.grid()
plt.show()
# ### Expected Loss
#
# To estimate the expected loss, we first apply a weighted sum operator to sum up individual losses to total loss:
#
# $$ \mathcal{S}: |x_1, ..., x_K \rangle_K |0\rangle_{n_S} \mapsto |x_1, ..., x_K \rangle_K |\lambda_1x_1 + ... + \lambda_K x_K\rangle_{n_S}. $$
#
# The required number of qubits to represent the result is given by
#
# $$ n_s = \lfloor \log_2( \lambda_1 + ... + \lambda_K ) \rfloor + 1. $$
#
# Once we have the total loss distribution in a quantum register, we can use the techniques described in [Woerner2019] to map a total loss $L \in \{0, ..., 2^{n_s}-1\}$ to the amplitude of an objective qubit by an operator
#
# $$ | L \rangle_{n_s}|0\rangle \mapsto
# | L \rangle_{n_s} \left( \sqrt{1 - L/(2^{n_s}-1)}|0\rangle + \sqrt{L/(2^{n_s}-1)}|1\rangle \right), $$
#
# which allows to run amplitude estimation to evaluate the expected loss.
# add Z qubits with weight/loss 0
from qiskit.circuit.library import WeightedAdder
# Weighted sum S: sums per-asset losses (Z qubits carry weight 0) into a sum register.
agg = WeightedAdder(n_z + K, [0]*n_z + lgd)
# +
from qiskit.circuit.library import LinearAmplitudeFunction
# define linear objective function mapping total loss onto the objective qubit amplitude
breakpoints = [0]
slopes = [1]
offsets = [0]
f_min = 0
f_max = sum(lgd)
c_approx = 0.25  # rescaling factor controlling the sine-approximation accuracy
objective = LinearAmplitudeFunction(
    agg.num_sum_qubits,
    slope=slopes,
    offset=offsets,
    # max value that can be reached by the qubit register (will not always be reached)
    domain=(0, 2**agg.num_sum_qubits-1),
    image=(f_min, f_max),
    rescaling_factor=c_approx,
    breakpoints=breakpoints
)
# -
# Create the state preparation circuit:
# +
# define the registers for convenience and readability
qr_state = QuantumRegister(u.num_qubits, 'state')
qr_sum = QuantumRegister(agg.num_sum_qubits, 'sum')
qr_carry = QuantumRegister(agg.num_carry_qubits, 'carry')
qr_obj = QuantumRegister(1, 'objective')
# define the circuit A = (uncertainty model) o (aggregation) o (objective) o (un-aggregation)
state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, name='A')
# load the random variable
state_preparation.append(u.to_gate(), qr_state)
# aggregate
state_preparation.append(agg.to_gate(), qr_state[:] + qr_sum[:] + qr_carry[:])
# linear objective function
state_preparation.append(objective.to_gate(), qr_sum[:] + qr_obj[:])
# uncompute aggregation so ancillas return to |0> and can be reused
state_preparation.append(agg.to_gate().inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
# draw the circuit
state_preparation.draw()
# -
# Before we use QAE to estimate the expected loss, we validate the quantum circuit representing the objective function by just simulating it directly and analyzing the probability of the objective qubit being in the $|1\rangle$ state, i.e., the value QAE will eventually approximate.
# Simulate the state-preparation circuit exactly to validate the objective mapping.
job = execute(state_preparation, backend=Aer.get_backend('statevector_simulator'))
# +
# evaluate resulting statevector: accumulate probability of the objective qubit being |1>
value = 0
for i, a in enumerate(job.result().get_statevector()):
    b = ('{0:0%sb}' % (len(qr_state) + 1)).format(i)[-(len(qr_state) + 1):]
    am = np.round(np.real(a), decimals=4)
    if np.abs(am) > 1e-6 and b[0] == '1':
        value += am**2
print('Exact Expected Loss: %.4f' % expected_loss)
print('Exact Operator Value: %.4f' % value)
print('Mapped Operator value: %.4f' % objective.post_processing(value))
# -
# Next we run QAE to estimate the expected loss with a quadratic speed-up over classical Monte Carlo simulation.
# +
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
qi = QuantumInstance(Aer.get_backend('aer_simulator'), shots=100)
problem = EstimationProblem(state_preparation=state_preparation,
                            objective_qubits=[len(qr_state)],
                            post_processing=objective.post_processing)
# construct amplitude estimation
ae = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
result = ae.estimate(problem)
# print results
conf_int = np.array(result.confidence_interval_processed)
print('Exact value: \t%.4f' % expected_loss)
print('Estimated value:\t%.4f' % result.estimation_processed)
print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int))
# ### Cumulative Distribution Function
#
# Instead of the expected loss (which could also be estimated efficiently using classical techniques) we now estimate the cumulative distribution function (CDF) of the loss.
# Classically, this either involves evaluating all the possible combinations of defaulting assets, or many classical samples in a Monte Carlo simulation. Algorithms based on QAE have the potential to significantly speed up this analysis in the future.
#
# To estimate the CDF, i.e., the probability $\mathbb{P}[L \leq x]$, we again apply $\mathcal{S}$ to compute the total loss, and then apply a comparator that for a given value $x$ acts as
#
# $$ \mathcal{C}: |L\rangle_n|0\rangle \mapsto
# \begin{cases}
# |L\rangle_n|1\rangle & \text{if}\quad L \leq x \\
# |L\rangle_n|0\rangle & \text{if}\quad L > x.
# \end{cases} $$
#
# The resulting quantum state can be written as
#
# $$ \sum_{L = 0}^{x} \sqrt{p_{L}}|L\rangle_{n_s}|1\rangle +
# \sum_{L = x+1}^{2^{n_s}-1} \sqrt{p_{L}}|L\rangle_{n_s}|0\rangle, $$
#
# where we directly assume the summed up loss values and corresponding probabilities instead of presenting the details of the uncertainty model.
#
# The CDF($x$) equals the probability of measuring $|1\rangle$ in the objective qubit and QAE can be directly used to estimate it.
# +
# set x value to estimate the CDF
x_eval = 2
# Comparator flips an ancilla when the summed loss is <= x_eval (geq=False, threshold x_eval+1).
comparator = IntegerComparator(agg.num_sum_qubits, x_eval + 1, geq=False)
comparator.draw()
# +
def get_cdf_circuit(x_eval):
    """Build the state-preparation circuit for estimating CDF(x_eval) = P[L <= x_eval].

    Uses the module-level uncertainty model ``u`` and aggregator ``agg``; the
    objective qubit is set by an integer comparator on the summed loss.
    """
    # define the registers for convenience and readability
    qr_state = QuantumRegister(u.num_qubits, 'state')
    qr_sum = QuantumRegister(agg.num_sum_qubits, 'sum')
    qr_carry = QuantumRegister(agg.num_carry_qubits, 'carry')
    qr_obj = QuantumRegister(1, 'objective')
    qr_compare = QuantumRegister(1, 'compare')
    # define the circuit
    state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, name='A')
    # load the random variable
    state_preparation.append(u, qr_state)
    # aggregate
    state_preparation.append(agg, qr_state[:] + qr_sum[:] + qr_carry[:])
    # comparator objective function
    comparator = IntegerComparator(agg.num_sum_qubits, x_eval + 1, geq=False)
    state_preparation.append(comparator, qr_sum[:] + qr_obj[:] + qr_carry[:])
    # uncompute aggregation
    state_preparation.append(agg.inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
    return state_preparation
state_preparation = get_cdf_circuit(x_eval)
# -
# Again, we first use quantum simulation to validate the quantum circuit.
# Validate the CDF circuit exactly on the statevector simulator.
job = execute(state_preparation, backend=Aer.get_backend('statevector_simulator'))
state_preparation.draw()
# evaluate resulting statevector: P(objective qubit = |1>) should equal CDF(x_eval)
var_prob = 0
for i, a in enumerate(job.result().get_statevector()):
    b = ('{0:0%sb}' % (len(qr_state) + 1)).format(i)[-(len(qr_state) + 1):]
    prob = np.abs(a)**2
    if prob > 1e-6 and b[0] == '1':
        var_prob += prob
print('Operator CDF(%s)' % x_eval + ' = %.4f' % var_prob)
print('Exact CDF(%s)' % x_eval + ' = %.4f' % cdf[x_eval])
# Next we run QAE to estimate the CDF for a given $x$.
# +
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
qi = QuantumInstance(Aer.get_backend('aer_simulator'), shots=100)
problem = EstimationProblem(state_preparation=state_preparation,
                            objective_qubits=[len(qr_state)])
# construct amplitude estimation
ae_cdf = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
result_cdf = ae_cdf.estimate(problem)
# print results
conf_int = np.array(result_cdf.confidence_interval)
print('Exact value: \t%.4f' % cdf[x_eval])
print('Estimated value:\t%.4f' % result_cdf.estimation)
print('Confidence interval: \t[%.4f, %.4f]' % tuple(conf_int))
# -
# ### Value at Risk
#
# In the following we use a bisection search and QAE to efficiently evaluate the CDF to estimate the value at risk.
def run_ae_for_cdf(x_eval, epsilon=0.01, alpha=0.05, simulator='aer_simulator'):
    """Estimate CDF(x_eval) = P[L <= x_eval] with iterative QAE.

    Parameters
    ----------
    x_eval : int
        Loss level at which to evaluate the CDF.
    epsilon : float, optional
        Target estimation precision.
    alpha : float, optional
        Confidence level of the QAE confidence interval.
    simulator : str, optional
        Name of the Aer backend to run on.

    Returns
    -------
    float
        The QAE estimate of the CDF value.
    """
    # construct amplitude estimation
    state_preparation = get_cdf_circuit(x_eval)
    # FIX: honor the ``simulator`` argument (it was accepted but ignored; the
    # backend name was hard-coded).
    qi = QuantumInstance(Aer.get_backend(simulator), shots=100)
    # NOTE(review): objective_qubits uses the module-level qr_state register;
    # presumably it matches the circuit built by get_cdf_circuit — confirm.
    problem = EstimationProblem(state_preparation=state_preparation,
                                objective_qubits=[len(qr_state)])
    ae_var = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
    result_var = ae_var.estimate(problem)
    return result_var.estimation
def bisection_search(objective, target_value, low_level, high_level, low_value=None, high_value=None):
    """
    Determines the smallest level such that the objective value is still larger than the target

    :param objective: objective function mapping an integer level to a value
    :param target_value: target value the objective should reach
    :param low_level: lowest level to be considered
    :param high_level: highest level to be considered
    :param low_value: value of lowest level (will be evaluated if set to None)
    :param high_value: value of highest level (will be evaluated if set to None)
    :return: dictionary with keys 'level', 'value', 'num_eval', 'comment'
    """
    # evaluate the bracket endpoints unless the caller supplied them
    print('--------------------------------------------------------------------')
    print('start bisection search for target value %.3f' % target_value)
    print('--------------------------------------------------------------------')
    num_eval = 0
    if low_value is None:
        low_value = objective(low_level)
        num_eval += 1
    if high_value is None:
        high_value = objective(high_level)
        num_eval += 1
    # check if low_value already satisfies the condition
    if low_value > target_value:
        return {'level': low_level, 'value': low_value, 'num_eval': num_eval, 'comment': 'returned low value'}
    elif low_value == target_value:
        return {'level': low_level, 'value': low_value, 'num_eval': num_eval, 'comment': 'success'}
    # check if high_value is above target
    if high_value < target_value:
        # fix: this branch returns the *high* endpoint; the comment string
        # previously (and wrongly) said 'returned low value'
        return {'level': high_level, 'value': high_value, 'num_eval': num_eval, 'comment': 'returned high value'}
    elif high_value == target_value:
        return {'level': high_level, 'value': high_value, 'num_eval': num_eval, 'comment': 'success'}
    # bisect until the bracket [low_level, high_level] has width one
    print('low_level    low_value    level    value    high_level    high_value')
    print('--------------------------------------------------------------------')
    while high_level - low_level > 1:
        level = int(np.round((high_level + low_level) / 2.0))
        num_eval += 1
        value = objective(level)
        print('%2d           %.3f        %2d       %.3f    %2d          %.3f' \
              % (low_level, low_value, level, value, high_level, high_value))
        if value >= target_value:
            high_level = level
            high_value = value
        else:
            low_level = level
            low_value = value
    # the high endpoint is now the smallest level whose value reaches the target
    print('--------------------------------------------------------------------')
    print('finished bisection search')
    print('--------------------------------------------------------------------')
    return {'level': high_level, 'value': high_value, 'num_eval': num_eval, 'comment': 'success'}
# run bisection search to determine VaR
objective = lambda x: run_ae_for_cdf(x)
bisection_result = bisection_search(objective, 1-alpha, min(losses)-1, max(losses), low_value=0, high_value=1)
var = bisection_result['level']
print('Estimated Value at Risk: %2d' % var)
print('Exact Value at Risk: %2d' % exact_var)
print('Estimated Probability: %.3f' % bisection_result['value'])
print('Exact Probability: %.3f' % cdf[exact_var])
# ### Conditional Value at Risk
#
# Last, we compute the CVaR, i.e. the expected value of the loss conditional to it being larger than or equal to the VaR.
# To do so, we evaluate a piecewise linear objective function $f(L)$, dependent on the total loss $L$, that is given by
#
# $$
# f(L) = \begin{cases}
# 0 & \text{if}\quad L \leq VaR \\
# L & \text{if}\quad L > VaR.
# \end{cases}
# $$
#
# To normalize, we have to divide the resulting expected value by the VaR-probability, i.e. $\mathbb{P}[L \leq VaR]$.
# +
# define linear objective
breakpoints = [0, var]
slopes = [0, 1]
offsets = [0, 0] # subtract VaR and add it later to the estimate
f_min = 0
f_max = 3 - var
c_approx = 0.25
# piecewise-linear amplitude function: 0 up to the VaR breakpoint, identity above it
cvar_objective = LinearAmplitudeFunction(
    agg.num_sum_qubits,
    slopes,
    offsets,
    domain=(0, 2**agg.num_sum_qubits - 1),
    image=(f_min, f_max),
    rescaling_factor=c_approx,
    breakpoints=breakpoints
)
cvar_objective.draw()
# +
# define the registers for convenience and readability
qr_state = QuantumRegister(u.num_qubits, 'state')
qr_sum = QuantumRegister(agg.num_sum_qubits, 'sum')
qr_carry = QuantumRegister(agg.num_carry_qubits, 'carry')
qr_obj = QuantumRegister(1, 'objective')
qr_work = QuantumRegister(cvar_objective.num_ancillas - len(qr_carry), 'work')
# define the circuit
state_preparation = QuantumCircuit(qr_state, qr_obj, qr_sum, qr_carry, qr_work, name='A')
# load the random variable
state_preparation.append(u, qr_state)
# aggregate
state_preparation.append(agg, qr_state[:] + qr_sum[:] + qr_carry[:])
# linear objective function
state_preparation.append(cvar_objective, qr_sum[:] + qr_obj[:] + qr_carry[:] + qr_work[:])
# uncompute aggregation
state_preparation.append(agg.inverse(), qr_state[:] + qr_sum[:] + qr_carry[:])
# Again, we first use quantum simulation to validate the quantum circuit.
job = execute(state_preparation, backend=Aer.get_backend('statevector_simulator'))
# +
# evaluate resulting statevector
value = 0
for i, a in enumerate(job.result().get_statevector()):
    # binary label of basis state i over the state qubits plus the objective qubit
    b = ('{0:0%sb}' % (len(qr_state) + 1)).format(i)[-(len(qr_state) + 1):]
    am = np.round(np.real(a), decimals=4)
    # sum squared amplitudes where the objective qubit reads |1>
    if np.abs(am) > 1e-6 and b[0] == '1':
        value += am**2
# normalize and add VaR to estimate
value = cvar_objective.post_processing(value)
d = (1.0 - bisection_result['value'])
v = value / d if d != 0 else 0
normalized_value = v + var
print('Estimated CVaR: %.4f' % normalized_value)
print('Exact CVaR:     %.4f' % exact_cvar)
# -
# Next we run QAE to estimate the CVaR.
# +
# set target precision and confidence level
epsilon = 0.01
alpha = 0.05
qi = QuantumInstance(Aer.get_backend('aer_simulator'), shots=100)
problem = EstimationProblem(state_preparation=state_preparation,
                            objective_qubits=[len(qr_state)],
                            post_processing=cvar_objective.post_processing)
# construct amplitude estimation
ae_cvar = IterativeAmplitudeEstimation(epsilon, alpha=alpha, quantum_instance=qi)
result_cvar = ae_cvar.estimate(problem)
# -
# print results
# renormalize by the tail probability P[L > VaR] and shift by the VaR
d = (1.0 - bisection_result['value'])
v = result_cvar.estimation_processed / d if d != 0 else 0
print('Exact CVaR:    \t%.4f' % exact_cvar)
print('Estimated CVaR:\t%.4f' % (v + var))
import qiskit.tools.jupyter
# %qiskit_version_table
# %qiskit_copyright
| docs/tutorials/09_credit_risk_analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python [conda env:spatem]
# language: python
# name: conda-env-spatem-py
# ---
# # Get started
#
# In this tutorial, we will go through how to load trajectory outputs from dynverse, (briefly) explore the results and prepare for the inputs required by TraSig. We will assume you already know how to run trajectory inference methods on your data using dynverse (see [dynverse](https://dynverse.org/) for their tutorials). Note that in order to make use of their trajectory inference outputs, you need to add the following code to your R script to save the output:
#
# ```R
#
# # run trajectory inference method
# model <- infer_trajectory(dataset, list(ti_slingshot()), give_priors = c("start_id"),
# verbose = TRUE)
#
# # Add the following CODE to your R script
# library(dyncli) # this package is also provided by the dynverse group
#
# output_path <- "../trajectory/output/"
# output_filename <- paste0(output_path, "output.h5")
# write_output(model, output_filename)
#
# ```
#
# After you obtain your trajectory inference result, follow the steps below to prepare the inputs for TraSig. We will use ti_slingshot (Slingshot in dynverse) on the dataset "oligodendrocyte-differentiation-clusters_marques.rds" as an example.
#
# Alternatively, you may run the script [prepare_inputs.py](prepare_inputs.py) directly to prepare the inputs for TraSig. This script includes all the key steps (un-optional steps) listed below. Please refer to [Obtain the inputs using the command-line tool](#1) and **Command-line tools** session in README.md for more details.
#
# **Table of Content**
# 1. [(optional) Obtain the inputs using the command-line tool](#1)
# 2. [Load expression and true labels](#2)
# 3. [Load trajectory inference result](#3)
# 4. [Explore and evaluate trajectory results](#4)
# 5. [Prepare and save inputs for TraSig](#5)
#
# **Extra Package Requirements**
# * h5py >= 3.1.0 (required to load dynverse trajectory results)
# * rpy2 >= 3.3.6 (required to load dynverse datasets)
# * matplotlib-base >= 3.3.4 (required for plotting)
# * scikit-learn >= 0.23.2 (required for evaluating trajectory results)
# * scipy >= 1.5.4 (required to prepare sampling time input)
#
#
# **Updates log**
# * 10-21-21: change the output name of the filtered expression, to include ligand-receptor list name
# +
import os, sys
import argparse
import time
from os.path import exists
import collections
from typing import Iterable
import pickle
from collections import Counter
import requests
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import h5py
import rpy2.robjects as robjects
# +
# example data set
project = "oligodendrocyte-differentiation-clusters_marques"
# set the path to the inputs for the trajectory inference (e.g. expression)
input_path = "../trajectory/input"
# set the path to save the outputs of this script (place to save inputs for TraSig)
output_path = "../example/input"
# set the path to the trajectory output (HDF5 written by dyncli::write_output)
trajectory_filename = f"../trajectory/output/output.h5"
# +
# set the names for output files
preprocess = "None"
model_name = "ti_slingshot"
others = "None"
if preprocess != "None":
_preprocess = f"_{preprocess}"
else:
_preprocess = ""
if others == "None":
condition = ""
suffix = f"{_preprocess}_{model_name}{condition}"
# -
# # 1. (optional) Obtain the inputs using the command-line tool
# <a id=1></a>
#
# * You can run the following script to obtain all inputs for TraSig.
# * Alternatively, you may follow the break-downs starting from [Load expression](#2) to prepare for the inputs.
# %time ! python prepare_inputs.py -i ../trajectory/input -o ../example/input -d oligodendrocyte-differentiation-clusters_marques -t ../trajectory/output/output.h5 -g None -b ti_slingshot -e None
# # 2. Load expression and true labels
#
# * While you can locate the expression file dynverse downloaded to run trajectory inference tools, you may also download it yourself from their repository.
#
# <a id=2></a>
# +
filepath = f"{input_path}/{project}.rds"
# download the dataset from the dynverse Zenodo archive unless it is already cached
if os.path.exists(filepath):
    pass
else:
    url = f"https://zenodo.org/record/1443566/files/real/silver/{project}.rds?download=1"
    r = requests.get(url)
    with open(filepath, 'wb') as f:
        f.write(r.content)
# -
# ## 2.1 Load expression
# +
filepath = f"{input_path}/{project}.rds"
# use rpy2 to read the R .rds container holding the dynverse dataset
from rpy2.robjects import pandas2ri
pandas2ri.activate()
readRDS = robjects.r['readRDS']
df = readRDS(filepath)
# df = pandas2ri.rpy2py_dataframe(df)
# the rds object is a named list; index its slots by name
data_keys = list(df.names)
cell_ids = df[data_keys.index('cell_ids')]
expression = df[data_keys.index('expression')]
genes = df[data_keys.index('feature_info')]['feature_id'].values
N = len(cell_ids) # number of cells
G = len(genes) # number of genes
# -
# ## 2.2 Load true trajectory and labels
# +
# true trajectory
milestones_true = df[data_keys.index('milestone_ids')]
network_true = df[data_keys.index('milestone_network')]
M_true = len(milestones_true)
# add node index; node index consistent with index in 'milestone_ids'
# will use node index to present node from now on
network_true['idx_from'] = [list(milestones_true).index(i) for i in network_true['from']]
network_true['idx_to'] = [list(milestones_true).index(i) for i in network_true['to']]
membership_true = df[data_keys.index('milestone_percentages')]
# assign cells to the most probable node
# keep, per cell, the row whose membership percentage equals the cell's maximum
assignment_true = membership_true[membership_true.groupby(['cell_id'])['percentage'].transform(max) == membership_true['percentage']]
assignment_true.set_index('cell_id', inplace=True)
assignment_true = assignment_true.reindex(cell_ids)
clusters_true = [list(milestones_true).index(c) for c in assignment_true['milestone_id'].values]
# -
# # 3. Load trajectory inference result
# <a id=3></a>
# +
f = h5py.File(trajectory_filename, 'r')
# # Check what keys are
# for key in f.keys():
#     print(key)
key = 'data'
# Get the HDF5 group
group = f[key]
# # Check what keys are inside that group.
# for key in group.keys():
#     print(key)
_percentages = group['milestone_percentages']
_network = group['milestone_network']
_progressions = group['progressions']
# # Check what keys are
# data.keys()
# data['data'].keys()
# h5py stores strings as bytes; decode cell ids to str before building frames
_cell_ids = list(_percentages['data']['cell_id'])
_cell_ids = [i.decode('utf-8') for i in _cell_ids]
estimated_percentages = pd.DataFrame(zip(_cell_ids, list(_percentages['data']['milestone_id']), list(_percentages['data']['percentage'])))
estimated_percentages.columns = ['cell_id', 'milestone_id', 'percentage']
_cell_ids = list(_progressions['data']['cell_id'])
_cell_ids = [i.decode('utf-8') for i in _cell_ids]
estimated_progressions = pd.DataFrame(zip(_cell_ids, list(_progressions['data']['from']), list(_progressions['data']['to']), list(_progressions['data']['percentage'])))
estimated_progressions.columns = ['cell_id', 'from', 'to' ,'percentage']
estimated_progressions = estimated_progressions.set_index("cell_id")
estimated_progressions = estimated_progressions.reindex(assignment_true.index.values) # assignment_true already reindexed by cell_ids
estimated_network = pd.DataFrame(pd.DataFrame(zip(list(_network['data']['from']), list(_network['data']['to']), list(_network['data']['length']))))
# hard cluster assignment: per cell, the milestone with the largest percentage
estimated_clusters = estimated_percentages.loc[estimated_percentages.groupby(["cell_id"])["percentage"].idxmax()].set_index('cell_id').reindex(cell_ids)
estimated_clusters['milestone_id'] = [_c.decode("utf-8") for _c in estimated_clusters['milestone_id']]
# -
# # 4. Exploring and evaluating trajectory inference results (optional step)
# <a id=4></a>
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics import f1_score
from sklearn.preprocessing import LabelEncoder
# +
clusters_estimated = estimated_clusters['milestone_id'].values
# encode milestone names as integers so they can be compared with clusters_true
le = LabelEncoder()
clusters_estimated = le.fit_transform(clusters_estimated)
_ari = adjusted_rand_score(clusters_true, clusters_estimated)
_f1_score = f1_score(clusters_true, clusters_estimated, average = None)
print(f"ARI: {_ari}, F1: {_f1_score}")
# -
# ##### predicted trajectory network
estimated_network
# ##### evaluation of the correspondence in clustering assignment, compared with true
df_plot = pd.DataFrame([assignment_true['milestone_id'].values, clusters_estimated]).T
df_plot.index = cell_ids
df_plot.columns = ['true', 'pred']
pd.crosstab(df_plot['true'], df_plot['pred'])
# ##### number of cells assigned to different positions along an edge
for n1, n2 in estimated_network.iloc[:, :2].values:
    # cells assigned to the edge (n1 -> n2)
    condition = np.logical_and(estimated_progressions['from'] == n1,
                               estimated_progressions['to'] == n2)
    estimated_progressions[condition]['percentage'].hist()
    plt.show()
    plt.close()
# # 5. Prepare and save input for TraSig
# <a id=5></a>
#
# * 1. filter expression to leave only ligands and receptors in database
# * 2. save estimated clusters, progressions and true cell labels
# * 3. save cells' real time (if unknown, then set all to 0)
# ## 5.1 Save estimated cluster and progression time
#
# 1. assigned path (edge)
# 2. assigned time / progression on the edge
# 3. cell type labels (ground truth)
# decode byte strings and build a readable "from_to" edge label per cell
estimated_progressions['from'] = [i.decode('utf-8') for i in estimated_progressions['from']]
estimated_progressions['to'] = [i.decode('utf-8') for i in estimated_progressions['to']]
estimated_progressions['edge'] = estimated_progressions['from'] + '_' + estimated_progressions['to']
# +
# assign unique label (integer) to each edge
edges = np.unique(estimated_progressions['edge'])
edge2idx = {}
for i, v in enumerate(edges):
    edge2idx[v] = i
# -
edge2idx
estimated_progressions['idx_edge'] = estimated_progressions['edge'].replace(edge2idx)
# TraSig inputs: assigned edge, progression on that edge, and true labels
hid_var = {'cell_path': estimated_progressions['idx_edge'].values,
           'cell_time': estimated_progressions['percentage'].values,
           'cell_labels':assignment_true['milestone_id'].values}
# save
filename = f"{project}{_preprocess}_{model_name}_it2_hid_var.pickle"
with open(os.path.join(output_path, filename), 'wb') as handle:
    pickle.dump(hid_var, handle, protocol=pickle.HIGHEST_PROTOCOL)
# ###### check percent of cell types (true) in each edge
estimated_progressions['label'] = assignment_true['milestone_id'].values
estimated_progressions['count'] = 0 # place holder
for cur_edge in edges:
    print(f"Edge {cur_edge} with label {edge2idx[cur_edge]}")
    print(estimated_progressions[['label', 'edge', 'count']].groupby(['edge', 'label']).count().loc[cur_edge])
    print("\n")
# ## 5.2 Subsetting expression data (to keep only ligand-receptors )
#
# 1. the following take expression and ligand-receptor list (database) as input
# +
# get interaction file (list of (ligand, receptor))
lr_list_path = "../ligand_receptor_lists"
list_type = 'ligand_receptor'
filename = f"{list_type }_FANTOM.pickle"
with open(os.path.join(lr_list_path, filename), 'rb') as handle:
    interaction_list = pickle.load(handle)
# flat, deduplicated list of every gene appearing as ligand or receptor
ligands_receptors = np.unique([i[0] for i in interaction_list] + [i[1] for i in interaction_list])
# get list of genes identified as ligand or receptor
# upper-case gene symbols so the comparison is case-insensitive
genes_upper = [g.upper() for g in genes]
kepted_genes = list(set(genes_upper).intersection(set(ligands_receptors)))
df = pd.DataFrame(expression)
df.columns = genes_upper
df.index = cell_ids
df_sub = df[kepted_genes]
# save filtered expression
filename = f"{project}{_preprocess}_{list_type}.txt"
data_file = os.path.join(output_path, filename)
df_sub.to_csv(data_file)
# save filtered interactions (list of (ligand, receptor) that are expressed)
filtered_interactions = []
for i, j in interaction_list:
    if i in kepted_genes and j in kepted_genes:
        filtered_interactions.append((i, j))
filename = f"{list_type}_{project}{_preprocess}.pickle"
with open(os.path.join(output_path, filename), 'wb') as handle:
    pickle.dump(filtered_interactions, handle, protocol=pickle.HIGHEST_PROTOCOL)
# -
# ## 5.3 Save correspondence from sampling time to paths
#
# 1. Note here cell_path refers to the edge where the cell is assigned to
# 2. We will only find interactions between cells from the same sampling time and those from consecutive sampling times:
# - i.e., between the ones from the same time, the ones from 1 sampling time before the ones from 1 sampling time after
# 3. Given we don't know the sampling time for the example data, we set all sampling time as 0. For your own data, if you are not certain about sampling time, just assign the time for all cells as 0.
# 4. If sampling time is known, rank the real time (e.g. day 0, day 17) first and assign the rank to the cell_ori_time variable below.
# - e.g., for cells from two sampling time day 0 and day 17, assign those from day 0 as 0 and those from day 17 as 1.
from scipy import stats
# #### If known sampling time, then set the following variable = the sampling time of cells
cell_ori_time = np.repeat(0, N) # put all cells at time 0 if sampling time unknown
# #### The following is trying to assign each cluster / branch / edge a sampling time, determined by the majority of cells
unique_days = np.unique(cell_ori_time)
sorted_days = list(np.sort(unique_days))
cell_paths = np.unique(hid_var["cell_path"])
sampleT2path = dict.fromkeys(range(len(sorted_days))) # use index of sorted sampling time as key
for k, v in sampleT2path.items():
    sampleT2path[k] = []
for i, cur_path in enumerate(cell_paths):
    print("current path (edge)", cur_path)
    # get data corresponding to a path
    condition = hid_var["cell_path"] == cur_path
    cur_days = np.array(cell_ori_time)[condition]
    # get the sampling time for the majority cells
    # NOTE(review): relies on scipy.stats.mode returning arrays (mode[0]);
    # scipy >= 1.11 returns scalars by default -- confirm the pinned scipy version
    mode, count = stats.mode(cur_days)
    print(f"Sampling time for the majority of cells: {mode[0]}, making {round(float(count[0])/len(cur_days), 2)}% percent")
    cur_sampleT = mode[0]
    # will use index instead of input time
    sampleT2path[sorted_days.index(cur_sampleT)].append(cur_path)
# +
# save the dictionary
filename = 'sampling_time_per_path_' + project + suffix + '.pickle'
with open(os.path.join(output_path, filename), 'wb') as handle:
    pickle.dump(sampleT2path, handle, protocol=pickle.HIGHEST_PROTOCOL)
| tutorials/Prepare_input_from_dynverse_ti_methods.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 2. Serving a Private Model (GPT-2)
# **protecting model owner's intellectual property (IP)**
#
# In this series of tutorials you'll learn how to serve and query a private model on Grid. We use the GPT-2 model as a use case.
#
#
# ## Motivation
#
# Machine Learning as a Service (MLaaS) is already quite relevant in industry: companies train large models on big amounts of data and offer a model's predictions as a service to parties that don't have access to data or the expertise to train their own models. This is convenient for companies providing MLaaS because they can keep their IP private while external organizations can still benefit from the model's predictions.
#
# Even though this is a trend in industry, in academia this approach is not as explored. We believe this could be a potential interesting direction for research as well, since researchers and organizations can protect their IP but also mitigate malicious use cases or even limit user access to their model while allowing researchers to analyse and interact with the model's outputs.
| examples/Serving and Querying models on Grid/2. Serving a Private Model (GPT-2).ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # 08 - K-Nearest Neighbors Classifier
#
# by [<NAME>](albahnsen.com/)
#
# version 0.1, Mar 2016
#
# ## Part of the class [Practical Machine Learning](https://github.com/albahnsen/PracticalMachineLearningClass)
#
#
#
# This notebook is licensed under a [Creative Commons Attribution-ShareAlike 3.0 Unported License](http://creativecommons.org/licenses/by-sa/3.0/deed.en_US). Special thanks goes to [<NAME>](https://github.com/justmarkham), [Scikit-learn docs](http://scikit-learn.org/) & [<NAME>](http://machinelearningmastery.com/tutorial-to-implement-k-nearest-neighbors-in-python-from-scratch/)
# ## K-Nearest Neighbors
#
# The model for kNN is the entire training dataset. When a prediction is required for a unseen data instance, the kNN algorithm will search through the training dataset for the k-most similar instances. The prediction attribute of the most similar instances is summarized and returned as the prediction for the unseen instance.
#
# The similarity measure is dependent on the type of data. For real-valued data, the Euclidean distance can be used. Other other types of data such as categorical or binary data, Hamming distance can be used.
#
# In the case of regression problems, the average of the predicted attribute may be returned. In the case of classification, the most prevalent class may be returned.
#
# ## How does k-Nearest Neighbors Work
#
# The kNN algorithm is belongs to the family of instance-based, competitive learning and lazy learning algorithms.
#
# Instance-based algorithms are those algorithms that model the problem using data instances (or rows) in order to make predictive decisions. The kNN algorithm is an extreme form of instance-based methods because all training observations are retained as part of the model.
#
# It is a competitive learning algorithm, because it internally uses competition between model elements (data instances) in order to make a predictive decision. The objective similarity measure between data instances causes each data instance to compete to “win” or be most similar to a given unseen data instance and contribute to a prediction.
#
# Lazy learning refers to the fact that the algorithm does not build a model until the time that a prediction is required. It is lazy because it only does work at the last second. This has the benefit of only including data relevant to the unseen data, called a localized model. A disadvantage is that it can be computationally expensive to repeat the same or similar searches over larger training datasets.
#
# Finally, kNN is powerful because it does not assume anything about the data, other than a distance measure can be calculated consistently between any two instances. As such, it is called non-parametric or non-linear as it does not assume a functional form.
#
# ## Example using the iris dataset
# read the iris data into a DataFrame
import pandas as pd
# the UCI file ships without a header row, so supply the column names ourselves
url = 'http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data'
col_names = ['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
iris = pd.read_csv(url, header=None, names=col_names)
iris.head()
# ## Human learning on the iris dataset
#
# How did we (as humans) predict the species of an iris?
#
# 1. We observed that the different species had (somewhat) dissimilar measurements.
# 2. We focused on features that seemed to correlate with the response.
# 3. We created a set of rules (using those features) to predict the species of an unknown iris.
#
# We assumed that if an **unknown iris** has measurements similar to **previous irises**, then its species is most likely the same as those previous irises.
# +
# allow plots to appear in the notebook
# %matplotlib inline
import matplotlib.pyplot as plt
# increase default figure and font sizes for easier viewing
plt.rcParams['figure.figsize'] = (6, 4)
plt.rcParams['font.size'] = 14
# create a custom colormap (red / green / blue, one color per species)
from matplotlib.colors import ListedColormap
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
# -
# map each iris species to a number
iris['species_num'] = iris.species.map({'Iris-setosa':0, 'Iris-versicolor':1, 'Iris-virginica':2})
# create a scatter plot of PETAL LENGTH versus PETAL WIDTH and color by SPECIES
iris.plot(kind='scatter', x='petal_length', y='petal_width', c='species_num', colormap=cmap_bold)
# create a scatter plot of SEPAL LENGTH versus SEPAL WIDTH and color by SPECIES
iris.plot(kind='scatter', x='sepal_length', y='sepal_width', c='species_num', colormap=cmap_bold)
# ## Creating a KNN classifier
#
# ### Estimate Similarity
#
# In order to make predictions we need to calculate the similarity between any two given data instances. This is needed so that we can locate the k most similar data instances in the training dataset for a given member of the test dataset and in turn make a prediction.
#
# Given that all four flower measurements are numeric and have the same units, we can directly use the Euclidean distance measure. This is defined as the square root of the sum of the squared differences between the two arrays of numbers (read that again a few times and let it sink in).
#
# Additionally, we want to control which fields to include in the distance calculation. Specifically, we only want to include the first 4 attributes. One approach is to limit the euclidean distance to a fixed length, ignoring the final dimension.
#
# Putting all of this together we can define the `euclideanDistance` function as follows:
import numpy as np
def euclideanDistance(instance1, instance2):
    """Euclidean distance between two instances, or between every row of a
    matrix of instances and a single instance.

    Parameters
    ----------
    instance1, instance2 : np.ndarray
        Either two 1-D feature vectors, or a 2-D matrix of instances (rows)
        paired with a 1-D instance broadcast against each row.

    Returns
    -------
    float or np.ndarray
        A scalar distance for vector inputs, or a 1-D array with one
        distance per row for matrix input.
    """
    distance = (instance1 - instance2) ** 2
    # A 1-D difference means a single pair of instances; a 2-D difference
    # means one distance per row.  Checking ndim is robust for matrices with
    # a single feature column, where the original shape[0] == size test
    # wrongly collapsed all rows into one distance.
    if distance.ndim == 1:
        return distance.sum() ** 0.5
    return distance.sum(axis=1) ** 0.5
# demo: distance between two 2-D points, plotted as a dashed segment
data1 = np.array([2, 2])
data2 = np.array([4, 4])
distance = euclideanDistance(data1, data2)
print('Distance: ' + repr(distance))
# %matplotlib inline
import matplotlib.pyplot as plt
plt.scatter(data1[0], data1[1])
plt.scatter(data2[0], data2[1])
plt.plot([data1[0], data2[0]], [data1[1], data2[1]], '--r')
# ### Find Neighbors
#
# Now that we have a similarity measure, we can use it collect the k most similar instances for a given unseen instance.
#
# This is a straight forward process of calculating the distance for all instances and selecting a subset with the smallest distance values.
#
# Below is the `getNeighbors` function that returns k most similar neighbors from the training set for a given test instance (using the already defined `euclideanDistance` function)
# toy training set of six 2-D points and a query point
trainSet = np.array([[2, 2], [4, 4], [7, 7], [4, 1], [3, 4], [5, 2]])
testInstance = np.array([5, 5])
dist = euclideanDistance(trainSet, testInstance)
dist
# Which are the closest two points
# argsort gives indices of the training rows ordered by increasing distance
dist.argsort()[:2]
def getNeighbors(trainSet, testInstance, k):
    """Return the indices of the k training instances closest to testInstance."""
    distances = euclideanDistance(trainSet, testInstance)
    return np.argsort(distances)[:k]
# nearest single neighbor of (5, 5)
k = 1
neighbors = getNeighbors(trainSet, testInstance, k)
print(neighbors)
plt.scatter(trainSet[:, 0], trainSet[:, 1], s=50)
plt.scatter(testInstance[0], testInstance[1], c='green', s=100)
plt.plot([testInstance[0], trainSet[1, 0]], [testInstance[1], trainSet[1, 1]], '--r')
# three nearest neighbors of a second query point, each joined by a dashed line
testInstance = np.array([3.4, 3])
k = 3
neighbors = getNeighbors(trainSet, testInstance, k)
print(neighbors)
plt.scatter(trainSet[:, 0], trainSet[:, 1], s=50)
plt.scatter(testInstance[0], testInstance[1], c='green', s=100)
for neighbor in neighbors:
    plt.plot([testInstance[0], trainSet[neighbor, 0]], [testInstance[1], trainSet[neighbor, 1]], '--r')
# ### Response
#
# Once we have located the most similar neighbors for a test instance, the next task is to devise a predicted response based on those neighbors.
#
# We can do this by allowing each neighbor to vote for their class attribute, and take the majority vote as the prediction.
#
# Lets first define the label of each instance.
# class labels for the six toy training points
trainSet_y = np.array([0, 0, 1, 0, 1, 1])
plt.scatter(trainSet[trainSet_y==0, 0], trainSet[trainSet_y==0, 1], s=50)
plt.scatter(trainSet[trainSet_y==1, 0], trainSet[trainSet_y==1, 1], c='y', s=50)
# Below provides a function for getting the majority voted response from a number of neighbors. It assumes the class is the last attribute for each neighbor.
plt.scatter(trainSet[trainSet_y==0, 0], trainSet[trainSet_y==0, 1], s=50)
plt.scatter(trainSet[trainSet_y==1, 0], trainSet[trainSet_y==1, 1], c='y', s=50)
plt.scatter(testInstance[0], testInstance[1], c='green', s=100)
for neighbor in neighbors:
    plt.plot([testInstance[0], trainSet[neighbor, 0]], [testInstance[1], trainSet[neighbor, 1]], '--r')
trainSet_y[neighbors]
# NOTE(review): scipy.stats.itemfreq is deprecated and removed in scipy >= 1.3;
# np.unique(..., return_counts=True) is the modern equivalent
from scipy.stats import itemfreq
freq = itemfreq(trainSet_y[neighbors])
freq
# index of the class with the most votes, then the class value itself
freq[:, 1].argmax()
freq[:, 0][freq[:, 1].argmax()]
# ### Estimate probability
#
# Similarly, the probability can be estimated as the percentage of neighbors voting for each class
freq[:, 1] / freq[:, 1].sum()
np.vstack((freq[:, 0], freq[:, 1] / freq[:, 1].sum())).T
# ### creating the response
def getResponse(trainSet_y, neighbors):
    """Majority-vote prediction from the labels of the selected neighbors.

    Parameters
    ----------
    trainSet_y : np.ndarray
        Labels of the full training set.
    neighbors : np.ndarray
        Indices of the k nearest neighbors (as returned by getNeighbors).

    Returns
    -------
    tuple
        (predicted_label, probs) where probs is a 2-column array mapping each
        class present among the neighbors to its fraction of the votes.
    """
    votes = trainSet_y[neighbors]
    # fix: scipy.stats.itemfreq was deprecated and removed (scipy >= 1.3);
    # np.unique(..., return_counts=True) yields the same sorted
    # (value, count) pairs.
    values, counts = np.unique(votes, return_counts=True)
    pred = values[counts.argmax()]
    probs = np.vstack((values, counts / counts.sum())).T
    return pred, probs
# We can test out this function with some test neighbors, as follows:
# sanity check: predicted class and vote fractions for the current neighbors
response = getResponse(trainSet_y, neighbors)
print(response)
# ### Classifier
#
# Lets put everything together
def knn_classifier_one(trainSet, trainSet_y, testInstance, k):
    """Classify a single instance by majority vote of its k nearest neighbors.

    Returns (predicted_label, class_probabilities, neighbor_indices).
    """
    nearest = getNeighbors(trainSet, testInstance, k)
    label, probabilities = getResponse(trainSet_y, nearest)
    return label, probabilities, nearest
# classify one query point for several values of k and plot the neighbors used
testInstance = np.array([4.2, 4.1])
plt.scatter(trainSet[trainSet_y==0, 0], trainSet[trainSet_y==0, 1], s=50)
plt.scatter(trainSet[trainSet_y==1, 0], trainSet[trainSet_y==1, 1], c='y', s=50)
plt.scatter(testInstance[0], testInstance[1], c='green', s=100)
for k in range(2, 6):
    print('k = ', k)
    pred_y, pred_prob, neighbors = knn_classifier_one(trainSet, trainSet_y, testInstance, k)
    print('pred_y = ', pred_y)
    print('pred_prob = ', pred_prob)
    plt.scatter(trainSet[trainSet_y==0, 0], trainSet[trainSet_y==0, 1], s=50)
    plt.scatter(trainSet[trainSet_y==1, 0], trainSet[trainSet_y==1, 1], c='y', s=50)
    plt.scatter(testInstance[0], testInstance[1], c='green', s=100)
    for neighbor in neighbors:
        plt.plot([testInstance[0], trainSet[neighbor, 0]], [testInstance[1], trainSet[neighbor, 1]], '--r')
    plt.show()
# ### Allow more than one instances
testInstances = np.array([[4.2, 4.1], [1, 3], [6, 6]])
plt.scatter(trainSet[trainSet_y==0, 0], trainSet[trainSet_y==0, 1], s=50)
plt.scatter(trainSet[trainSet_y==1, 0], trainSet[trainSet_y==1, 1], c='y', s=50)
plt.scatter(testInstances[:,0], testInstances[:,1], c='green', s=100)
def knn_classifier(trainSet, trainSet_y, testInstances, k):
    """k-NN classification for a batch of test instances.

    Returns
    -------
    pred_y : (n_test,) array of predicted labels
    pred_prob : (n_test, n_classes) array of vote fractions, with columns
        ordered by the sorted unique training labels.
    """
    y_unique = np.unique(trainSet_y)
    n_test = testInstances.shape[0]
    pred_y = np.zeros(n_test)
    pred_prob = np.zeros((n_test, y_unique.shape[0]))
    for i, instance in enumerate(testInstances):
        neighbors = getNeighbors(trainSet, instance, k)
        label, probs = getResponse(trainSet_y, neighbors)
        pred_y[i] = label
        # The neighbour vote may not cover every class present in the
        # training labels, so map each class explicitly (missing -> 0).
        for j, cls in enumerate(y_unique):
            pred_prob[i, j] = probs[probs[:, 0] == cls, 1].sum()
    return pred_y, pred_prob
k = 3
knn_classifier(trainSet, trainSet_y, testInstances, k)
# ## Apply to Iris dataset
y = iris.species_num
X = iris[['sepal_length', 'sepal_width', 'petal_length', 'petal_width']]
# Fix: sklearn.cross_validation was deprecated in 0.18 and removed in 0.20;
# train_test_split now lives in sklearn.model_selection.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X.values, y.values, random_state=123)
y_pred, y_pred_prob = knn_classifier(X_train, y_train, X_test, k=5)
y_pred_prob[:5]
from sklearn.metrics import confusion_matrix
confusion_matrix(y_test, y_pred)
# ## Using Sklearn
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
y_pred_prob = knn.predict_proba(X_test)
y_pred_prob[:5]
confusion_matrix(y_test, y_pred)
# ## Comparing KNN with other models
#
# **Advantages of KNN:**
#
# - Simple to understand and explain
# - Model training is fast
# - Can be used for classification and regression
#
# **Disadvantages of KNN:**
#
# - Must store all of the training data
# - Prediction phase can be slow when n is large
# - Sensitive to irrelevant features
# - Sensitive to the scale of the data
# - Accuracy is (generally) not competitive with the best supervised learning methods
| notebooks/10-KNN.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python (fastai_dev)
# language: python
# name: fastai_dev
# ---
# +
# default_exp core
# -
# export
import numpy as np
import json
from pathlib import Path
# hide
from nbdev.showdoc import show_doc
from nbdev.export import notebook2script
# # Core
# > Functions that implement basic functionality that will be used in the library.
# ## Util functions
# A set of functions that provide useful functionality
# +
# export
def filter_files(files, include=[], exclude=[]):
    "Filter a list of files, keeping names that contain every `include` token and no `exclude` token"
    selected = files
    for token in include:
        selected = [f for f in selected if token in f.name]
    selected = [f for f in selected if not any(token in f.name for token in exclude)]
    return sorted(selected)
def ls(x, recursive=False, include=[], exclude=[]):
    "List files in folder, if recursive is True also list subfolders"
    entries = list(x.glob('**/*')) if recursive else list(x.iterdir())
    return sorted(filter_files(entries, include=include, exclude=exclude))
# Attach as a convenience method so any Path can do `p.ls(...)`.
Path.ls = ls
def hdf_attr_check(attr, hdf, default):
    "Return `hdf`'s attribute `attr`, or `default` when the attribute is missing"
    # Fix: the original called hdf.__getattr__(attr) directly, which raises
    # AttributeError for any object that does not define __getattr__ even
    # when the attribute exists; getattr works for every object.
    return getattr(hdf, attr, default)
def dict2json(data:dict, file):
    "Writes json file from dict"
    with open(file, 'w') as fp:
        json.dump(data, fp)
# -
# Examples:
path = Path('.')
path.ls()
path = Path('.')
path.ls(include=['.ipynb'])
path = Path('.')
path.ls(include=['.ipynb'], exclude=['_checkpoints'])
# export
def monthlen(year, month):
    "Gives length of the month in days (month is 1-based), honouring Gregorian leap years"
    base = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    # Gregorian rule stated flat instead of via nested ifs: leap years are
    # divisible by 4, except century years, unless divisible by 400.
    if year % 4 == 0 and (year % 100 != 0 or year % 400 == 0):
        base[1] += 1
    return base[month-1]
year = 2000
month = 2
monthlen(year, month)
# export
class InOutPath():
    """Keeps track of an input and a output path. Creates paths if they don't exist and mkdir=True"""
    def __init__(self, input_path:str, output_path:str, mkdir=True):
        # Accept plain strings for convenience and normalise to Path.
        self.input_path = Path(input_path) if isinstance(input_path, str) else input_path
        self.output_path = Path(output_path) if isinstance(output_path, str) else output_path
        if mkdir:
            self.mkdirs()
    @property
    def src(self):
        "Shortcut to input_path"
        return self.input_path
    @property
    def dst(self):
        "Shortcut to output_path"
        return self.output_path
    def mkdirs(self):
        "Create both directories (including parents) if they do not exist yet."
        for path in (self.input_path, self.output_path):
            path.mkdir(exist_ok=True, parents=True)
    def __truediv__(self, s):
        # `iop / "sub"` descends into the same subfolder on both sides.
        return InOutPath(self.src/s, self.dst/s)
    def __repr__(self):
        lines = [f'{name}: {value}' for name, value in self.__dict__.items()]
        return '\n'.join(lines) + '\n'
show_doc(InOutPath.src)
show_doc(InOutPath.dst)
# hide
notebook2script()
| nbs/00_core.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Leticia's Version
# Hi! :D
# # Problem Statement
# The Society of Professional Journalists hired The East-West Alliance (TEWA) as consultants to create a model that predicts and classifies articles as either fake or real based on the title and content of the news article. The goal aims to mitigate the spread of misinformation and promote accountability in the media. It would help out social media sites like Facebook and Twitter to filter out spam and/or fake stories that would make a culpable person believe them.
# # Imports
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer, TfidfTransformer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS
# -
# # Read Data
Fake_df = pd.read_csv('./Data/Fake.csv')
Fake_df.head()
Fake_df['target'] = 1
True_df = pd.read_csv('./Data/True.csv')
True_df.head()
True_df['target'] = 0
Fake_df.shape[0] + True_df.shape[0]
Full_df = pd.concat([Fake_df,True_df],)
Full_df.shape
Full_df.reset_index(inplace=True)
Full_df.drop(columns="index", inplace=True)
Full_df
Full_df["all_text"] = Full_df["title"] + " " + Full_df["text"]
Full_df
# # EDA
# #### Custom Stop Words
my_words=["said",'trump','reuters', 'hilary', "president", "united", "states", "state", "government", "getty images", "people", "told", "people", "percent", "featured news" ]
my_stop_words=ENGLISH_STOP_WORDS.union(my_words)
# ##### Unigram - Fake News Set
Fake_df_txt=Full_df[Full_df['target']==1]['all_text']
cvec = CountVectorizer(
min_df = 3,
max_features = 5000,
stop_words='english'
)
# Fit our vectorizer on our corpus
cvec.fit(Fake_df_txt)
# Transform the corpus
Fake_df_txt = cvec.transform(Fake_df_txt)
Fake_df_txt
#Sparse Matrix to DataFrame
Fake_df_txt = pd.DataFrame(Fake_df_txt.todense(),
columns=cvec.get_feature_names())
Fake_df_txt
# Plot of top occuring words across all documents - a lot of these are stop words - articles
plt.figure(figsize=(15,10))
Fake_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams in Fake News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
# ##### Unigram - True News Set
True_df_txt=Full_df[Full_df['target']==0]['all_text']
cvec1 = CountVectorizer(
min_df = 3,
max_features = 5000,
stop_words='english'
)
# Fit our vectorizier on our corpus
cvec1.fit(True_df_txt)
# Transform the corpus
True_df_txt = cvec1.transform(True_df_txt)
True_df_txt
#Sparse Matrix to DataFrame
True_df_txt = pd.DataFrame(True_df_txt.todense(),
columns=cvec1.get_feature_names())
True_df_txt
# Plot of top occuring words across all documents - a lot of these are stop words - articles
plt.figure(figsize=(15,10))
True_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams in True News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
# #### Unigram - Full Set
Full_df_txt=Full_df['all_text']
cvec2 = CountVectorizer(
    min_df = 3,
    max_features = 5000,
    stop_words='english'
)
# Fit our vectorizer on our corpus
cvec2.fit(Full_df_txt)
# Transform the corpus
Full_df_txt = cvec2.transform(Full_df_txt)
Full_df_txt
# Sparse matrix to DataFrame
Full_df_txt = pd.DataFrame(Full_df_txt.todense(),
                           columns=cvec2.get_feature_names())
Full_df_txt
# Plot of the top occurring words across all documents.
# Fix: the original summed True_df_txt here, so the "All News" chart showed
# only the true-news counts; it must aggregate the full-set matrix.
plt.figure(figsize=(15,10))
Full_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams All News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
# ##### Bi-gram - Fake News Set
Fake_df_txt=Full_df[Full_df['target']==1]['all_text']
cvec = CountVectorizer(
    min_df = 3,
    max_features = 5000,
    stop_words='english',  # fix: the missing comma here was a SyntaxError
    ngram_range=(1,2)
)
# Fit our vectorizer on our corpus
cvec.fit(Fake_df_txt)
# Transform the corpus
Fake_df_txt = cvec.transform(Fake_df_txt)
Fake_df_txt
# Sparse matrix to DataFrame
Fake_df_txt = pd.DataFrame(Fake_df_txt.todense(),
                           columns=cvec.get_feature_names())
Fake_df_txt
# Plot of the top occurring terms across all fake-news documents.
# NOTE(review): titles/labels still say "Unigrams" although ngram_range now
# includes bi-grams, and head(25) plots 25 bars despite "Top 10" -- confirm
# the intended labels before publishing the chart.
plt.figure(figsize=(15,10))
Fake_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams in Fake News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
# ##### Unigram - True News Set
True_df_txt=Full_df[Full_df['target']==0]['all_text']
cvec1 = CountVectorizer(
min_df = 3,
max_features = 5000,
stop_words='english'
)
# Fit our vectorizier on our corpus
cvec1.fit(True_df_txt)
# Transform the corpus
True_df_txt = cvec1.transform(True_df_txt)
True_df_txt
#Sparse Matrix to DataFrame
True_df_txt = pd.DataFrame(True_df_txt.todense(),
columns=cvec1.get_feature_names())
True_df_txt
# Plot of top occuring words across all documents - a lot of these are stop words - articles
plt.figure(figsize=(15,10))
True_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams in True News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
# #### Unigram - Full Set
Full_df_txt=Full_df['all_text']
cvec2 = CountVectorizer(
    min_df = 3,
    max_features = 5000,
    stop_words='english'
)
# Fit our vectorizer on our corpus
cvec2.fit(Full_df_txt)
# Transform the corpus
Full_df_txt = cvec2.transform(Full_df_txt)
Full_df_txt
# Sparse matrix to DataFrame
Full_df_txt = pd.DataFrame(Full_df_txt.todense(),
                           columns=cvec2.get_feature_names())
Full_df_txt
# Plot of the top occurring words across all documents.
# Fix: the original summed True_df_txt here, so the "All News" chart showed
# only the true-news counts; it must aggregate the full-set matrix.
plt.figure(figsize=(15,10))
Full_df_txt.sum().sort_values(ascending=False).head(25).plot(kind='barh', color='red')
plt.title("Top 10 Unigrams All News", c='black', horizontalalignment='center')
plt.ylabel("Top 10 Unigrams", size=10, c='black')
plt.xlabel("Frequency", c="black")
# plt.savefig('./images/z_10_bigrams_stopwords.png');
| FakeNewsLG.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="si6RJuIywdn7"
# %%capture
# !wget -O data.zip --no-check-certificate https://diskcitylink.pro/arj/vFkdaTX
# !unzip data.zip
# !pip install catboost
# + id="ARrKTEUXw-UB"
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from catboost import CatBoostClassifier
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics import roc_auc_score
import matplotlib.pyplot as plt
from multiprocessing import cpu_count
from gensim.models.word2vec import Word2Vec
# + id="AQVHRJT9yrpk"
def date_to_data(frame, drop=False, name="event_date", suffix=""):
assert name in frame, "Нужна колонка с датой"
date = frame[name].dt
info = pd.concat([date.days_in_month,
date.month,
date.quarter,
date.dayofyear,
date.hour,
date.minute], axis=1)
info.columns = ["days_in_month", "month", "quarter",
"dayofyear", "hour", "minute"]
temp = pd.concat([date.isocalendar(), info], axis=1)
temp.columns = temp.columns + suffix
frame = pd.concat([frame, temp], axis=1)
if drop:
return frame.drop([name, "year" + suffix], axis=1)
return frame
# + id="S3C6GqRyxApR"
all_users = pd.read_csv("01_all_users.csv")
events_log = pd.read_csv("02_events_log.csv")
lk_events_log = pd.read_csv("03_lk_events_log.csv")
is_blocked = pd.read_csv("04_is_blocked.csv")
events_log["event_date"] = pd.to_datetime(events_log["event_date"])
events_log = events_log.sort_values(by=["event_date"]).reset_index(drop=True)
lk_events_log["event_date"] = pd.to_datetime(lk_events_log["event_date"])
lk_events_log = lk_events_log.sort_values(by="event_date").reset_index(drop=True)
events_log = date_to_data(events_log, drop=True, suffix="_events_log")
events_log["contract_id"] = events_log["contract_id"].astype(np.int32)
lk_events_log = date_to_data(lk_events_log, drop=True, suffix="_lk_events_log")
lk_events_log["contract_id"] = lk_events_log["contract_id"].astype(np.int32)
# + id="ObDmLHpR8npP"
# events_log = events_log.drop(["event_date"], axis=1)
# lk_events_log = lk_events_log.drop(["event_date"], axis=1)
# + id="nuKDL1K9xRl4"
df = all_users.merge(is_blocked, on=["contract_id"], how="left").dropna()
df["contract_id"] = df["contract_id"].astype(np.int32)
df["blocked"] = df["blocked"].astype(np.int32)
# + id="A3AU362ixS2V"
events_log_temp = events_log[["contract_id", "event_type"]].groupby("contract_id").agg(list)
events_log_event_type_dict = {j:i for i, j in enumerate(sorted(events_log["event_type"].unique()))}
events_log_temp["event_type_len"] = events_log_temp["event_type"].apply(lambda x: len(x))
events_log_temp["event_type_set"] = events_log_temp["event_type"].apply(lambda x: len(set(x)))
events_log_temp["event_type_diff"] = events_log_temp["event_type_len"] - events_log_temp["event_type_set"]
events_log_temp["event_type"] = events_log_temp["event_type"].apply(lambda x: " ".join(str(events_log_event_type_dict[i]) for i in x))
data = events_log_temp["event_type"].apply(lambda x: x.split()).to_list()
# Training the Word2Vec model
w2v_model = Word2Vec(data, min_count=0, workers=cpu_count(), size=100)
w2v_df_ev = [np.mean([w2v_model.wv[i] for i in j], axis=0) for j in data]
w2v_df_ev = pd.DataFrame(w2v_df_ev, columns=[f"{i}_ev" for i in range(len(w2v_df_ev[0]))])
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(events_log_temp["event_type"])
events_log_tfidf = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())
events_log_tfidf.columns += "_events_log_tfidf"
events_log_temp = pd.concat([events_log_temp.reset_index(), events_log_tfidf, w2v_df_ev], axis=1)
events_log_temp = events_log_temp.set_index("contract_id")
events_log = events_log.merge(events_log_temp, on="contract_id", how="left")
events_log = events_log.drop(["event_type_x", "event_type_y"], axis=1)
events_log = events_log.drop_duplicates()
# + id="VKgjWbmm3syO"
lk_events_log_temp = lk_events_log[["contract_id", "name"]].groupby("contract_id").agg(list)
lk_events_log_event_type_dict = {j:i for i, j in enumerate(sorted(lk_events_log["name"].unique()))}
lk_events_log_temp["event_type_len"] = lk_events_log_temp["name"].apply(lambda x: len(x))
lk_events_log_temp["event_type_set"] = lk_events_log_temp["name"].apply(lambda x: len(set(x)))
lk_events_log_temp["event_type_diff"] = lk_events_log_temp["event_type_len"] - lk_events_log_temp["event_type_set"]
lk_events_log_temp["event_type"] = lk_events_log_temp["name"].apply(lambda x: " ".join(str(lk_events_log_event_type_dict[i]) for i in x))
data = lk_events_log_temp["event_type"].apply(lambda x: x.split()).to_list()
# Training the Word2Vec model
w2v_model = Word2Vec(data, min_count=0, workers=cpu_count(), size=100)
w2v_df_lk = [np.mean([w2v_model.wv[i] for i in j], axis=0) for j in data]
w2v_df_lk = pd.DataFrame(w2v_df_lk, columns=[f"{i}_ev" for i in range(len(w2v_df_lk[0]))])
vectorizer = CountVectorizer()
X = vectorizer.fit_transform(lk_events_log_temp["event_type"])
lk_events_log_tfidf = pd.DataFrame(X.toarray(), columns=vectorizer.get_feature_names_out())
lk_events_log_tfidf.columns += "_lk_events_log_tfidf"
lk_events_log_temp = pd.concat([lk_events_log_temp.reset_index(), lk_events_log_tfidf, w2v_df_lk], axis=1)
lk_events_log_temp = lk_events_log_temp.set_index("contract_id")
lk_events_log = lk_events_log.merge(lk_events_log_temp, on="contract_id", how="left")
lk_events_log = lk_events_log.drop(["name_x", "name_y", "event_type"], axis=1)
lk_events_log = lk_events_log.drop_duplicates()
# + id="Qbv2-fZoBIeK"
lk_events_log.columns = lk_events_log.columns + "_lk_events_log"
lk_events_log.columns = ["contract_id", *lk_events_log.columns[1:]]
# + id="zY-nM2JvBHhf"
events_log.columns = events_log.columns + "_events_log"
events_log.columns = ["contract_id", *events_log.columns[1:]]
# + id="q2z-Vnuc9_qT"
lk_events_log = lk_events_log.groupby("contract_id").agg(["mean", "median", "min", "max", "var", "std"])
lk_events_log.columns = ["_".join(i) for i in lk_events_log]
# + id="LaAiVJf9AQON"
events_log = events_log.groupby("contract_id").agg(["mean", "median", "min", "max", "var", "std"])
events_log.columns = ["_".join(i) for i in events_log]
# + id="pKcYN3cL5SrJ"
df = df.merge(lk_events_log, on="contract_id", how="left")\
.merge(events_log, on="contract_id", how="left")
# + id="Wa6o9iQDCJHi"
df[df.columns[3:]] = df[df.columns[3:]].astype(np.float32)
# + id="1pU9ovLNQPrq"
df = df.drop(["contract_id"], axis=1).drop_duplicates()
# + id="UE5Fc6vh7bm-"
X, y = df.drop(["blocked"], axis=1), df["blocked"]
X_train, X_test, y_train, y_test = train_test_split(X, y,
test_size=0.01,
random_state=42,
shuffle=True)
# + id="4umc59tDB91S"
model = CatBoostClassifier(iterations=150,
random_state=42,
verbose=1,
learning_rate=0.1,
eval_metric="AUC",
task_type="CPU",
score_function="Cosine",
max_depth=3)
# + colab={"base_uri": "https://localhost:8080/"} id="V679scaiCCwA" outputId="80ed1f4a-f8a8-4bbf-c5a7-c6d22f5bfd4c"
model.fit(X_train, y_train, eval_set=(X_test, y_test))
# + colab={"base_uri": "https://localhost:8080/"} id="eN_83a0XGLD-" outputId="d441a547-69b7-4a52-a2c4-47237cc7eb8f"
sorted([i for i in zip(model.feature_importances_, model.feature_names_) if i[0] > 0], reverse=True)
# + colab={"base_uri": "https://localhost:8080/"} id="Fi_5ZG_DH_zr" outputId="90b0403b-c54d-4302-ef7f-87761d09ec0f"
len([i[1] for i in zip(model.feature_importances_, model.feature_names_) if i[0] > 0])
# + id="xFxdaPSQHjCg"
golden_features = [i[1] for i in zip(model.feature_importances_, model.feature_names_) if i[0] > 0]
# + id="wK0ygJHXI9Rp"
def bootstrap(y_true, y_pred, num, count, random_state=42):
    """Draw `num` random subsamples of size `count` (without replacement)
    and return the ROC-AUC score of each, for a simple stability estimate.
    """
    y_true = y_true.reset_index(drop=True)
    candidates = y_true.index
    np.random.seed(random_state)  # reproducible subsampling
    scores = []
    for _ in range(num):
        picked = np.random.choice(candidates, count, replace=False)
        scores.append(roc_auc_score(y_true[picked], y_pred[picked]))
    return scores
# + colab={"base_uri": "https://localhost:8080/"} id="g6tmUcoUezsU" outputId="565ef2ae-09f8-43ad-c548-d4cc33aa98b2"
len(y_test)
# + id="A6rp07VdKDh3"
result = bootstrap(y_test, model.predict_proba(X_test)[:, 1], 100, 30, random_state=43)
# + colab={"base_uri": "https://localhost:8080/", "height": 450} id="WN0C44kjTXK6" outputId="3f38886d-9647-483e-b940-6b0ff40d790e"
plt.hist(result, bins=30)
# + id="qUEGqdQJl826"
test = pd.read_csv("test.csv").drop(["blocked"], axis=1)
source = test["contract_id"].to_list()
test = test.merge(all_users, on="contract_id", how="left").drop_duplicates()
source2 = test["contract_id"].to_list()
# + id="3fM3dOlSmqyO"
test = test.merge(lk_events_log, on="contract_id", how="left")\
.merge(events_log, on="contract_id", how="left")\
.drop(["contract_id"], axis=1)
# + id="7hopMvePnXzP"
test = test.astype(np.float32)
# + id="tsW7WorbnUWX"
predictions = model.predict_proba(test)[:, 1]
# + id="tCs_XOLcouIP"
temp = dict(zip(source2, predictions))
# + id="4oVcsmqensO1"
result = pd.DataFrame({"contract_id": source,
"blocked": [temp[i] for i in source]})
# + id="22Bghv5cpYlx"
result.to_csv("sub.csv", index=False)
# + id="6P8AjFXvqJpI"
model.save_model("catboost.cbm")
| catboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
class NN:
    """Minimal fully-connected neural network trained with batch gradient
    descent.  Hidden layers use ReLU, the output layer uses sigmoid, and the
    loss is binary cross-entropy, so the network is a binary classifier.

    NOTE(review): this cell relies on a module-level `numpy as np` import --
    confirm numpy is imported before this cell runs.
    """
    def __init__(self):
        # Layer sizes, filled in by `dense`; index 0 is the input dimension.
        self.neurons=[]
    @staticmethod
    def __init_params(neurons):
        """Initialise small uniform weights and zero biases.

        W[i] has shape (neurons[i], neurons[i-1]); B[i] is (neurons[i], 1).
        The fixed seed makes training runs reproducible.
        """
        np.random.seed(100)
        W={};
        B={}
        for i in range(1,len(neurons)):
            W[i] = np.random.rand(neurons[i],neurons[i-1]) *0.1
            B[i] = np.zeros((neurons[i],1),dtype=np.float64)
        return W,B
    def dense(self,*neurons):
        """Append layer sizes, e.g. nn.dense(2, 25, 1) for 2-in / 1-out."""
        for neuron in neurons:
            self.neurons.append(neuron)
    @staticmethod
    def __sigmoid(x):
        # Logistic activation, used on the output layer.
        return(1/(1+np.exp(-x)))
    @staticmethod
    def __relu(x):
        # Rectified linear activation, used on all hidden layers.
        return(np.maximum(0,x))
    @staticmethod
    def __forward_propagation(x,w,b,g):
        """One layer forward: returns pre-activation Z and activation g(Z)."""
        Z = np.dot(w,x) + b
        return Z, g(Z)
    @staticmethod
    def __full_forward_propagation(X,W,B):
        """Forward pass through all layers.

        Returns the per-layer pre-activations Z and activations A,
        with A[0] = X so indices line up with W/B.
        """
        Z={};A={};A[0] = X
        L = len(W)
        for i in range(1,L):
            Z[i],A[i] = NN.__forward_propagation(A[i-1],W[i],B[i],NN.__relu)
        # Final layer uses sigmoid to produce probabilities.
        Z[L],A[L] = NN.__forward_propagation(A[L-1],W[L],B[L],NN.__sigmoid)
        return Z,A
    @staticmethod
    def __dsigmoid(dA,Z):
        # Backprop through sigmoid: dZ = dA * s(Z) * (1 - s(Z)).
        g =NN.__sigmoid(Z)
        return dA * g*(1-g)
    @staticmethod
    def __drelu(dA,z):
        # Backprop through ReLU: gradient passes only where z > 0.
        dz = np.array(dA,copy=True)
        dz[z<=0]=0
        return dz
    @staticmethod
    def __backward_propagation(dAf,W,B,Z,A,dg):
        """One layer backward.

        dAf is the gradient w.r.t. this layer's activation, A the previous
        layer's activation; returns (dW, dB, dA_prev) averaged over the batch.
        """
        m=A.shape[1]
        dZ=dg(dAf,Z)
        dW=np.dot(dZ,A.T)/m
        dB=np.sum(dZ,axis=1,keepdims=True)/m
        dA0=np.dot(W.T,dZ)
        return dW,dB,dA0
    @staticmethod
    def __full_backward_propagation(W,B,Z,A,Y):
        """Full backward pass; returns gradients dW and dB for every layer."""
        dW ={}
        dB ={}
        dZ={}
        dA={}
        L = len(W)
        # Derivative of binary cross-entropy w.r.t. the output activation.
        dA[L] = - (np.divide(Y,A[L]) - np.divide((1-Y),(1-A[L])))
        dW[L],dB[L],dA[L-1] = NN.__backward_propagation(dA[L],W[L],B[L],Z[L],A[L-1],NN.__dsigmoid)
        for i in reversed(range(1,L)):
            dW[i],dB[i],dA[i-1] = NN.__backward_propagation(dA[i],W[i],B[i],Z[i],A[i-1],NN.__drelu)
        return dW,dB
    @staticmethod
    def __update_params(W,B,dW,dB,lr):
        """Vanilla gradient-descent step, in place, with learning rate lr."""
        L = len(W)
        for i in range(1,L+1):
            W[i]-=dW[i]*lr
            B[i]-=dB[i]*lr
        return W,B
    @staticmethod
    def __cost_function(AL,y):
        """Binary cross-entropy averaged over the batch."""
        m=AL.shape[1]
        cost = (-1/m) * np.sum(np.multiply(y,np.log(AL)) + np.multiply((1-y),np.log(1-AL)))
        # Make sure cost is a scalar
        cost = np.squeeze(cost)
        return cost
    @staticmethod
    def __convert_prob_into_class(AL):
        # Threshold probabilities at 0.5 to get hard 0/1 predictions.
        pred = np.copy(AL)
        pred[AL > 0.5] = 1
        pred[AL <= 0.5] = 0
        return pred
    @staticmethod
    def __get_accuracy(AL, Y):
        """Fraction of samples where every output unit matches the label."""
        pred = NN.__convert_prob_into_class(AL)
        return (pred == Y).all(axis=0).mean()
    def train(self,X,y,n_epochs=1,lr=1e-3,verbose=1):
        """Fit the network with full-batch gradient descent.

        X is (features, samples), y the 0/1 labels.  Stores the learned
        parameters on the instance and per-epoch cost/accuracy in
        ``self.epochs``.
        """
        epochs={}
        neurons = self.neurons
        #initialize params
        W,B = NN.__init_params(neurons)
        L=len(W)
        for i in range(1,n_epochs+1):
            #forward
            Z,A = NN.__full_forward_propagation(X,W,B)
            #calculate cost and accuracy
            cost = NN.__cost_function(A[L],y)
            accuracy =NN.__get_accuracy(A[L],y)
            epochs[i]={"cost":cost,"accuracy":accuracy}
            if verbose==1 :
                print(f'epoch:{i}/{n_epochs}',end='\r')
            #backward
            dW,dB = NN.__full_backward_propagation(W,B,Z,A,y)
            #Update
            W,B = NN.__update_params(W,B,dW,dB,lr)
        self.__W=W
        self.__B=B
        self.epochs=epochs
# +
import sklearn
from sklearn import datasets
N_SAMPLES = 1000
X, y = sklearn.datasets.make_moons(n_samples = N_SAMPLES, noise=0.2, random_state=100)
import matplotlib.pyplot as plt
plt.scatter(X[:,0],X[:,1])
# -
nn=NN()
nn.dense(2,25,100,100,10,1)
nn.train(X.T, y, 101,lr=0.01,verbose=1)
nn.epochs
| Neural network from scratch.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernel_info:
# name: python3
# kernelspec:
# argv:
# - C:\Users\<NAME>\Anaconda3\envs\py35\python.exe
# - -m
# - ipykernel_launcher
# - -f
# - '{connection_file}'
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] nteract={"transient": {"deleting": false}}
# # Stock Sterling Ratio Chart
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
from pandas_datareader import data as pdr
import yfinance as yf
yf.pdr_override()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
start = '2019-01-01' #input
end = '2020-07-01' #input
symbol1 = '^GSPC' #input
symbol2 = 'AMD' #input
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
market = yf.download(symbol1, start=start, end=end)['Adj Close']
stocks = yf.download(symbol2, start=start, end=end)['Adj Close']
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
market_returns = market.pct_change().dropna()
stocks_returns = stocks.pct_change().dropna()
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# risk free
rf = yf.download('BIL', start=start, end=end)['Adj Close'].pct_change()[1:]
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
def sterling_ratio(stocks_returns, market_returns):
    """Sterling-style ratio of the stock's CAPM expected excess return to its
    average drawdown.  Reads the module-level risk-free series `rf`.

    NOTE(review): `mrk_rate_ret` applies a price-style (last-first)/first
    formula to a *returns* series, and the drawdown is computed on returns
    rather than cumulative wealth -- confirm these are intended.
    """
    mrk_rate_ret = (market_returns[-1] - market_returns[0])/ market_returns[0]
    # CAPM beta = Cov(stock, market) / Var(market).  The original divided the
    # covariance by the standard deviation, which mis-scales beta; using the
    # covariance matrix keeps numerator and denominator on the same ddof.
    cov = np.cov(np.vstack([stocks_returns, market_returns]))
    beta = cov[0, 1] / cov[1, 1]
    er = rf + beta*(mrk_rate_ret-rf)
    average_dd = 1.0 - (stocks_returns / np.maximum.accumulate(stocks_returns)).mean()
    sterling_r = (er - rf) / average_dd
    return sterling_r
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
# Compute the running Sterling Ratio
running = [sterling_ratio(stocks_returns[i-90:i], market_returns[i-90:i]) for i in range(90, len(stocks_returns))]
# Plot running Sterling Ratio up to 100 days before the end of the data set
_, ax1 = plt.subplots(figsize=(12,8))
ax1.plot(range(90, len(stocks_returns)-100), running[:-100])
ticks = ax1.get_xticks()
ax1.set_xticklabels([stocks.index[int(i)].date() for i in ticks[:-1]]) # Label x-axis with dates
plt.title(symbol1 + ' Sterling Ratio')
plt.xlabel('Date')
plt.ylabel('Sterling Ratio')
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
SR = sterling_ratio(stocks_returns, market_returns)
SR
# + jupyter={"source_hidden": false, "outputs_hidden": false} nteract={"transient": {"deleting": false}}
SR.plot(figsize=(12,8), title = symbol1 + ' Sterling Ratio')
plt.axhline(y=SR.mean(), color='r', linestyle='-')
plt.xlabel('Date')
plt.ylabel('Sterling Ratio')
| Python_Stock/Stock_Measurement_Ratio_Chart/Stock_Sterling_Ratio_Chart.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: keras-gpu
# language: python
# name: keras-gpu
# ---
# ## 10.2 정책 순환법을 이용하는 강화학습의 사례
# 정책 순환법에 대한 실습으로 얼어붙은 호수Frozen Lake를 예제로 다룹니다.
#
# ### 10.2.1 Gym을 이용한 강화학습 환경 구성하기
import gym
env = gym.make("MountainCar-v0")
k = 0
new_s = env.reset()
env.render()
print(new_s)
new_s = env.reset()
r_total = 0
for i in range(200):
a_k = env.action_space.sample()
s, r, done, info = env.step(a_k)
env.render()
r_total += r
if not ((i + 1) % 10):
print(i, s, r, done, info, r_total)
if done:
print(i, s, r, done, info, r_total)
break
not 0
# ### 10.2.2 무작위 행동에 따른 상태, 보상, 그리고 종료 여부 관찰하기
# +
import pandas as pd
def run(N_Iter = 100, render_flag=False):
    """
    Run one episode with uniformly random actions on the module-level `flake`
    environment, logging state, reward, done flag and action per step.
    Return buff_df if done, otherwise return None
    """
    new_s = flake.reset()
    if render_flag: flake.render()
    buff_df = pd.DataFrame({"S":[new_s],"S:(x,y)":[(0,0)],
                            "R":[0.0], "done":[False],
                            "A":[0], "A:name": [""]})
    buff_df.index.name = 'k'
    Actions = ["Left", "Down", "Right", "Up"]
    for iter in range(N_Iter):
        a_k = flake.action_space.sample()
        buff_df.loc[iter,'A':"A:name"] = (a_k, Actions[a_k])
        s, r, done, info = flake.step(a_k)
        if render_flag: flake.render()
        new_df = pd.DataFrame({"S":[s], "S:(x,y)":[(s%4,s//4)],
                               "R":[r], "done":[done],
                               "A":[0], "A:name": [""]})
        # Fix: DataFrame.append was removed in pandas 2.0; pd.concat is the
        # documented replacement.
        buff_df = pd.concat([buff_df, new_df], ignore_index=True)
        buff_df.index.name = 'k'
        if done:
            return buff_df
    return None
run(10)
# -
# ### 10.2.3 반환값 구하기
# - 현재 결과에 대해 감가상각을 고려한 미래 보상들을 합친 반환값 G[k]를 구해봅니다.
import numpy as np
def calc_g(r, factor = 0.9):
    """Discounted returns: G[k] = r[k+1] + factor * G[k+1], with G[-1] = 0."""
    # Shift rewards one step left and pad with the terminal G of 0.
    g = np.append(np.copy(r[1:]), 0.0)
    running = 0.0
    for k in range(len(g) - 2, -1, -1):
        g[k] += factor * running
        running = g[k]
    return g
# - 이제 구한 반환값을 DataFrame에 추가해 봅니다.
# +
def get_g(N_Iter=10):
    """Run one episode and annotate its buffer with discounted returns G.

    Returns None (after a warning) when the episode did not terminate.
    """
    episode_df = run(N_Iter)
    if episode_df is None:
        print('Try more iterations for each run')
        return None
    episode_df['G'] = calc_g(episode_df.R.values)
    return episode_df
get_g()
# -
# ### 10.2.4 가치함수 구하기
# +
def get_g_many(N_Epochs=5, N_Iter=50):
    """Run several episodes and stack their G-annotated buffers.

    Returns None when no episode terminated within N_Iter steps.
    """
    gbuff_df = None
    for epoch in range(N_Epochs):
        buff_df = get_g(N_Iter)
        if buff_df is None:
            continue
        # Fix: the original keyed the first assignment on `epoch == 0`, so if
        # epoch 0 failed but a later epoch succeeded it called .append on
        # None and crashed.  Also, DataFrame.append was removed in pandas 2.0.
        if gbuff_df is None:
            gbuff_df = buff_df
        else:
            gbuff_df = pd.concat([gbuff_df, buff_df])
    return gbuff_df
get_g_many()
# +
gbuff_df = get_g_many(100)
V = np.zeros(flake.observation_space.n)
# N_V[S]: no of G values to calculate V[S]
N_V = np.zeros(flake.observation_space.n)
for s in range(flake.observation_space.n):
Gs_all = gbuff_df.G[gbuff_df.S==s].values
if len(Gs_all) > 0:
V[s] = np.average(Gs_all)
N_V[s] = len(Gs_all)
V_df = pd.DataFrame({"V": V, "No of Gs": N_V})
V_df.index.name = 's'
V_df
# -
# ### 10.2.5 행동가치함수 구하기
# +
gbuff_df = get_g_many(100)
Q = np.zeros((flake.observation_space.n, flake.action_space.n))
# N_Q[s,a]: no of G values to calculate Q[s,a]
N_Q = np.zeros((flake.observation_space.n, flake.action_space.n))
S_list = []
A_list = []
for s in range(flake.observation_space.n):
for a in range(flake.action_space.n):
Gs_all = gbuff_df.G[(gbuff_df.S==s) & (gbuff_df.A==a)].values
if len(Gs_all) > 0:
Q[s,a] = np.average(Gs_all)
N_Q[s,a] = len(Gs_all)
S_list.append(s)
A_list.append(a)
SA_df = pd.DataFrame({"S": S_list, "A": A_list})
Q_df = pd.DataFrame({"Q": Q.reshape(-1), "No of Gs": N_Q.reshape(-1)},
index=pd.MultiIndex.from_frame(SA_df))
Q_df
# -
# ### 10.2.6 새로운 정책 구하기
PI = np.argmax(Q,axis=1)
PI
# ### 10.2.7 새로운 정책 사용하기
# +
def run_with_PI(PI=None, N_Iter = 100, render_flag=False):
    """
    Run one episode following policy PI (state -> action); with PI=None the
    behaviour is the random-action `run`.
    Return buff_df if done, otherwise return None
    """
    s = flake.reset()
    if render_flag: flake.render()
    buff_df = pd.DataFrame({"S":[s],"S:(x,y)":[(0,0)],
                            "R":[0.0], "done":[False],
                            "A":[0], "A:name": [""]})
    buff_df.index.name = 'k'
    Actions = ["Left", "Down", "Right", "Up"]
    for iter in range(N_Iter):
        if PI is not None:
            a_k = PI[s]
        else:
            a_k = flake.action_space.sample()
        buff_df.loc[iter,'A':"A:name"] = (a_k, Actions[a_k])
        s, r, done, info = flake.step(a_k)
        if render_flag: flake.render()
        new_df = pd.DataFrame({"S":[s], "S:(x,y)":[(s%4,s//4)],
                               "R":[r], "done":[done],
                               "A":[0], "A:name": [""]})
        # Fix: DataFrame.append was removed in pandas 2.0; use pd.concat.
        buff_df = pd.concat([buff_df, new_df], ignore_index=True)
        buff_df.index.name = 'k'
        if done:
            return buff_df
    return None
run_with_PI(PI=PI, render_flag=True)
# -
# ---
# ### 10.2.8 전체 코드
# +
# File: ex10_1_rl_policy_iter.py
# 1. Gym을 이용한 강화학습 환경 구성하기
import gym
flake = gym.make("FrozenLake-v1", is_slippery=False)
new_s = flake.reset()
flake.render()
for _ in range(3):
a_k = flake.action_space.sample()
s, r, done, info = flake.step(a_k)
flake.render()
if done:
break
# 2. 무작위 행동에 따른 상태, 보상, 그리고 종료 여부 관찰하기
import pandas as pd
def run(N_Iter = 100, render_flag=False):
    """Run one FrozenLake episode with uniformly random actions.

    Parameters
    ----------
    N_Iter : int
        Maximum number of environment steps before giving up.
    render_flag : bool
        When True, render the environment after reset and after each step.

    Returns
    -------
    pandas.DataFrame or None
        Trajectory buffer (S, S:(x,y), R, done, A, A:name) if the episode
        terminates within N_Iter steps, otherwise None.
    """
    new_s = flake.reset()
    if render_flag: flake.render()
    buff_df = pd.DataFrame({"S":[new_s],"S:(x,y)":[(0,0)],
                            "R":[0.0], "done":[False],
                            "A":[0], "A:name": [""]})
    buff_df.index.name = 'k'
    Actions = ["Left", "Down", "Right", "Up"]
    for k in range(N_Iter):  # renamed from 'iter' to avoid shadowing the builtin
        a_k = flake.action_space.sample()
        # Record the action taken at step k on the row that step k produced.
        buff_df.loc[k,'A':"A:name"] = (a_k, Actions[a_k])
        s, r, done, info = flake.step(a_k)
        if render_flag: flake.render()
        new_df = pd.DataFrame({"S":[s], "S:(x,y)":[(s%4,s//4)],
                               "R":[r], "done":[done],
                               "A":[0], "A:name": [""]})
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement with identical semantics here.
        buff_df = pd.concat([buff_df, new_df], ignore_index=True)
        buff_df.index.name = 'k'
        if done:
            return buff_df
    return None
run(10)
# 3. 반환값 구하기
import numpy as np
def calc_g(r, factor = 0.9):
    """Compute discounted returns G_k = r_{k+1} + factor * G_{k+1}.

    Rewards are shifted one step so g[k] accumulates rewards received
    *after* step k; the terminal return g[-1] is pinned to 0.0.
    """
    g = np.append(np.copy(r[1:]), 0.0)  # g[-1] is fixed to 0.0
    running = 0.0
    # Sweep backwards, folding the discounted future return into each step.
    for idx in reversed(range(len(g) - 1)):
        g[idx] = g[idx] + factor * running
        running = g[idx]
    return g
buff_df = run(100)
calc_g(buff_df.R.values)
def get_g(N_Iter=50):
    """Run one random episode and attach the discounted-return column 'G'.

    Returns the trajectory DataFrame with the new 'G' column, or None
    (after printing a hint) when the episode did not finish in N_Iter steps.
    """
    buff_df = run(N_Iter)
    if buff_df is None:
        print('Try more iterations for each run')
        return None
    buff_df['G'] = calc_g(buff_df.R.values)
    return buff_df
get_g()
# 4. 가치함수 구하기
def get_g_many(N_Epochs=5, N_Iter=50):
    """Collect return-annotated trajectories from several random episodes.

    Parameters
    ----------
    N_Epochs : int
        Number of episodes to attempt.
    N_Iter : int
        Step budget per episode (forwarded to get_g).

    Returns
    -------
    pandas.DataFrame or None
        Concatenation of every successful episode buffer, or None when no
        episode terminated.

    Notes
    -----
    Fixes two defects in the original: DataFrame.append (removed in pandas
    2.0) is replaced with pd.concat, and a failed *first* episode no longer
    crashes with ``AttributeError: 'NoneType' object has no attribute
    'append'`` when a later episode succeeds.
    """
    frames = []
    for epoch in range(N_Epochs):
        buff_df = get_g(N_Iter)
        if buff_df is not None:
            frames.append(buff_df)
    return pd.concat(frames) if frames else None
get_g_many()
gbuff_df = get_g_many(100)
V = np.zeros(flake.observation_space.n)
# N_V[S]: no of G values to calculate V[S]
N_V = np.zeros(flake.observation_space.n)
for s in range(flake.observation_space.n):
Gs_all = gbuff_df.G[gbuff_df.S==s].values
if len(Gs_all) > 0:
V[s] = np.average(Gs_all)
N_V[s] = len(Gs_all)
V_df = pd.DataFrame({"V": V, "No of Gs": N_V})
V_df.index.name = 's'
V_df
# 5. 행동가치함수 구하기
gbuff_df = get_g_many(100)
Q = np.zeros((flake.observation_space.n, flake.action_space.n))
# N_Q[s,a]: no of G values to calculate Q[s,a]
N_Q = np.zeros((flake.observation_space.n, flake.action_space.n))
S_list = []
A_list = []
for s in range(flake.observation_space.n):
for a in range(flake.action_space.n):
Gs_all = gbuff_df.G[(gbuff_df.S==s) & (gbuff_df.A==a)].values
if len(Gs_all) > 0:
Q[s,a] = np.average(Gs_all)
N_Q[s,a] = len(Gs_all)
S_list.append(s)
A_list.append(a)
SA_df = pd.DataFrame({"S": S_list, "A": A_list})
Q_df = pd.DataFrame({"Q": Q.reshape(-1), "No of Gs": N_Q.reshape(-1)},
index=pd.MultiIndex.from_frame(SA_df))
Q_df
# 6. 새로운 정책 구하기
PI = np.argmax(Q,axis=1)
PI.reshape(4,4)
# 7. 새로운 정책 사용하기
def run_with_PI(PI=None, N_Iter = 100, render_flag=False):
    """Run one FrozenLake episode, following the policy PI when given.

    Parameters
    ----------
    PI : array-like or None
        Deterministic policy mapping state index -> action index.
        When None, actions are sampled uniformly from the action space.
    N_Iter : int
        Maximum number of environment steps before giving up.
    render_flag : bool
        When True, render the environment after reset and after each step.

    Returns
    -------
    pandas.DataFrame or None
        Trajectory buffer (S, S:(x,y), R, done, A, A:name) if the episode
        terminates within N_Iter steps, otherwise None.
    """
    s = flake.reset()
    if render_flag: flake.render()
    buff_df = pd.DataFrame({"S":[s],"S:(x,y)":[(0,0)],
                            "R":[0.0], "done":[False],
                            "A":[0], "A:name": [""]})
    buff_df.index.name = 'k'
    Actions = ["Left", "Down", "Right", "Up"]
    for k in range(N_Iter):  # renamed from 'iter' to avoid shadowing the builtin
        if PI is not None:
            a_k = PI[s]
        else:
            a_k = flake.action_space.sample()
        # Record the action taken at step k on the row that step k produced.
        buff_df.loc[k,'A':"A:name"] = (a_k, Actions[a_k])
        s, r, done, info = flake.step(a_k)
        if render_flag: flake.render()
        new_df = pd.DataFrame({"S":[s], "S:(x,y)":[(s%4,s//4)],
                               "R":[r], "done":[done],
                               "A":[0], "A:name": [""]})
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement with identical semantics here.
        buff_df = pd.concat([buff_df, new_df], ignore_index=True)
        buff_df.index.name = 'k'
        if done:
            return buff_df
    return None
run_with_PI(PI=PI, N_Iter=1, render_flag=True)
| all_repository/nb_ex10_1_rl_policy_iter-mountaincar.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 2022/01/02/SUN
# > ### ***`Class 고급`***
# `-` 오브젝트
# - 클래스 오브젝트
# - 인스턴스 오브젝트
# `-` 클래스 (=클래스 오브젝트)
# `-` 인스턴스 (=인스턴스 오브젝트)
# ### ***클래스 속성 vs 인스턴스 속성***
# ### 예제1
class Testclass1:
    # Class attributes: shared by all instances until an instance shadows
    # one with its own attribute of the same name.
    x=0
    y=0
    def my_print(self):
        # First call reads the class-level x, then stores the incremented
        # value as an *instance* attribute (per-instance counter).
        self.x += 1
        # Writing through the class name keeps y shared across instances.
        Testclass1.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
f=Testclass1
a=Testclass1()
b=f()
a.my_print()
b.my_print()
b.my_print()
a.my_print()
a.my_print()
# `-` 신기한점: 각 인스턴스에서 instance.my_print()를 실행한 횟수를 서로 공유하는 듯 하다.
# ### ***`분석`***
# `-` 코드를 시점별로 분석해보자.
# `-` 분석을 위해서 커널을 재시작한다.
# #### `[시점1]` : Testclass1를 선언하는 시점
class Testclass1:
    # Class attributes shared by every instance.
    x=0
    y=0
    def my_print(self):
        # self.x += 1 creates/updates a per-instance counter; assigning via
        # the class name updates the class-wide counter y.
        self.x += 1
        Testclass1.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
dir(Testclass1)
# +
# dir(a)
# dir(b)
# 이 둘은 아직 존재 X
# -
# `–` 이 시점에는 Testclass1만이 존재한다. Testclass1를 바로 클래스 오브젝트라고 부름.
Testclass1.x
Testclass1.y
# `–` 현재시점에서는 클래스 오브젝트의 수 1개, 인스턴스 오브젝트의 수 0개, 따라서 총 오브젝트 수는 1개임.
# #### `[시점2]` 클래스에 별칭을 지정하는 시점
f=Testclass1
f.x
f.y
Testclass1.x
Testclass1.y
# `–` 이 시점에서 클래스 오브젝트는 2개가 있는 것 처럼 보인다.
# `-` 그렇다면 이 2개의 클래스 오브젝트는 컴퓨터의 어딘가에 저장이 되어 있을 것이다.
# `-` 구체적으로는 메모리에 저장되어있을것.
# `-` 2개의 클래스오브젝트는 서로 다른 메모리 공간에 저장되어 있을것이다.
# `-` 진짜인가? 확인해보자. `id()`는 오브젝트(클래스 오브젝트, 인스턴스 오브젝트)가 저장된 메모리 주소를 확인하는 명령어이다.
id(f)
# `–` f라는 오브젝트는 93967322676384 메모리에 저장되어 있다.
id(Testclass1)
# `-` 어? 그런데 Testclass1의 오브젝트 역시 93967322676384 메모리에 저장되어 있다.
# `-` 추론: 사실 93967322676384라는 메모리공간에 저장된 어떠한 것은 동일한데, 그것을 어떤사람은 `Testclass1` 이라고 부르고 어떤사람은 `f`라고 부른다.
# `-` 이는 마치 별명이랑 비슷하다. 나라는 오브젝트를 어떤사람은 `최규빈`이라고 부르고, 어떤사람은 `팬더`라고 부른다. 부르는 이름이 2개라고 해서 나라는 오브젝트가 2개가 있는것은 아니다.
# `-` 결국 이 시점에서 클래스 오브젝트의 수는 여전히 1개라고 볼 수 있다. (인스턴스 오브젝트의 수는 0개)
# #### `[시점3]` : 클래스 오브젝트로부터 인스턴스 오브젝트를 만드는 시점
a=Testclass1() # 인스턴스 object 만듦
b=f() # 인스턴스 object 만듦
id(Testclass1),id(f),id(a),id(b)
# `–` 이 순간에는 클래스 오브젝트 1개, 인스턴스 오브젝트 2개 존재한다. 즉 총 3개의 오브젝트가 존재한다.
# `-` 메모리주소 93967322676384 에 존재하는 오브젝트는 클래스 오브젝트이며 Testclass1 또는 f 라고 불린다.
# `-` 메모리주소 139694857660688 에 존재하는 오브젝트는 인스턴스 오브젝트이며 a라고 불린다.
# `-` 메모리주소 139694848860656 에 존재하는 오브젝트는 인스턴스 오브젝트이며 b라고 불린다.
# ---
Testclass1.x, Testclass1.y
f.x,f.y
# ---
a.x,a.y
b.x,b.y
# #### `[시점4]`
a.my_print()
(f.x,f.y),(a.x,a.y),(b.x,b.y)
# `-` 특징
# - a.my_print()를 실행하면 a.x 의 값이 1이 증가한다.
# - a.my_print()를 실행하면 f.y, a.y, b.y 의 값이 동시에 1이 증가한다. (공유가 되는 느낌)
# #### `[시점5]`
b.my_print()
(f.x,f.y),(a.x,a.y),(b.x,b.y)
# #### `[시점6]`
b.my_print()
(f.x,f.y),(a.x,a.y),(b.x,b.y)
# #### `[시점7]`
a.my_print()
(f.x,f.y),(a.x,a.y),(b.x,b.y)
# #### `[시점8]`
a.my_print()
(f.x,f.y),(a.x,a.y),(b.x,b.y)
# ### 예제2
# `-` 아래처럼 코드를 바꿔도 잘 동작할것 같다.
class Testclass2:
    def __init__(self): # runs when an instance is created, not when the class object is defined
        self.x=0
        self.y=0
    def my_print(self):
        self.x += 1
        # Testclass2.y was never defined as a class attribute, so this line
        # raises AttributeError — that failure is the point of this example.
        Testclass2.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
c=Testclass2()
# 인스턴스 object 생성
# +
# c.my_print()
# -
# `–` 왜 에러가 나는가?
dir(Testclass2)
dir(Testclass1)
# `-` 관찰1: Testclass2에서는 Testclass1과는 다르게 x,y가 없다.
dir(c)
# `–` 관찰2: 그런데 `c`라는 인스턴스 오브젝트에서는 x,y가 있다.
# `-` 추론: `__init__`함수는 클래스 오브젝트가 만들어지는 시점에서는 실행되지 않고, 인스텐스 오브젝트가 만들어지는 시점에 실행된다.
# `-` 결국 `__init__` 함수의 역할은 클래스 오브젝트에서 인스턴스 오브젝트를 만든후에 초기화를 위해서 실행하는 어떠한 일련의 명령들을 묶어놓은 것에 불과하다.
# `–` 즉 위의 코드는 굳이 따지면 아래를 실행한 것과 동일하다.
class Testclass2:
    # __init__ is deliberately commented out: the cell below emulates it by
    # assigning c.x / c.y manually after instantiation.
    # def __init__(self):
    #     self.x=0
    #     self.y=0
    def my_print(self):
        self.x += 1
        # Testclass2.y is still undefined on the class -> AttributeError.
        Testclass2.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
c=Testclass2()
c.x=0
c.y=0
# `-` 이 상황에서
# ```python
# c.my_print()
# ```
# 를 실행하면
# ```python
# c.x += 1
# Testclass2.y +=1
# print("현재 인스턴스에서 %s 회 출력" % c.x)
# print("전체 인스턴스에서 총 %s 회 출력" % c.y)
# ```
# 이 실행되는데, 이때 Testclass2.y 이 정의되어 있지 않으므로
# ```python
# Testclass2.y +=1
# ```
# 에서 에러가 난다.
# ### 예제 3
# `-` 그렇다면 아래와 같이 수정하면 어떨까?
class Testclass3:
    def __init__(self):
        # Per-instance print counter.
        self.x=0
        # WARNING (intentional flaw discussed below): this resets the shared
        # class-level counter every time a new instance is created.
        Testclass3.y=0
    def my_print(self):
        self.x += 1
        Testclass3.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
# +
# class Testclass2:
# def __init__(self): # 클래스가 생성되는 시점에서는 실행되지 않고, 인스턴스가 생성되는 시점에서 실행될 것
# self.x=0
# self.y=0
# def my_print(self):
# self.x += 1
# Testclass2.y +=1
# print("현재 인스턴스에서 %s 회 출력" % self.x)
# print("전체 인스턴스에서 총 %s 회 출력" % self.y)
# -
a=Testclass3()
b=Testclass3()
a.my_print()
b.my_print()
a.my_print()
a.my_print()
b.my_print()
b.my_print()
# `–` Testclass1과 동일한 기능이 수행되는것 같다.
# `-` 그런데 조금만 생각해보면 엉터리라는 것을 알 수 있다. 아래의 코드를 관찰하여보자.
# +
class Testclass3:
    def __init__(self):
        self.x=0            # per-instance print counter
        Testclass3.y=0      # resets the shared counter on EVERY instantiation
    def my_print(self):
        self.x += 1
        Testclass3.y +=1
        print("현재 인스턴스에서 %s 회 출력" % self.x)
        print("전체 인스턴스에서 총 %s 회 출력" % self.y)
a=Testclass3()
a.my_print()
a.my_print()
b=Testclass3() # 초기화
b.my_print()
# -
# `-` Testclass3는 인스턴스를 생성할때마다 `y=0`이 설정된다. 그래서
# ```python
# b=Testclass3()
# ```
# 이 시점에서 의도하지 않게 '전체 인스턴스에서 출력된 횟수'를 의미하는 `y`가 초기화되었다.
# `-` 코드는 엉터리이지만, Testclass3은 의외로 분석할만한 가치가 있다. 특히 위의 실행결과를 시점별로 Testclass1과 비교해보면 재미있다.
# `-` Testclass1
# ```python
# ### Testclass1
# ## 시점1: 클래스 오브젝트 생성
# class Testclass1:
# x=0
# y=0
# def my_print(self):
# self.x += 1
# Testclass1.y +=1
# print("현재 인스턴스에서 %s 회 출력" % self.x)
# print("전체 인스턴스에서 총 %s 회 출력" % self.y)
# ## 시점2: 인스턴스 오브젝트 a를 생성
# a=Testclass1()
# ## 시점3: a에서 메소드 실행
# a.my_print()
# ## 시점4: a에서 메소드를 한번 더 실행
# a.my_print()
# ## 시점5: 인스턴스 오브젝트 b를 생성
# b=Testclass1()
# ## 시점6: b에서 메소드를 실행
# b.my_print()
# ```
# ```
# 현재 인스턴스에서 1 회 출력
# 전체 인스턴스에서 총 1 회 출력
# 현재 인스턴스에서 2 회 출력
# 전체 인스턴스에서 총 2 회 출력
# 현재 인스턴스에서 1 회 출력
# 전체 인스턴스에서 총 3 회 출력
# ```
# | |시점1 |시점2 |시점3 | 시점4 | 시점5| 시점6|
# |:-:|:-:|:-:|:-:|:-:|:-:|:-:|
# |Testclass1.x|0| 0| 0| 0| 0| 0|
# |Testclass1.y|0| 0| 1| 2| 2 | 3|
# |a.x| 값없음 | 0| 1| 2| 2| 2|
# |a.y| 값없음 | 0| 1| 2| 2| 3|
# |b.x| 값없음| 값없음| 값없음| 값없음| 0| 1|
# |b.y| 값없음| 값없음| 값없음| 값없음| 2| 3|
# `–` Testclass3
# ```python
# #### Testclass3
# ## 시점1: 클래스 오브젝트 생성
# class Testclass3:
# def __init__(self):
# self.x=0
# Testclass3.y=0
# def my_print(self):
# self.x += 1
# Testclass3.y +=1
# print("현재 인스턴스에서 %s 회 출력" % self.x)
# print("전체 인스턴스에서 총 %s 회 출력" % self.y)
# ## 시점2: 인스턴스 오브젝트 a를 생성
# a=Testclass3()
# ## 시점3: a에서 메소드 실행
# a.my_print()
# ## 시점4: a에서 메소드를 한번 더 실행
# a.my_print()
# ## 시점5: 인스턴스 오브젝트 b를 생성
# b=Testclass3()
# ## 시점6: b에서 메소드를 실행
# b.my_print()
# ```
# ```
# 현재 인스턴스에서 1 회 출력
# 전체 인스턴스에서 총 1 회 출력
# 현재 인스턴스에서 2 회 출력
# 전체 인스턴스에서 총 2 회 출력
# 현재 인스턴스에서 1 회 출력
# 전체 인스턴스에서 총 1 회 출력
# ```
# | |시점1 |시점2 |시점3 | 시점4 | 시점5| 시점6|
# |:-:|:-:|:-:|:-:|:-:|:-:|:-:|
# |Testclass3.x|값없음| 값없음| 값없음| 값없음| 값없음| 값없음|
# |Testclass3.y|값없음| 0| 1| 2| 0 | 1|
# |a.x| 값없음 | 0| 1| 2| 2| 2|
# |a.y| 값없음 | 0| 1| 2| 0| 1|
# |b.x| 값없음| 값없음| 값없음| 값없음| 0| 1|
# |b.y| 값없음| 값없음| 값없음| 값없음| 0| 1|
# `–` Testclass3.y가 업데이트 되면 a.y, b.y도 자동으로 업데이트 된다.
# > ### ***`네임스페이스`***
# ### 예제1
class Testclass1:
x=0
Testclass1.x
a=Testclass1()
# 인스턴스
a.x
# `–` Testclass1.x를 수정하면 a.x가 강제로 수정된다.
Testclass1.x=100
a.x
# `-` a.x를 수정한다고 하여 Testclass1.x가 강제로 수정되는 것은 아님
a.x=200
Testclass1.x
a.x
# `-` 이건 왜이러지?
Testclass1.x=300
# + tags=[]
a.x
# -
# `-` 아래의 상황과 비슷하다.
## [code1]
x=39
def nextyear():
    # x is never assigned inside this function, so it resolves to the
    # module-level global x.
    y=x+1
    print(x,y)
nextyear()
x
# +
## [code2]
# x=39
# def nextyear():
# y=x+1
# print(x,y)
# x=0
# nextyear()
# -
# `-` [code2]와 [code1]의 차이점은 x=0이라는 코드가 추가로 포함되었는지 유무다.
# `-` code1에서는 x는 global variable, code2에서는 x가 local variable 이라서 생기는 문제점이다.
## [code2]
x=39
def nextyear():
    # Assigning x here makes x a *local* variable for the whole function,
    # shadowing the global x.
    x=0
    y=x+1
    print(x,y)
nextyear()
x
# `–` 다시 우리의 예제로 돌아오자.
# ```python
# ### 시점1
# class Testclass1:
# x=0
# ### 시점2
# a=Testclass1()
# ### 시점3
# Testclass1.x=100
# ### 시점4
# a.x=200 `이 순간 a.x의 속성이 instance로 변함`
# ### 시점5
# Testclass1.x=300
# ```
# | |시점1 |시점2 |시점3 | 시점4 | 시점5|
# |:-:|:-:|:-:|:-:|:-:|:-:|
# |Testclass1.x|0| 0| 100| 100| 300|
# |a.x| 값없음 | 0| 100| 200| 200|
# |a.x의 속성| - | class| class| instance | instance|
# `–` a.x가 클래스로부터 물려받은 속성인지 (그래서 클래스와 연결되어있는지) 아니면 instance가 독자적으로 가지고 있는 속성인지 어떻게 알 수 있을까?
### 시점1
class Testclass1:
x=0
print('시점1',Testclass1.x)
### 시점2
a=Testclass1()
print('시점2',Testclass1.x,a.x,a.__dict__)
### 시점3
Testclass1.x=100
print('시점3',Testclass1.x,a.x,a.__dict__)
### 시점4
a.x=200
print('시점4',Testclass1.x,a.x,a.__dict__)
# 이젠 독자성을 지님
### 시점5
Testclass1.x=300
print('시점5',Testclass1.x,a.x,a.__dict__)
# 참고 : __dict__ 용도? 클래스 객체의 속성 정보를 확인하기 위해 사용. 객체가 가진 여러가지 속성들을 딕셔너리 형태로 편하게 확인할 수 있다.
# ### 예제2
# +
x=11 ## global variable ... A
def f():
    x=22 ## local variable defined inside f
    print(x) ## the global x=11 exists, but the local x=22 shadows it --> prints 22
def g():
    print(x) ## no local x inside g --> falls back to the global --> prints 11
class Testclass2:
    x=33 ## class variable ... B
    def m1(self):
        x=44 ## method-local variable ... C (discarded when m1 returns)
    def m2(self):
        self.x=44 ## instance variable ... D (shadows the class variable B)
# -
# `-` 결과를 관찰하고 해석해보자.
print(x)
# > Note: 전역변수 출력
f()
# > Note: $f$ 에서 설정된 지역변수 22가 출력됨
x
# > Note: $f$ 내의 지역변수를 사용하여도 전역변수는 변하지 않음. (함수내부에서 선언된 x=22는 함수외부에 영향을 주지못함)
g()
# > Note: g에서 설정된 지역변수가 따로 없으므로 전역변수 출력
x,Testclass2.x
# > Note: 전역변수 x와 클래스오브젝트에 설정된 변수 x
a=Testclass2()
(x,Testclass2.x,a.x),a.__dict__
# > Note: 전역변수, 클래스 오브젝트내의 변수, 인스턴스내의 변수(`a.__dict__`의 결과로 보아 인스턴스내의 변수는 클래스 오브젝트내의 변수를 빌려쓰고 있다. ).
Testclass2.x=200
(x,Testclass2.x,a.x),a.__dict__
# > Note: 클래스오브젝트에서 변수를 고치면 인스턴스에 영향을 미침, 아직 인스턴스가 독자성을 갖지 않음
a.m1()
(x,Testclass2.x,a.x),a.__dict__
# > Note: 메소드 `m1`내에서 선언된 x=44라는 선언은 아무것도 변화시킬수 없음.
a.m2()
(x,Testclass2.x,a.x),a.__dict__ # 독자성을 갖는 것
# > Note: 메소드 `m2`에 있는 self.x는 결국 a.x라는 의미이고, 이 선언은 클래스오브젝트 내의 변수와 독립적으로 인스턴스오브젝트 내에서 통용되는 변수를 선언하는 것임. 이 선언의 결과는 `a.__dict__`의 출력결과에서도 확인가능.
Testclass2.x=300
(x,Testclass2.x,a.x),a.__dict__
# > Note: 이제는 a.x와 Testclass2.x 는 분리된 상태이므로, Testclass2.x의 값을 바꾸어도 a.x에는 값의 변화가 없음.
# 전역변수(A), 클래스 변수(B), 메소드 변수(C), 인스턴스 변수(D)
#
# A>B>D>C
# > ### ***`연산자 오버로딩`***
# `-` 아래의 코드를 관찰하자.
1+1
# `-` 생각해보니까 `1`은 int class 에서 생성된 인스턴스이다.
#
# `-` 코드를 관찰하니 instance와 instance를 `+`라는 연산이 연결하는 형태임.
class Student:
    """Toy class demonstrating operator overloading (__add__ / __repr__).

    ``student + 0`` models a semester on leave: age advances half a year.
    ``student + 1`` models an enrolled semester: age and semester advance.
    Any other operand leaves the student unchanged. __add__ mutates and
    returns the same instance, so additions can be chained.
    """
    def __init__(self, age=20.0, semester=1):
        self.age = age
        self.semester = semester

    def __add__(self, val):
        # val == 0: leave of absence, val == 1: enrolled semester
        if val in (0, 1):
            self.age += 0.5
        if val == 1:
            self.semester += 1
        return self

    def __repr__(self):
        return '나이: %s \n학기: %s' % (self.age, self.semester)
guebin=Student()
guebin.age
guebin.semester
guebin
guebin+1
guebin+0
guebin+0+0+0+0+1+0+1
# `-` 연산자 오버로드 핵심아이디어
# - 클래스가 일반적인 파이썬 연산을 재정의하는 것, 즉 가로채는 것이라고 생각해도 됨
# - 여기에서 연산은 단순히 덧셈, 뺄셈을 의미하는게 아니라, `print()`, `+`, `[0]`(인덱싱) 와 같은 파이썬 내장문법을 모두 포괄하는 개념이라 이해하는 것이 옳다.
class Student2(Student):
    # Adds indexing on top of Student: obj[0] -> age, obj[1] -> semester;
    # slices work too because indexing is delegated to a list.
    def __getitem__(self,index):
        return [self.age,self.semester][index]
hynn=Student2()
hynn+1+1+0+0
hynn[0]
hynn[1]
hynn[:]
# ## 도움말 작성방법
# `-` 넘파이의 경우 아래와 같이 도움말이 잘 작성되어 있다.
# +
import numpy as np
a=np.array([1,2,3])
# # a?
# -
# `-` 하지만 우리는?
# +
# hynn?
# -
# `-` 우리도 도움말을 작성하고 싶다.
class Student2(Student):
    '''
    Student2 is an improvement over Student.
    # Features inherited from the Student class
    1. printing (__repr__)
    2. arithmetic (__add__): counts age and semesters
    Examples
    --------
    >>> hynn=Student2()
    >>> hynn+1
    나이: 20.5
    학기: 2
    # Features added in Student2
    1. indexing (__getitem__)
    '''
    def __getitem__(self,index):
        # Delegate to a throwaway list so both integer indexing and
        # slicing behave like list indexing.
        return [self.age,self.semester][index]
hynn=Student2()
# +
# hynn?
# -
hynn=Student2(21,1)
hynn
# +
# hynn?
# -
# ## self에 대한 진실
# `–` 사실 이름이 self가 아니어도 된다.
# `-`클래스의 첫번째 인자는 굳이 self가 아니라 임의로 a라고 명명해도 됨
class MooYaHo:
    '''
    201821994
    '''
    # The first parameter of a method receives the instance itself; calling
    # it 'self' is only a convention — any identifier (here 'a') works.
    def __init__(a):
        a.text='mooyaho'
moo1=MooYaHo()
# +
# moo1?
# -
moo1=MooYaHo()
moo1.text
# `–` 그런데 self를 많이 쓴다.
| _notebooks/2022-01-02-intro3.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Explore and create ML datasets
#
# In this notebook, we will explore data corresponding to taxi rides in New York City to build a Machine Learning model in support of a fare-estimation tool. The idea is to suggest a likely fare to taxi riders so that they are not surprised, and so that they can protest if the charge is much higher than expected.
#
# ## Learning Objectives
# 1. Access and explore a public BigQuery dataset on NYC Taxi Cab rides
# 2. Visualize your dataset using the Seaborn library
# 3. Inspect and clean-up the dataset for future ML model training
# 4. Create a benchmark to judge future ML model performance off of
#
# Each learning objective will correspond to a __#TODO__ in this student lab notebook -- try to complete this notebook first and then review the [solution notebook](../solution/explore_data.ipynb).
#
# Let's start off with the Python imports that we need.
# !pip install tensorflow==2.1 --user
# Please ignore any compatibility warnings and errors
# Make sure to <b>restart</b> your kernel to ensure this change has taken place.
from google.cloud import bigquery
import seaborn as sns
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
import shutil
# <h3> Extract sample data from BigQuery </h3>
#
# The dataset that we will use is <a href="https://console.cloud.google.com/bigquery?p=nyc-tlc&d=yellow&t=trips&page=table">a BigQuery public dataset</a>. Click on the link, and look at the column names. Switch to the Details tab to verify that the number of records is one billion, and then switch to the Preview tab to look at a few rows.
#
# Let's write a SQL query to pick up interesting fields from the dataset. It's a good idea to get the timestamp in a predictable format.
# %%bigquery
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude, dropoff_longitude,
dropoff_latitude, passenger_count, trip_distance, tolls_amount,
fare_amount, total_amount
# TODO 1: Specify the correct BigQuery public dataset for nyc-tlc yellow taxi cab trips
# Tip: For projects with hyphens '-' be sure to escape with backticks ``
FROM
LIMIT 10
# Let's increase the number of records so that we can do some neat graphs. There is no guarantee about the order in which records are returned, and so no guarantee about which records get returned if we simply increase the LIMIT. To properly sample the dataset, let's use the HASH of the pickup time and return 1 in 100,000 records -- because there are 1 billion records in the data, we should get back approximately 10,000 records if we do this.
#
# We will also store the BigQuery result in a Pandas dataframe named "trips"
# %%bigquery trips
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
print(len(trips))
# We can slice Pandas dataframes as if they were arrays
trips[:10]
# <h3> Exploring data </h3>
#
# Let's explore this dataset and clean it up as necessary. We'll use the Python Seaborn package to visualize graphs and Pandas to do the slicing and filtering.
# TODO 2: Visualize your dataset using the Seaborn library. Plot the distance of the trip as X and the fare amount as Y
ax = sns.regplot(x="", y="", fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
# Hmm ... do you see something wrong with the data that needs addressing?
#
# It appears that we have a lot of invalid data that is being coded as zero distance and some fare amounts that are definitely illegitimate. Let's remove them from our analysis. We can do this by modifying the BigQuery query to keep only trips longer than zero miles and fare amounts that are at least the minimum cab fare ($2.50).
#
# Note the extra WHERE clauses.
# %%bigquery trips
SELECT
FORMAT_TIMESTAMP("%Y-%m-%d %H:%M:%S %Z", pickup_datetime) AS pickup_datetime,
pickup_longitude, pickup_latitude,
dropoff_longitude, dropoff_latitude,
passenger_count,
trip_distance,
tolls_amount,
fare_amount,
total_amount
FROM
`nyc-tlc.yellow.trips`
WHERE
ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 100000)) = 1
# TODO 3: Filter the data to only include non-zero distance trips and fares above $2.50
AND
print(len(trips))
ax = sns.regplot(x="trip_distance", y="fare_amount", fit_reg=False, ci=None, truncate=True, data=trips)
ax.figure.set_size_inches(10, 8)
# What's up with the streaks around 45 dollars and 50 dollars? Those are fixed-amount rides from JFK and La Guardia airports into anywhere in Manhattan, i.e. to be expected. Let's list the data to make sure the values look reasonable.
#
# Let's also examine whether the toll amount is captured in the total amount.
tollrides = trips[trips['tolls_amount'] > 0]
tollrides[tollrides['pickup_datetime'] == '2012-02-27 09:19:10 UTC']
# Looking a few samples above, it should be clear that the total amount reflects fare amount, toll and tip somewhat arbitrarily -- this is because when customers pay cash, the tip is not known. So, we'll use the sum of fare_amount + tolls_amount as what needs to be predicted. Tips are discretionary and do not have to be included in our fare estimation tool.
#
# Let's also look at the distribution of values within the columns.
trips.describe()
# Hmm ... The min, max of longitude look strange.
#
# Finally, let's actually look at the start and end of a few of the trips.
# +
def showrides(df, numlines):
    """Plot pickup->dropoff segments for the first `numlines` rides.

    Appending (pickup, dropoff, None) triples lets a single plt.plot()
    call draw each ride as a separate disconnected line segment.
    """
    lats = []
    lons = []
    # '_' replaces the original loop name 'iter', which shadowed the
    # builtin iter() and was never used.
    for _, row in df[:numlines].iterrows():
        lons.append(row['pickup_longitude'])
        lons.append(row['dropoff_longitude'])
        lons.append(None)
        lats.append(row['pickup_latitude'])
        lats.append(row['dropoff_latitude'])
        lats.append(None)
    sns.set_style("darkgrid")
    plt.figure(figsize=(10,8))
    plt.plot(lons, lats)
showrides(trips, 10)
# -
showrides(tollrides, 10)
# As you'd expect, rides that involve a toll are longer than the typical ride.
# <h3> Quality control and other preprocessing </h3>
#
# We need to do some clean-up of the data:
# <ol>
# <li>New York city longitudes are around -74 and latitudes are around 41.</li>
# <li>We shouldn't have zero passengers.</li>
# <li>Clean up the total_amount column to reflect only fare_amount and tolls_amount, and then remove those two columns.</li>
# <li>Before the ride starts, we'll know the pickup and dropoff locations, but not the trip distance (that depends on the route taken), so remove it from the ML dataset</li>
# <li>Discard the timestamp</li>
# </ol>
#
# We could do preprocessing in BigQuery, similar to how we removed the zero-distance rides, but just to show you another option, let's do this in Python. In production, we'll have to carry out the same preprocessing on the real-time input data.
#
# This sort of preprocessing of input data is quite common in ML, especially if the quality-control is dynamic.
# +
def preprocess(trips_in):
    """Clean a raw trips frame for ML training.

    Folds tolls into the fare, drops columns that are unknown before the
    ride starts, and keeps only rows with plausible NYC coordinates and a
    positive passenger count. The input frame is left untouched.
    """
    trips = trips_in.copy(deep=True)
    trips.fare_amount = trips.fare_amount + trips.tolls_amount
    # trip_distance depends on the route taken, so it is unavailable at
    # prediction time; tolls/total are folded in or redundant.
    for col in ('tolls_amount', 'total_amount', 'trip_distance'):
        del trips[col]
    qc = (
        (trips['pickup_longitude'] > -78) & (trips['pickup_longitude'] < -70)
        & (trips['dropoff_longitude'] > -78) & (trips['dropoff_longitude'] < -70)
        & (trips['pickup_latitude'] > 37) & (trips['pickup_latitude'] < 45)
        & (trips['dropoff_latitude'] > 37) & (trips['dropoff_latitude'] < 45)
        & (trips['passenger_count'] > 0)
    )
    return trips[qc]
tripsqc = preprocess(trips)
tripsqc.describe()
# -
# The quality control has removed about 300 rows (11400 - 11101) or about 3% of the data. This seems reasonable.
#
# Let's move on to creating the ML datasets.
#
# <h3> Create ML datasets </h3>
#
# Let's split the QCed data randomly into training, validation and test sets.
# Note that this is not the entire data. We have 1 billion taxicab rides. This is just splitting the 10,000 rides to show you how it's done on smaller datasets. In reality, we'll have to do it on all 1 billion rides and this won't scale.
# +
shuffled = tripsqc.sample(frac=1)
trainsize = int(len(shuffled['fare_amount']) * 0.70)
validsize = int(len(shuffled['fare_amount']) * 0.15)
df_train = shuffled.iloc[:trainsize, :]
df_valid = shuffled.iloc[trainsize:(trainsize+validsize), :]
df_test = shuffled.iloc[(trainsize+validsize):, :]
# -
df_train.head(n=1)
df_train.describe()
df_valid.describe()
df_test.describe()
# Let's write out the three dataframes to appropriately named csv files. We can use these csv files for local training (recall that these files represent only 1/100,000 of the full dataset) just to verify our code works, before we run it on all the data.
# +
def to_csv(df, filename):
    """Write df to `filename` as a headerless CSV, with fare_amount moved
    to the first column and a synthetic integer 'key' column (the row
    number) appended last."""
    outdf = df.copy(deep=False)
    outdf.loc[:, 'key'] = np.arange(0, len(outdf))  # rownumber as key
    # Put the prediction target first, preserving the order of the rest.
    remaining = [c for c in outdf.columns if c != 'fare_amount']
    cols = ['fare_amount'] + remaining
    print (cols)  # new order of columns
    outdf[cols].to_csv(filename, header=False, index_label=False, index=False)
to_csv(df_train, 'taxi-train.csv')
to_csv(df_valid, 'taxi-valid.csv')
to_csv(df_test, 'taxi-test.csv')
# -
# !head -10 taxi-valid.csv
# <h3> Verify that datasets exist </h3>
# !ls -l *.csv
# We have 3 .csv files corresponding to train, valid, test. The ratio of file-sizes corresponds to our split of the data.
# + language="bash"
# head taxi-train.csv
# -
# Looks good! We now have our ML datasets and are ready to train ML models, validate them and evaluate them.
# <h3> Benchmark </h3>
#
# Before we start building complex ML models, it is a good idea to come up with a very simple model and use that as a benchmark.
#
# My model is going to be to simply divide the mean fare_amount by the mean trip_distance to come up with a rate and use that to predict. Let's compute the RMSE of such a model.
# +
def distance_between(lat1, lon1, lat2, lon2):
# haversine formula to compute distance "as the crow flies". Taxis can't fly of course.
dist = np.degrees(np.arccos(np.minimum(1,np.sin(np.radians(lat1)) * np.sin(np.radians(lat2)) + np.cos(np.radians(lat1)) * np.cos(np.radians(lat2)) * np.cos(np.radians(lon2 - lon1))))) * 60 * 1.515 * 1.609344
return dist
def estimate_distance(df):
    """Straight-line trip distance from the frame's pickup/dropoff columns."""
    coords = (df['pickuplat'], df['pickuplon'], df['dropofflat'], df['dropofflon'])
    return distance_between(*coords)
def compute_rmse(actual, predicted):
    """Root-mean-squared error between two aligned numeric arrays."""
    squared_errors = (actual - predicted) ** 2
    return np.sqrt(np.mean(squared_errors))
def print_rmse(df, rate, name):
    """Print the RMSE of the fixed-rate baseline (rate * crow-flies distance)."""
    rmse = compute_rmse(df['fare_amount'], rate * estimate_distance(df))
    print ("{1} RMSE = {0}".format(rmse, name))
# TODO 4: Create a benchmark to judge future ML model performance off of
# Specify the five feature columns
FEATURES = ['','','','','']
# Specify the one target column for prediction
TARGET = ''
columns = list([TARGET])
columns.append('pickup_datetime')
columns.extend(FEATURES) # in CSV, target is the first column, after the features
columns.append('key')
df_train = pd.read_csv('taxi-train.csv', header=None, names=columns)
df_valid = pd.read_csv('taxi-valid.csv', header=None, names=columns)
df_test = pd.read_csv('taxi-test.csv', header=None, names=columns)
rate = df_train['fare_amount'].mean() / estimate_distance(df_train).mean()
print ("Rate = ${0}/km".format(rate))
print_rmse(df_train, rate, 'Train')
print_rmse(df_valid, rate, 'Valid')
print_rmse(df_test, rate, 'Test')
# -
# <h2>Benchmark on same dataset</h2>
#
# The RMSE depends on the dataset, and for comparison, we have to evaluate on the same dataset each time. We'll use this query in later labs:
# +
validation_query = """
SELECT
(tolls_amount + fare_amount) AS fare_amount,
pickup_datetime,
pickup_longitude AS pickuplon,
pickup_latitude AS pickuplat,
dropoff_longitude AS dropofflon,
dropoff_latitude AS dropofflat,
passenger_count*1.0 AS passengers,
'unused' AS key
FROM `nyc-tlc.yellow.trips`
WHERE ABS(MOD(FARM_FINGERPRINT(CAST(pickup_datetime AS STRING)), 10000)) = 2
AND
trip_distance > 0
AND fare_amount >= 2.5
AND pickup_longitude > -78
AND pickup_longitude < -70
AND dropoff_longitude > -78
AND dropoff_longitude < -70
AND pickup_latitude > 37
AND pickup_latitude < 45
AND dropoff_latitude > 37
AND dropoff_latitude < 45
AND passenger_count > 0
"""
client = bigquery.Client()
df_valid = client.query(validation_query).to_dataframe()
print_rmse(df_valid, 2.59988, 'Final Validation Set')
# -
# The simple distance-based rule gives us a RMSE of <b>$8.14</b>. We have to beat this, of course, but you will find that simple rules of thumb like this can be surprisingly difficult to beat.
#
# Let's be ambitious, though, and make our goal to build ML models that have a RMSE of less than $6 on the test set.
# Copyright 2020 Google Inc.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| quests/serverlessml/01_explore/labs/explore_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# #%pylab inline
import tellurium as te
import SloppyCell.ReactionNetworks as sc
# %pylab inline
import pandas as pd
# +
# Load fitted parameters and overlay simulated PhyA protein time courses
# for long-day (PF=16) vs short-day (PF=8) photoperiods.
# NOTE(review): plotting calls (figure/plot/axvspan/...) rely on the
# %pylab inline star-import above.
Model_params = sc.Utility.load('params.bp')
figure(dpi=600)
model = te.loadAntimonyModel('SloppyCell_model/phyAB_interaction_photoperiod')
# Copy every fitted parameter into the Antimony model.
for p in Model_params.keys():
    model.setValue(p,Model_params.getByKey(p))
model.setValue('R_levels',1)
model.setValue('FR_levels',0)
model.setValue('muta',0)
model.setValue('mutb',0)
model.setValue('PF',16)  # photoperiod: 16 h light (long day)
res = model.simulate(0,24*10,1000)
# [Ar] + [Afr] scaled by 10 — presumably total PhyA protein; TODO confirm units.
plot(res['time']-24*2,res['[Ar]']*10+res['[Afr]']*10,'k' ,label='PhyA longday')
# Re-load a fresh model for the short-day simulation.
model = te.loadAntimonyModel('SloppyCell_model/phyAB_interaction_photoperiod')
for p in Model_params.keys():
    model.setValue(p,Model_params.getByKey(p))
model.setValue('R_levels',1)
model.setValue('FR_levels',0)
model.setValue('muta',0)
model.setValue('mutb',0)
model.setValue('PF',8)  # photoperiod: 8 h light (short day)
res = model.simulate(0,24*10,1000)
plot(res['time']-24*2,res['[Ar]']*10+res['[Afr]']*10,'b' ,label='PhyA shortday')
#plot(res['time']-24*2,res['[Am]'],'b' ,label='PhyA shortday')
#axvspan(0,40, color='gray', alpha=0.5)
xticks(range(0,24*10,8), fontsize=20)
yticks(fontsize=20)
ylim(0,3)
xlim(150,200)
# Shade the dark periods of each day for both photoperiods
# (16-24 h for long days, 8-24 h for short days; the bands overlap).
for i in range(0,10):
    axvspan(16+(24*i),24+24*i, color='gray', alpha=0.5)
for i in range(0,10):
    axvspan(8+(24*i),24+24*i, color='gray', alpha=0.5)
legend(loc='upper right', fontsize=18)
xlim(0,48)  # final view window overrides the xlim(150,200) above
#savefig('PhyA_protein_SD_vs_LD.png', format='png', dpi=600, transparent=True)
| scripts/PhyABmodel/.ipynb_checkpoints/model_vs_time_series-checkpoint.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lecture-02 Snell's Law
# * author : <NAME>
# * Data : 2022/02/10
# %matplotlib inline
import matplotlib.pyplot as plt
import numpy as np
# ## Maxwell's Equations and Wave Equations
#
# Time-dependent Maxwell's equations:
# ### $$\nabla\cdot\textbf{D} = \rho_f - (1)$$
# ### $$\nabla\cdot\textbf{B} = 0 - (2)$$
# ### $$\nabla\times\textbf{E} = -\frac{\partial}{\partial t}\textbf{B} - (3)$$
# ### $$\nabla\times\textbf{H} = \textbf{J}_f+\frac{\partial}{\partial t}\textbf{D} - (4)$$
# $\textbf{D}$ is the displacement field and $\textbf{D}=\epsilon_0\epsilon_r\textbf{E}$. $\textbf{E}$ is the electric field. $\epsilon_0$ is the permittivity in vacuum and $\epsilon_r$ is the relative permittivity. $\rho_f$ is the free charge density. $\textbf{B}$ is the magnetic field. $\textbf{B} = \mu_0\mu_r\textbf{H}$. $\mu_0$ is the permeability in vacuum and $\mu_r$ is the relative permeability. $\textbf{J}_f$ is the free current density.
#
# Interestingly, **Eq. (1-4)** are not totally independent to each other.
#
# Based on the definition of the charge density ($\rho_f$) and the current density ($\textbf{J}_f$), the [continuity equation](https://en.wikipedia.org/wiki/Continuity_equation) is
# ### $$\nabla\cdot\textbf{J}_f = -\frac{\partial}{\partial t}\rho_f - (5)$$
# With continuity equation, **Eq. (1)** can be derived from **Eq. (4)** and so as **Eq.(2)** and **Eq. (3)**.
# $$0 = \nabla\cdot\nabla\times\textbf{H} = \nabla\cdot\textbf{J}_f+\nabla\cdot\frac{\partial}{\partial t}\textbf{D} = -\frac{\partial}{\partial t}\rho_f+\frac{\partial}{\partial t}\nabla\cdot\textbf{D} = \frac{\partial}{\partial t} (-\rho_f + \nabla\cdot\textbf{D} ))$$
# As a result, only two curl equations (**Eq. (3-4)**) are considered in the following section.
#
# From **Eq. (3)** and **Eq. (4)**, we can take curl on the both side. Then,
#
# <font size="4">$$\nabla\times\nabla\times\textbf{E} = -\frac{\partial}{\partial t}\nabla\times\textbf{B} - (6)$$</font>
# <font size="4">$$\nabla\times\nabla\times\textbf{H} = \nabla\times(\textbf{J}_f+\frac{\partial}{\partial t}\textbf{D}) - (7)$$</font>
# With [vector calculus identity](https://en.wikipedia.org/wiki/Vector_calculus_identities) ($ \nabla\times\nabla\times\textbf{A} = \nabla(\nabla\cdot\textbf{A})-\nabla^2\textbf{A}$)
#
# ### $$\nabla(\nabla\cdot\textbf{E})-\nabla^2\textbf{E} = -\frac{\partial}{\partial t}\nabla\times(\mu_0\mu_r\textbf{H}) - (8)$$
# ### $$\nabla(\nabla\cdot\textbf{H})-\nabla^2\textbf{H} = \nabla\times(\textbf{J}_f+\frac{\partial}{\partial t}\epsilon_0\epsilon_r\textbf{E}) - (9)$$
# With **Eq.(1)** and **Eq.(4)**
#
# ### $$\nabla(\frac{\rho_f}{\epsilon_0\epsilon_r})-\nabla^2\textbf{E} = -\mu_0\mu_r\frac{\partial}{\partial t}(\textbf{J}_f+\epsilon_0\epsilon_r\frac{\partial}{\partial t}\textbf{E}) - (10)$$
# ### $$-\nabla^2\textbf{H} = \nabla\times\textbf{J}_f-\epsilon_0\mu_0\epsilon_r\mu_r\frac{\partial^2}{\partial t^2}\textbf{H} - (11)$$
# While calculating **Eq. (10)** and **(11)**, we suppose the $\mu_r$ and $\epsilon_r$ are constants. <br/>
# However, it is an interesting problem, what is constant here? Because the Maxwell's equation we discussed here is in differtial form and hence, the constant here means that this is constant in local region but not global. Then, the gradient and time derivative would not work on these relative material constants. As a result, **Eq. (10)** and **Eq. (11)** is valid in a single material, and between different materials, it can be dealt by the boundary conditions (as in **Lecture 03**.)
#
# Then,
#
# ### $$\nabla^2\textbf{E} - \epsilon_0\mu_0\epsilon_r\mu_r\frac{\partial^2}{\partial t^2}\textbf{E} = \nabla(\frac{\rho_f}{\epsilon_0\epsilon_r}) + \mu_0\mu_r\frac{\partial}{\partial t}\textbf{J}_f - (12)$$
# ### $$\nabla^2\textbf{H} - \epsilon_0\mu_0\epsilon_r\mu_r\frac{\partial^2}{\partial t^2}\textbf{H} = -\nabla\times\textbf{J}_f - (13)$$
#
# **Eq. (12)** and **Eq. (13)** are the general form of EM wave equations. The LHS (left-hand side) is the wave propagation of the electric and magnetic field and the RHS (right-hand side) is the source terms.
# In the source free region, (i.e. $\rho_f = 0$ and $\textbf{J}_f = 0$)
#
# ### $$\nabla^2\textbf{E} - \frac{1}{v^2}\frac{\partial^2}{\partial t^2}\textbf{E} = 0 - (14)$$
# ### $$\nabla^2\textbf{H} - \frac{1}{v^2}\frac{\partial^2}{\partial t^2}\textbf{H} = 0 - (15)$$
#
# and $v^2 = \frac{1}{\epsilon_0\mu_0\epsilon_r\mu_r} = c^2 \frac{1}{\epsilon_r\mu_r} = \frac{c^2}{n^2}$. $c$ is the speed of light in vacuum and n is the refractive index. **Eq. (14-15)** are the conventional wave equations.
# ## Plane Wave
# A plane wave is one possible solution for the wave equation and is defined as:
# ### $$A(\textbf{r},t) = A_0exp(i (\textbf{k}\cdot\textbf{r}-\omega t)) - (16)$$
#
# And
#
# ### $$A_0 = |A_0|exp(\phi_0) - (17)$$
# ### $$\textbf{k} = (k_x, k_y, k_z) - (18)$$
# ### $$\textbf{r} = (x, y, z) - (19)$$
#
# $A_0$ is a complex constant with length $|A_0|$ and phase $\phi_0$, $\textbf{k}$ is a wave vector, $\textbf{r}$ is the position, $\omega$ is the angular frequency, $t$ is the time.
#
# Hence, we can defined a phase function ($\phi(\textbf{r},t)$) as:
# ### $$\phi(\textbf{r},t) = \textbf{k}\cdot\textbf{r}-\omega t+\phi_0 - (20)$$
# then
#
# ### $$A(\textbf{r},t) = |A_0|exp(i\phi(\textbf{r},t)) - (21)$$
# Furthermore, insert **Eq. (21)** into wave equation, we can find that
#
# ### $$k = \sqrt{k_x^2 + k_y^2 + k_z^2} = \omega /v = k_o\sqrt{\epsilon_r\mu_r} = nk_o - (22)$$
#
# where $k_0 = \omega /c$ is the wave number in vacuum and n is the refractive index.
def wave_equation(A0, x, y, z, t=0.0, w=1.0, kx=1.0, ky=1.0, kz=1.0):
    """Evaluate the complex plane wave A0 * exp(i (k.r - w t)) (Eq. 16).

    Parameters
    ----------
    A0 : complex amplitude (phasor) of the wave
    x, y, z : position coordinates (scalars or numpy arrays)
    t : time
    w : angular frequency
    kx, ky, kz : wave-vector components

    Returns
    -------
    Complex field value(s), broadcast over the position arrays.
    """
    phase = kx * x + ky * y + kz * z - w * t
    return A0 * np.exp(1j * phase)
def cal_k(kx, ky, kz):
    """Return the wave number |k| = sqrt(kx^2 + ky^2 + kz^2) (Eq. 22)."""
    return np.sqrt(kx * kx + ky * ky + kz * kz)
def cal_normalize_direction(kx, ky, kz):
    """Return the unit vector (kx, ky, kz) / |k| as a 3-tuple."""
    magnitude = np.sqrt(kx ** 2 + ky ** 2 + kz ** 2)
    return kx / magnitude, ky / magnitude, kz / magnitude
# =================================================================================================================
# #### Supporting Information
# The maximum real part value of a plane wave would happened when
#
# ### $$\phi(\textbf{r},t) = \textbf{k}\cdot\textbf{r}_{AN}-\omega t+\phi_0 = 2m\pi- (S1)$$
#
# where m is an integer. We call these phases as antinodes.
#
# The position of the antinode would become
#
# ### $$\textbf{k}\cdot\textbf{r}_{AN} = \omega t-\phi_0 + 2m\pi- (S2)$$
#
# =================================================================================================================
def find_nth_antinode(m, A0, kx, ky, kz, wt):
    """Position (x, y, z) of the m-th antinode along the wave-vector axis.

    Solves k . r = 2*m*pi + w*t - phi0 for r parallel to k (Eq. S2),
    where phi0 = angle(A0) is the phase of the complex amplitude.

    Parameters
    ----------
    m : antinode order (integer)
    A0 : complex amplitude of the wave
    kx, ky, kz : wave-vector components
    wt : angular frequency times time (w * t)
    """
    # distance from the origin along k where the phase equals 2*m*pi + wt
    distance = (2.0 * m * np.pi + wt - np.angle(A0)) / cal_k(kx, ky, kz)
    unit = cal_normalize_direction(kx, ky, kz)
    return tuple(distance * component for component in unit)
# # Phase velocity and Wave vector
#
# **Eq. (20)** shows that when t is from $t_a$ to $t_a+\Delta t$, a given phase, $\phi_a = \textbf{k}\cdot\textbf{r}(t_a)-\omega t_a+\phi_0 = \textbf{k}\cdot\textbf{r}(t_a+\Delta t)-\omega (t_a+\Delta t)+\phi_0$, indicating that the position of $\phi_a$ would shift by $\phi_a(t+\Delta t_a) - \phi_a(t) = 0$. Hence, we can calculate how fast $\phi_a$ shifts. Then
# ### $$\textbf{k}\cdot(\textbf{r}(t_a+\Delta t)-\textbf{r}(t_a)) = \omega \Delta t - (23)$$
#
# and
#
# ### $$\textbf{k}\cdot(\frac{\textbf{r}(t_a+\Delta t)-\textbf{r}(t_a)}{\Delta t}) = \omega - (24)$$
# As a result, we can define a phase velocity, $\textbf{v}_p$, as
#
# ### $$\textbf{k}\cdot\textbf{v}_p= \omega - (25)$$
# From **Eq. (25)**, it is obvious that the minimum of $\textbf{v}_p$ is happened when it is parallel to $\textbf{k}$ and it equals to
#
# ### $$v_p= \frac{\omega}{k} = v - (26)$$
# The general equation of $v_p$ would be
#
# ### $$v_p= \frac{\omega}{k}\frac{1}{cos\theta} = \frac{v}{cos\theta}- (27)$$
#
# , where $\theta$ is the angle between $\textbf{k}$ and $\textbf{v}_p$
# #### Someone would think strange that why the phase velocity is not a constant value, $v$, and it would depend on the angle between the wave vector, $\textbf{k}$.
#
# In order to solve this question, we need to understand the physical meaning (not just in math) or what we have caluclated about $\textbf{v}_p$
#
# In the following section, without loss of generality, we consider a plane wave only propagate on x-z plane (i.e. $k_y = 0$).
# ## Example
# Example: a unit-amplitude plane wave propagating in the x-z plane at 45
# degrees (kx = kz, ky = 0), sampled on a 100x100 spatial grid at two times.
A0 = 1.0 # plane wave amplitude (phasor)
w = 1.0 # angular frequency
kx = 1.0 # x component of wave vector
ky = 0.0 # y component of wave vector
kz = 1.0 # z component of wave vector
# +
# construct x, z, and t grids
x_list = np.linspace( 0, 10, 100)
z_list = np.linspace( 0, 10, 100)
t_list = np.linspace( 0, 1, 2)
[x,z] = np.meshgrid( x_list, z_list)
y = 0  # evaluate the field on the y = 0 plane
# calculate parameters
k = cal_k(kx, ky, kz)
wavelength = 2*np.pi/k
(xlen, zlen) = x.shape
tlen = t_list.size
# evaluate the complex field on the grid for every sample time.
# np.complex128 replaces np.complex_, an alias removed in NumPy 2.0.
F = np.zeros( (xlen, zlen, tlen), dtype=np.complex128 )
for ii, t in enumerate(t_list):
    F[:,:,ii] = wave_equation(A0, x, y, z, t, w=w, kx=kx, ky=ky, kz=kz)
# -
# Now we have calculated the value of wave equation.
# Next, we are going to plot the wave and we also plot a arrow ($\textbf{k}$), which is normal to the wave front.
# +
ii = 0 # time index
# set the start of the arrow is at the peak value
x0, y0, z0 = find_nth_antinode(1, A0, kx, ky, kz, w*t_list[ii])
# set the arrow is along k direction and the length equals to wavelength
norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
arrow_x, arrow_z = norvec_x * wavelength, norvec_z * wavelength
## plot figure
fig, ax = plt.subplots(1, 1)
# filled contour of the real part of the field at time index ii
cs = ax.contourf(x, z, np.real(F[:,:,ii]))
# red arrow from the antinode along k, one wavelength long
ax.annotate("", xy=(x0+arrow_x,z0+arrow_z), xytext=(x0, z0),arrowprops=dict(facecolor='red', shrink=0.01))
# NOTE(review): the next four calls pass [0,10] and (x0, z0) in mixed
# positions — they look like an attempt to draw horizontal/vertical guide
# lines through the antinode and the arrow tip, but the x/y argument
# pairing is unusual; confirm the intended guide-line geometry.
ax.plot([0,10], [x0, z0], 'r--', linewidth=2.0)
ax.plot([x0, z0], [0,10], 'r--', linewidth=2.0)
ax.plot([0,10], [x0+arrow_x, z0+arrow_z], 'r--', linewidth=2.0)
ax.plot([x0+arrow_x, z0+arrow_z], [0,10], 'r--', linewidth=2.0)
ax.set_title( "t = {0:>.3f} s".format(t_list[ii]) )
ax.set_aspect('equal', 'box')
ax.set_xlim(0,10)
ax.set_ylim(0,10)
fig.colorbar(cs, ax=ax, shrink=0.9)
# -
# Now we are going to plot the wave front of the antinode on the figure.
def find_antinode_line(m, A0, kx, kz, k, wt, tstart=0.0, tend=1.0, step=100, ky=0.0):
    """Parametric (x, z) points of the m-th antinode wave front in the x-z plane.

    The front passes through the m-th antinode and runs perpendicular to the
    wave vector, i.e. along the in-plane direction (-kz, kx).

    Parameters
    ----------
    m : antinode order (integer)
    A0 : complex amplitude of the wave
    kx, kz : in-plane wave-vector components
    k : wave number; unused, kept for backward compatibility with callers
    wt : angular frequency * time
    tstart, tend, step : range and sample count of the line parameter
    ky : out-of-plane wave-vector component (default 0.0).  Previously the
        body read the module-level `ky`, which this notebook sets to 0;
        making it a defaulted parameter removes the hidden global dependency.

    Returns
    -------
    (x, z) : numpy arrays of points along the antinode line.
    """
    x0, y0, z0 = find_nth_antinode(m, A0, kx, ky, kz, wt)
    norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
    t = np.linspace(tstart, tend, step)
    # step along the in-plane direction perpendicular to (kx, kz)
    x = x0 - norvec_z * t
    z = z0 + norvec_x * t
    return x, z
# +
# set the start of the arrow at the m = 1 antinode (peak value).
# NOTE: `ii` here still holds its value from the previous cell; it is only
# (re)assigned to 0 a few lines below, before plotting.
x0, y0, z0 = find_nth_antinode(1, A0, kx, ky, kz, w*t_list[ii])
# the arrow points along k and its length equals one wavelength
# (fixed typo: was `norvex_z`)
norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
arrow_x, arrow_z = norvec_x * wavelength, norvec_z * wavelength
# wave-front lines through the m = 1 and m = 2 antinodes
xAN1, zAN1 = find_antinode_line(1, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
xAN2, zAN2 = find_antinode_line(2, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
## plot figure
ii = 0 # time index
fig, ax = plt.subplots(1, 1)
cs = ax.contourf(x, z, np.real(F[:,:,ii]))
ax.annotate("", xy=(x0+arrow_x,z0+arrow_z), xytext=(x0, z0),arrowprops=dict(facecolor='red', shrink=0.01))
ax.plot([0,10], [x0, z0], 'r--', linewidth=2.0)
ax.plot([x0, z0], [0,10], 'r--', linewidth=2.0)
ax.plot([0,10], [x0+arrow_x, z0+arrow_z], 'r--', linewidth=2.0)
ax.plot([x0+arrow_x, z0+arrow_z], [0,10], 'r--', linewidth=2.0)
ax.plot(xAN1, zAN1, 'b--', linewidth=1.0)  # m = 1 wave front
ax.plot(xAN2, zAN2, 'k--', linewidth=2.0)  # m = 2 wave front
ax.scatter([x0], [z0], s=500, edgecolors='k', facecolor='r', linewidth=2)  # antinode marker
ax.set_title( "t = {0:>.3f} s".format(t_list[ii]) )
ax.set_aspect('equal', 'box')
ax.set_xlim(0,10)
ax.set_ylim(0,10)
fig.colorbar(cs, ax=ax, shrink=0.9)
# -
# From above figure, it is obvious that for a given $\phi_a$, the position of $\phi_a$ would be a line in 2D or would be a plane in 3D. Then, for example, if when $t=t_a$, the starting point is the red point in the above figure. After a period, T, the phase plane would become the black dash line as in the figure. From our calculation in $\textbf{v}_p$, all the points on the black dash line are all possibe points for $\phi(t_a+T)$ because in the calculation, the position of the phase is not specified and so as the velocity direction of $\phi_a$. In the figure, it is obvious that the minimum displacement of $\phi_a$ is along the direction of $\textbf{k}$ (equal to $\lambda$) and all the displacements in other directions would always larger than $\lambda$. As a result, the calculated $\textbf{v}_p$ would be equal or larger than $\omega/k$ as we dicussed in **Eq. (27)**.
#
# ### However, it does not means that the wave could propagate faster than $v$ because there is an assuption in our derivation.
# From **Eq. (16)** to **Eq. (27)**, we have supposed that the wave is in steady state. However, from the [transient situation video](https://www.youtube.com/watch?time_continue=32&v=tZ1rlO48xMM&feature=emb_logo), in the beginning of the animation, the wave would propagate along the normal direction of the wave front, indicating that real propagation velocity of the wave is the minimum value of $\textbf{v}_p$.
#
# ### Then what is the meaning of $\textbf{v}_p$ ?
# Now if we consider the wave along two different direction, x-axis and on $\textbf{k}$ direction.
#
# Imagine that we do not know the full picture of the wave. We walk along these two directions and hold a detector to detect the amplitude of the wave.
# +
# Sample the field along two 1-D cuts to compare apparent wavelengths.
# along x axis (y = z = 0)
mlen = 10
x_xaxis = np.linspace( 0, mlen, 1000)
y_xaxis = np.zeros( x_xaxis.shape, dtype=x_xaxis.dtype )
z_xaxis = np.zeros( x_xaxis.shape, dtype=x_xaxis.dtype )
F_xaxis = wave_equation(A0, x_xaxis, y_xaxis, z_xaxis, t=0, w=w, kx=kx, ky=ky, kz=kz)
# along k direction: positions are a_kaxis times the unit vector of k
a_kaxis = np.linspace( 0, mlen, 1000)
norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
x_kaxis = norvec_x * a_kaxis
y_kaxis = norvec_y * a_kaxis
z_kaxis = norvec_z * a_kaxis
F_kaxis = wave_equation(A0, x_kaxis, y_kaxis, z_kaxis, t=0, w=w, kx=kx, ky=ky, kz=kz)
# +
## plot data: field profile along the x axis (red) vs along k (blue)
fig, axes = plt.subplots(1,1)
axes.plot( x_xaxis, np.real(F_xaxis), 'r-', linewidth=2.0)
axes.plot( a_kaxis, np.real(F_kaxis), 'b-', linewidth=2.0)
axes.plot( x_xaxis, np.zeros(x_xaxis.size), 'k--', linewidth=0.5)  # zero baseline
axes.set_xlabel('(m)', fontsize=20)
axes.set_ylabel('field', fontsize=20)
axes.legend(['along x axis', r'along k'])
plt.xlim( 0, mlen)
plt.ylim( -np.absolute(A0), np.absolute(A0) );
# -
# The above figure shows what we could measure along these two directions.
#
# From the simulation, we could find that the wavelength along the x axis is longer than along the $\textbf{k}$ direction( i.e. $\lambda$).
#
#
#
#
#
# And $\lambda_x$ would be larger than $\lambda$ because $k_x$ is smaller than $k = 2\pi/\lambda$.
#
# Similarly, we can define the wavelength along the y-axis, $\lambda_y$, and the wavelength along the z-axis, $\lambda_z$, as
#
# ### $$k_y = \frac{2\pi}{\lambda_y} - (31)$$
# ### $$k_z = \frac{2\pi}{\lambda_z} - (32)$$
#
# As a result, we can find that the component of the wave vector is related to the wavelength of the corresponding axis.
# Then for in the following, we are going to discuss an arbitrary line/surface on xz plane (red line),
# +
# set the start of the arrow at the m = 1 antinode (peak value)
x0, y0, z0 = find_nth_antinode(1, A0, kx, ky, kz, w*t_list[ii])
# arbitrary line through the antinode along direction (vx, vy, vz)
vx, vy, vz = 2.0, 0.0, 0.5
norv_x, norv_y, norv_z = cal_normalize_direction(vx, vy, vz)
a_arb = np.linspace(-10, 10, 100)
xL = x0 + a_arb*norv_x
yL = y0 + a_arb*norv_y
zL = z0 + a_arb*norv_z
F_arb = wave_equation(A0, xL, yL, zL, t=0, w=w, kx=kx, ky=ky, kz=kz)
# the arrow points along k and its length equals one wavelength
# (fixed typo: was `norvex_z`)
norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
arrow_x, arrow_z = norvec_x * wavelength, norvec_z * wavelength
# wave-front lines through the m = 1 and m = 2 antinodes
xAN1, zAN1 = find_antinode_line(1, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
xAN2, zAN2 = find_antinode_line(2, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
## plot figure
ii = 0 # time index
fig, ax = plt.subplots(1, 1)
cs = ax.contourf(x, z, np.real(F[:,:,ii]))
ax.annotate("", xy=(x0+arrow_x,z0+arrow_z), xytext=(x0, z0),arrowprops=dict(facecolor='red', shrink=0.01))
ax.plot(xAN1, zAN1, 'b--', linewidth=1.0)  # m = 1 wave front
ax.plot(xAN2, zAN2, 'k--', linewidth=2.0)  # m = 2 wave front
ax.plot(xL, zL, 'r--', linewidth=2.0)      # arbitrary sampling line
ax.scatter([x0], [z0], s=500, edgecolors='k', facecolor='r', linewidth=2)  # antinode marker
ax.set_title( "t = {0:>.3f} s".format(t_list[ii]) )
ax.set_aspect('equal', 'box')
ax.set_xlim(0,10)
ax.set_ylim(0,10)
fig.colorbar(cs, ax=ax, shrink=0.9)
# +
## plot data: profiles along x (red), along k (blue), and along the
## arbitrary line (black) for wavelength comparison
fig, axes = plt.subplots(1,1)
axes.plot( x_xaxis, np.real(F_xaxis), 'r--', linewidth=1.0)
axes.plot( a_kaxis, np.real(F_kaxis), 'b--', linewidth=1.0)
axes.plot( a_arb, np.real(F_arb), 'k-', linewidth=2.0)
axes.plot( x_xaxis, np.zeros(x_xaxis.size), 'k--', linewidth=0.5)  # zero baseline
axes.set_xlabel('(m)', fontsize=20)
axes.set_ylabel('field', fontsize=20)
axes.legend(['along x axis', r'along k', r'along arbitrary'])
plt.xlim( 0, mlen)
plt.ylim( -np.absolute(A0), np.absolute(A0) )
# -
# Hence, we can calculate the wavelength along this arbitrary line/surface (red line) as $\lambda_{arb}=\lambda/cos\theta$, where $\theta$ is the angle between $\textbf{k}$ and the red line. We can calculate the wave vector component along this direction. (i.e. $k_{arb} = 2\pi/\lambda_a = (2\pi/\lambda) cos\theta = k cos\theta$).
#
# Hence, the correspoding phase velocity along the red line is
#
# ### $$v_p = \frac{\lambda_{arb}}{T} = \frac{2\pi\lambda_{arb}}{2\pi T} = \frac{\omega}{k_{arb}} = \frac{\omega}{kcos\theta} - (33)$$
#
# , which is the same as **Eq. (27)**. That's why the phase velocity, $v_p$, would be larger than $v$: because it is not a real wave propagating direction. It is a fake velocity along an arbitrary direction.
# On the other hand, along this red line, the phase difference of a length $l$ would correspond to
#
# ### $$\Delta\phi_{l} = 2\pi\frac{l}{\lambda_{arb}} = k_{arb}l- (34)$$
#
# Hence, the wave number is a quantity to calculate the corresponding phase differece of a length, $l$. And that's why we call $k$ as wave number.
#
# On the contrary, if we know the wavelength of some arbitrary direction, we can directly write down the wave number along this direction by **Eq. (34)**.
# +
# set the start of the arrow at the m = 1 antinode (peak value)
x0, y0, z0 = find_nth_antinode(1, A0, kx, ky, kz, w*t_list[ii])
# the arrow points along k and its length equals one wavelength
# (fixed typo: was `norvex_z`)
norvec_x, norvec_y, norvec_z = cal_normalize_direction(kx, ky, kz)
arrow_x, arrow_z = norvec_x * wavelength, norvec_z * wavelength
# wave-front lines through the m = 1 and m = 2 antinodes
xAN1, zAN1 = find_antinode_line(1, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
xAN2, zAN2 = find_antinode_line(2, A0, kx, kz, k, w*t_list[ii], tstart=-10.0, tend=10.0, step=100)
## plot figure
ii = 0 # time index
fig, ax = plt.subplots(1, 1)
cs = ax.contourf(x, z, np.real(F[:,:,ii]))
ax.annotate("", xy=(x0+arrow_x,z0+arrow_z), xytext=(x0, z0),arrowprops=dict(facecolor='red', shrink=0.01))
# dashed legs of the right triangle: along x, then along z
ax.plot([x0,x0+arrow_x], [x0, z0], 'r--', linewidth=2.0)
ax.plot([x0+arrow_x, z0+arrow_z], [z0,z0+arrow_z], 'r--', linewidth=2.0)
ax.plot(xAN1, zAN1, 'b--', linewidth=1.0)  # m = 1 wave front
ax.plot(xAN2, zAN2, 'k--', linewidth=2.0)  # m = 2 wave front
# red: start point; yellow: after the x-leg; white: arrow tip
ax.scatter( [x0], [z0], s=500, edgecolors='k', facecolor='r', linewidth=2)
ax.scatter([x0+arrow_x], [z0], s=500, edgecolors='k', facecolor='y', linewidth=2)
ax.scatter([x0+arrow_x], [z0+arrow_z], s=500, edgecolors='k', facecolor='w', linewidth=2)
ax.set_title( "t = {0:>.3f} s".format(t_list[ii]) )
ax.set_aspect('equal', 'box')
ax.set_xlim(0,10)
ax.set_ylim(0,10)
fig.colorbar(cs, ax=ax, shrink=0.9)
# -
# Along the red dash line, the phase diffence between red point and the white point can be calculate as the sum of phase difference of the red and yellow point and the phase difference between the yellow and the white point.
#
# ### $$\Delta\phi_{red\rightarrow white} = \Delta\phi_{red\rightarrow yellow} + \Delta\phi_{yellow\rightarrow white} = k_x\Delta_x + k_y\Delta_y - (35) $$
#
# , which is the same as **Eq. (20)**.
#
# And it is an important concept when discuss the blue shift in spectrum in the planar emssiom device.
# Accordingly, the wave vector follows the rotation transformation of the coordinate but the wave number along the axis does not.
# # Summary
# In this lecture, we discuss the phase velocity and why it can be larger than the speed of light. Besides, we also briefly discuss the physical meaning of the wave vector and of its components: a vector/quantity to evaluate the phase difference along a given direction.
| Lecture-01 Plane Wave.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Titlewave
# language: python
# name: titlewave
# ---
from pymongo import MongoClient
from datetime import datetime
import sys
import os
import pandas as pd
from time import time
import matplotlib.pyplot as plt
# +
# Connect to the local MongoDB instance and select the posts collection
# for the chosen Stack Exchange forum.
forum = 'overflow'
print(f'Forum: {forum}')
client = MongoClient()  # default host/port (localhost:27017)
db = client.titlewave
posts = db[f'{forum}.posts']  # e.g. collection 'overflow.posts'
total_posts = posts.count_documents({})
print(f'{total_posts} posts found...')
# -
# Count posts by year, with average view count and the fraction of posts
# that received at least one answer.
start_time = time()
result = posts.aggregate([{'$group': {'_id': {'$year': '$CreationDate'},
                                      'NumPosts': {'$sum': 1},
                                      'AvgViews': {'$avg': '$ViewCount'},
                                      'AnswerProbability': {'$avg': {'$cond': [{'$gt': ['$AnswerCount', 0]}, 1, 0]}}}},
                          {'$sort': {'_id': 1}}
                         ])
result = list(result)  # materialize the cursor
df = pd.DataFrame(result)
print(f'Duration: {time() - start_time:.2f} s')
df
def mongo_query(start_date, end_date, exclude_closed):
    """Create a MongoDB query (filter document) based on a set of conditions.

    Parameters
    ----------
    start_date : datetime or None
        If given, match posts created at or after this time ($gte).
    end_date : datetime or None
        If given, match posts created strictly before this time ($lt).
    exclude_closed : bool
        If truthy, match only posts with Closed == False.

    Returns
    -------
    dict suitable for passing to PyMongo's find()/aggregate $match.
    """
    query = {}
    # Build the date range sub-document once instead of checking for the
    # 'CreationDate' key before each bound (the original duplicated that check).
    date_filter = {}
    if start_date:
        date_filter['$gte'] = start_date
    if end_date:
        date_filter['$lt'] = end_date
    if date_filter:
        query['CreationDate'] = date_filter
    if exclude_closed:
        query['Closed'] = False
    return query
def single_year_query(year):
    """Query matching non-closed posts created during the given calendar year."""
    jan_first = datetime(year, 1, 1)
    next_jan_first = datetime(year + 1, 1, 1)
    return mongo_query(start_date=jan_first,
                       end_date=next_jan_first,
                       exclude_closed=True)
# Count posts in a specific year by month (open posts only, via
# single_year_query), with average views and answer probability.
start_time = time()
result = posts.aggregate([{'$match': single_year_query(2018)},
                          {'$group': {'_id': {'$month': '$CreationDate'},
                                      'NumPosts': {'$sum': 1},
                                      'AvgViews': {'$avg': '$ViewCount'},
                                      'AnswerProbability': {'$avg': {'$cond': [{'$gt': ['$AnswerCount', 0]}, 1, 0]}}}},
                          {'$sort': {'_id': 1}}
                         ])
result = list(result)  # materialize the cursor
df = pd.DataFrame(result)
print(f'Duration: {time() - start_time:.2f} s')
df
# Analyze posts by title length (code points), grouping 2018 open posts by
# the length of their Title and computing views / answer probability.
start_time = time()
result = posts.aggregate([{'$match': single_year_query(2018)},
                          {'$group': {'_id': {'$strLenCP': '$Title'},
                                      'NumPosts': {'$sum': 1},
                                      'AvgViews': {'$avg': '$ViewCount'},
                                      'AnswerProbability': {'$avg': {'$cond': [{'$gt': ['$AnswerCount', 0]}, 1, 0]}}}},
                          {'$sort': {'_id': 1}}
                         ])
result = list(result)  # materialize the cursor
df = pd.DataFrame(result)
print(f'Duration: {time() - start_time:.2f} s')
# Plot the results for view counts (TODO: median would be less noisy)
df = df.rename(columns={'_id': 'TitleLength'})
fig, (ax1, ax2) = plt.subplots(1,2, figsize=[12,4])
# left panel: post count (blue) vs answer probability (red, twin axis)
df.plot(x='TitleLength', y='NumPosts', ax = ax1, color='blue')
df.plot(x='TitleLength', y='AnswerProbability', ax = ax1.twinx(), color='red')
# right panel: post count (blue) vs average views (red, twin axis)
df.plot(x='TitleLength', y='NumPosts', ax = ax2, color='blue')
df.plot(x='TitleLength', y='AvgViews', ax = ax2.twinx(), color='red')
plt.show()
# +
# Analyze posts by tag: drop the large text fields up front, unwind the
# Tags array so each tag counts separately, then group per tag.
start_time = time()
result = posts.aggregate([{'$project': {'Title': False, 'Body': False}},
                          {'$unwind': '$Tags'},
                          {'$group': {'_id': '$Tags',
                                      'NumPosts': {'$sum': 1},
                                      'AvgViews': {'$avg': '$ViewCount'},
                                      'AnswerProbability': {'$avg': {'$cond': [{'$gt': ['$AnswerCount', 0]}, 1, 0]}}}},
                          {'$sort': {'NumPosts': -1}}
                         ])
result = list(result)  # materialize the cursor
df = pd.DataFrame(result)
print(f'Duration: {time() - start_time:.2f} s')
df.head(20)
# -
| model_training/data_exploration.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Self-Driving Car Engineer Nanodegree
#
# ## Deep Learning
#
# ## Project: Build a Traffic Sign Recognition Classifier
#
# In this notebook, a template is provided for you to implement your functionality in stages, which is required to successfully complete this project. If additional code is required that cannot be included in the notebook, be sure that the Python code is successfully imported and included in your submission if necessary.
#
# > **Note**: Once you have completed all of the code implementations, you need to finalize your work by exporting the iPython Notebook as an HTML document. Before exporting the notebook to html, all of the code cells need to have been run so that reviewers can see the final implementation and output. You can then export the notebook by using the menu above and navigating to \n",
# "**File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
#
# In addition to implementing code, there is a writeup to complete. The writeup should be completed in a separate file, which can be either a markdown file or a pdf document. There is a [write up template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) that can be used to guide the writing process. Completing the code template and writeup template will cover all of the [rubric points](https://review.udacity.com/#!/rubrics/481/view) for this project.
#
# The [rubric](https://review.udacity.com/#!/rubrics/481/view) contains "Stand Out Suggestions" for enhancing the project beyond the minimum requirements. The stand out suggestions are optional. If you decide to pursue the "stand out suggestions", you can include the code in this Ipython notebook and also discuss the results in the writeup file.
#
#
# >**Note:** Code and Markdown cells can be executed using the **Shift + Enter** keyboard shortcut. In addition, Markdown cells can be edited by typically double-clicking the cell to enter edit mode.
# ---
# ## Step -1: Download dataset and unzip
# +
import os
import requests
import numpy as np
from tqdm import tqdm
from zipfile import ZipFile
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow import keras
from datetime import datetime
from tensorflow.keras import layers
from keras.preprocessing.image import ImageDataGenerator
# -
def download(url: str, fname: str):
    """Download `url` to the local path `fname`, streaming with a tqdm bar.

    Parameters
    ----------
    url : source URL
    fname : destination file path (also used as the progress-bar label)

    Raises
    ------
    requests.HTTPError
        On a non-2xx response, instead of silently writing the error body
        to disk as the original version did.
    """
    resp = requests.get(url, stream=True, timeout=30)
    resp.raise_for_status()  # fail fast on 4xx/5xx
    # Content-Length may be missing; total=0 makes tqdm show an open-ended bar.
    total = int(resp.headers.get('content-length', 0))
    with open(fname, 'wb') as file, tqdm(
        desc=fname,
        total=total,
        unit='iB',
        unit_scale=True,
        unit_divisor=1024,
    ) as bar:
        for data in resp.iter_content(chunk_size=1024):
            size = file.write(data)
            bar.update(size)
# Fetch the dataset archive (if not already present) and extract it.
file_name = "traffic-signs-data.zip"
dest_name = "data_set"
url = "https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/traffic-signs-data.zip"
if not os.path.exists(file_name):
    download(url, file_name)
# extract member-by-member so tqdm can show extraction progress
with ZipFile(file=file_name) as zip_file:
    for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):
        zip_file.extract(member=file, path=dest_name)
# ---
# ## Step 0: Load The Data
# +
# Load pickled data
import pickle
# TODO: Fill this in based on where you saved the training and testing data
training_file = "data_set/train.p"
validation_file = "data_set/valid.p"
testing_file = "data_set/test.p"
# NOTE: pickle.load executes arbitrary code from the file; these files come
# from the (trusted) Udacity dataset archive extracted above.
with open(training_file, mode='rb') as f:
    train = pickle.load(f)
with open(validation_file, mode='rb') as f:
    valid = pickle.load(f)
with open(testing_file, mode='rb') as f:
    test = pickle.load(f)
# split each pickle dict into image arrays and label arrays
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
# +
# Show one example image per class label found in the training set.
indices = []
y_train_values = np.unique(y_train)
for index in y_train_values:
    # first occurrence of each class label in y_train
    indices.append(np.argwhere(y_train == index)[0][0])
fig1, axes = plt.subplots(ncols=len(y_train_values)//4, nrows=4, figsize=(15,5), constrained_layout=True)
axes = axes.flatten()
for image_index, axe in zip(indices, axes):
    # NOTE(review): the image shown is at image_index+50 but the title uses
    # y_train[image_index]; if the +50 offset crosses a class boundary the
    # title will not match the displayed image — confirm intent.
    axe.imshow(X_train[image_index+50])
    axe.set_title("class " + str(y_train[image_index]))
    axe.axis('off')
# -
# ---
#
# ## Step 1: Dataset Summary & Exploration
#
# The pickled data is a dictionary with 4 key/value pairs:
#
# - `'features'` is a 4D array containing raw pixel data of the traffic sign images, (num examples, width, height, channels).
# - `'labels'` is a 1D array containing the label/class id of the traffic sign. The file `signnames.csv` contains id -> name mappings for each id.
# - `'sizes'` is a list containing tuples, (width, height) representing the original width and height the image.
# - `'coords'` is a list containing tuples, (x1, y1, x2, y2) representing coordinates of a bounding box around the sign in the image. **THESE COORDINATES ASSUME THE ORIGINAL IMAGE. THE PICKLED DATA CONTAINS RESIZED VERSIONS (32 by 32) OF THESE IMAGES**
#
# Complete the basic data summary below. Use python, numpy and/or pandas methods to calculate the data summary rather than hard coding the results. For example, the [pandas shape method](http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.shape.html) might be useful for calculating some of the summary results.
# ### Provide a Basic Summary of the Data Set Using Python, Numpy and/or Pandas
# +
### Replace each question mark with the appropriate value.
### Use python, pandas or numpy methods rather than hard coding the results
# NOTE(review): num_classes is hard-coded to 43 (the known GTSRB class
# count) even though n_classes below computes the same number from the
# labels — consider deriving num_classes instead of hard-coding it.
num_classes = 43
input_shape = (32, 32, 3)  # 32x32 RGB images
# TODO: Number of training examples
n_train = X_train.shape[0]
# TODO: Number of validation examples
n_validation = X_valid.shape[0]
# TODO: Number of testing examples.
n_test = X_test.shape[0]
# TODO: What's the shape of an traffic sign image?
image_shape = X_train.shape[1:]
# TODO: How many unique classes/labels there are in the dataset.
n_classes = np.unique(y_train).shape[0]
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
# -
# ### Include an exploratory visualization of the dataset
# Visualize the German Traffic Signs Dataset using the pickled file(s). This is open ended, suggestions include: plotting traffic sign images, plotting the count of each sign, etc.
#
# The [Matplotlib](http://matplotlib.org/) [examples](http://matplotlib.org/examples/index.html) and [gallery](http://matplotlib.org/gallery.html) pages are a great resource for doing visualizations in Python.
#
# **NOTE:** It's recommended you start with something simple first. If you wish to do more, come back to it after you've completed the rest of the sections. It can be interesting to look at the distribution of classes in the training, validation and test set. Is the distribution the same? Are there more examples of some classes than others?
### Data exploration visualization code goes here.
### Feel free to use as many code cells as needed.
import matplotlib.pyplot as plt
# Visualizations will be shown in the notebook.
# %matplotlib inline

# Show a 2 x 4 grid of randomly selected training images,
# each titled with its integer class id.
rows = 2
cols = 4
indices = np.random.randint(0, n_train, rows*cols)
fig1, f1_axes = plt.subplots(ncols=cols, nrows=rows, constrained_layout=True)
f1_axes = f1_axes.flatten()
for axis, sample_idx in zip(f1_axes, indices):
    axis.imshow(X_train[sample_idx])
    axis.set_title(f'{y_train[sample_idx]}')
    axis.axis('off')
# ----
#
# ## Step 2: Design and Test a Model Architecture
#
# Design and implement a deep learning model that learns to recognize traffic signs. Train and test your model on the [German Traffic Sign Dataset](http://benchmark.ini.rub.de/?section=gtsrb&subsection=dataset).
#
# The LeNet-5 implementation shown in the [classroom](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) at the end of the CNN lesson is a solid starting point. You'll have to change the number of classes and possibly the preprocessing, but aside from that it's plug and play!
#
# With the LeNet-5 solution from the lecture, you should expect a validation set accuracy of about 0.89. To meet specifications, the validation set accuracy will need to be at least 0.93. It is possible to get an even higher accuracy, but 0.93 is the minimum for a successful project submission.
#
# There are various aspects to consider when thinking about this problem:
#
# - Neural network architecture (is the network over or underfitting?)
# - Play around preprocessing techniques (normalization, rgb to grayscale, etc)
# - Number of examples per label (some have more than others).
# - Generate fake data.
#
# Here is an example of a [published baseline model on this problem](http://yann.lecun.com/exdb/publis/pdf/sermanet-ijcnn-11.pdf). It's not required to be familiar with the approach used in the paper but, it's good practice to try to read papers like these.
# ### Pre-process the Data Set (normalization, grayscale, etc.)
# Minimally, the image data should be normalized so that the data has mean zero and equal variance. For image data, `(pixel - 128)/ 128` is a quick way to approximately normalize the data and can be used in this project.
#
# Other pre-processing steps are optional. You can try different techniques to see if it improves performance.
#
# Use the code cell (or multiple code cells, if necessary) to implement the first step of your project.
# I decided not to use augmentation; the model achieved better accuracy without it.
### Preprocess the data here. It is required to normalize the data. Other preprocessing steps could include
### converting to grayscale, etc.
# Augmentation is disabled (the commented-out options can be re-enabled);
# the generator is kept so the fit() call below can stay unchanged.
datagen = ImageDataGenerator(
#    rotation_range=20,
#    horizontal_flip=True
)
### Feel free to use as many code cells as needed.
# One-hot encode the integer labels: shape (n,) -> (n, num_classes).
# NOTE(review): re-running this cell a second time would re-encode the
# already one-hot labels and break them — run once per session.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_valid = keras.utils.to_categorical(y_valid, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
# ### Model Architecture
# ### Callbacks
# 1. TensorBoard callback
# 2. Early stopping callback
# TensorBoard: log scalars under a per-run timestamped directory.
# NOTE(review): `datetime.now()` assumes `from datetime import datetime`
# was executed earlier in the notebook — confirm the import form.
logdir = "logs/scalars/" + datetime.now().strftime("%Y%m%d-%H%M%S")
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
# EarlyStopping: stop when training loss has not improved for 3 epochs.
early_stopping = keras.callbacks.EarlyStopping(monitor='loss', patience=3)
# ### Model
#
# | Layer | Type | Kernal | Features | Activation |
# |:-:|:-:|:-:|:-:|:-:|
# | Input | | 32x32 |
# | C1 | Convolution | 5x5 | 6 | tanh |
# | S2 | Sub-sampling | 2x2 | | |
# | C3 | Convolution | 5x5 | 16 | tanh |
# | S4 | Sub-sampling | 2x2 |
# | C5 | Convolution | 5x5 | 120 | tanh |
# | F6 | Fully Connected | | 84 | tanh |
# | F7 | Fully Connected | | 10 | | Softmax |
# +
use_pretrained_model = False
model_path = 'models/lenet_no_agm_with_dropout75.h5'
#if(use_pretrained_model and os.path.exists(model_path)):
#    print("Pretrained model")
#    model = keras.models.load_model(model_path)
#else:
# LeNet-5-style network. Grayscale conversion and normalization are done
# inside the model so raw uint8 RGB images can be fed directly.
model = keras.Sequential(
    [
        layers.InputLayer(input_shape=input_shape),
        layers.Lambda(lambda x: tf.image.rgb_to_grayscale(x), name="RGB2Gray"),
        # BUG FIX: was `(x / 255.0) + 0.5`, which maps pixels to [0.5, 1.5].
        # The stated goal is zero-mean data (cf. "(pixel - 128)/128"), so
        # subtract 0.5 to map into [-0.5, 0.5]. Note: weights saved under
        # the old normalization must be retrained after this change.
        layers.Lambda(lambda x: (x / 255.0) - 0.5, name="Normalize"),
        layers.Conv2D(6, kernel_size=(5, 5), strides=1, name="C1", activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="S2"),
        layers.Conv2D(16, kernel_size=(5, 5), strides=1, name="C3", activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2), strides=2, name="S4"),
        layers.Conv2D(120, kernel_size=(5, 5), strides=1, name="C5", activation="relu"),
        layers.Flatten(),
        layers.Dense(84, activation="relu", name="F6"),
        layers.Dropout(0.75),
        layers.Dense(num_classes, name="F7", activation="softmax"),
    ]
)
model.summary()
# -
# ### Train, Validate and Test the Model
# A validation set can be used to assess how well the model is performing. A low accuracy on the training and validation
# sets imply underfitting. A high accuracy on the training set but low accuracy on the validation set implies overfitting.
# +
batch_size = 128
epochs = 50
# BUG FIX: the compile(...) line ended with a stray backslash, which
# line-continued into the load_weights call and produced a SyntaxError.
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
# NOTE(review): weights are loaded unconditionally even though
# use_pretrained_model is False above; this raises if model_path is absent.
model.load_weights(model_path)
# -
### Train your model here.
### Calculate and report the accuracy on the training and validation set.
### Once a final model architecture is selected,
### the accuracy on the test set should be calculated and reported as well.
### Feel free to use as many code cells as needed.
# Train with the (augmentation-disabled) generator; one pass over the
# training set per epoch via steps_per_epoch.
# NOTE(review): the *test* split is used as validation_data here; the
# held-out X_valid/y_valid would be the methodologically correct choice.
history = model.fit(x=datagen.flow(X_train, y_train, batch_size=batch_size),
                    validation_data=datagen.flow(X_test, y_test),
                    steps_per_epoch=len(X_train) // batch_size,
                    epochs=epochs,
                    callbacks=[tensorboard_callback, early_stopping])
def plot_history(history):
    """Plot training/validation accuracy and loss curves side by side."""
    hist = history.history
    epoch_axis = range(1, len(hist['accuracy']) + 1)
    fig, (ax_acc, ax_loss) = plt.subplots(1, 2, figsize=(15, 5))
    ax_acc.plot(epoch_axis, hist['accuracy'], 'b', label='Training acc')
    ax_acc.plot(epoch_axis, hist['val_accuracy'], 'r', label='Validation acc')
    ax_acc.set_title('Training and validation accuracy')
    ax_loss.plot(epoch_axis, hist['loss'], 'b', label='Training loss')
    ax_loss.plot(epoch_axis, hist['val_loss'], 'r', label='Validation loss')
    ax_loss.set_title('Training and validation loss')
    fig.legend()

plot_history(history)
# ---
#
# ## Step 3: Test a Model on New Images
#
# To give yourself more insight into how your model is working, download at least five pictures of German traffic signs from the web and use your model to predict the traffic sign type.
#
# You may find `signnames.csv` useful as it contains mappings from the class id (integer) to the actual sign name.
# ### Load and Output the Images
# Sanity check: compare the validation images' shape with the model's
# expected input shape.
X_valid.shape, model.layers[0].input_shape
### Load the images and plot them here.
### Feel free to use as many code cells as needed.
# Evaluate on the validation split (labels were one-hot encoded earlier).
score = model.evaluate(X_valid, y_valid, verbose=0)
print("Valid loss:", score[0])
print("Valid accuracy:", score[1])
# ### Predict the Sign Type for Each Image
# +
### Run the predictions here and use the model to output the prediction for each image.
### Make sure to pre-process the images with the same pre-processing pipeline used earlier.
### Feel free to use as many code cells as needed.
# -
# ### Analyze Performance
# File names of the traffic-sign photos downloaded from the web
# (note: there is no TGS_06 in the set).
test_images = ["TGS_01.jpg", "TGS_02.jpg", "TGS_03.jpg",
               "TGS_04.jpg", "TGS_05.jpg", "TGS_07.jpg",
               "TGS_08.jpg"]
# Ground-truth class ids for the images above (see signnames.csv),
# one-hot encoded to match the model's categorical output.
test_labels = np.array([18, 5, 35, 38, 17, 37, 13], dtype=np.float32)
test_labels = keras.utils.to_categorical(test_labels, num_classes)
# +
from matplotlib.pyplot import imread
# Load each downloaded image into a (N, 32, 32, 3) float array and show it.
# NOTE(review): imread does no resizing — this assumes the files on disk
# are already 32x32 RGB; differently sized images would raise here.
fig1, axes = plt.subplots(ncols=len(test_images), nrows=1, figsize=(15,5), constrained_layout=True)
axes = axes.flatten()
real_time_images = np.zeros((len(test_images), 32, 32, 3))
for index, (image, axe) in enumerate(zip(test_images, axes)):
    image_ = imread("data_set/testing_images/" + image)
    real_time_images[index] = image_
    axe.imshow(image_)
    axe.axis('off')
# -
# Shapes of the downloaded images and their one-hot labels.
real_time_images.shape, test_labels.shape
# FIX: removed a stray pasted notebook-output line
# `((34799, 32, 32, 3), (34799, 43))` — it was a no-op tuple expression with
# values from the *training* set, which only misled readers.
# Evaluate on the downloaded web images.
score = model.evaluate(real_time_images, test_labels, verbose=0)
print("Valid loss:", score[0])
print("Valid accuracy:", score[1])
# ### Output Top 5 Softmax Probabilities For Each Image Found on the Web
# For each of the new images, print out the model's softmax probabilities to show the **certainty** of the model's predictions (limit the output to the top 5 probabilities for each image). [`tf.nn.top_k`](https://www.tensorflow.org/versions/r0.12/api_docs/python/nn.html#top_k) could prove helpful here.
#
# The example below demonstrates how tf.nn.top_k can be used to find the top k predictions for each image.
#
# `tf.nn.top_k` will return the values and indices (class ids) of the top k predictions. So if k=3, for each sign, it'll return the 3 largest probabilities (out of a possible 43) and the corresponding class ids.
#
# Take this numpy array as an example. The values in the array represent predictions. The array contains softmax probabilities for five candidate images with six possible classes. `tf.nn.top_k` is used to choose the three classes with the highest probability:
#
# ```
# # (5, 6) array
# a = np.array([[ 0.24879643, 0.07032244, 0.12641572, 0.34763842, 0.07893497,
# 0.12789202],
# [ 0.28086119, 0.27569815, 0.08594638, 0.0178669 , 0.18063401,
# 0.15899337],
# [ 0.26076848, 0.23664738, 0.08020603, 0.07001922, 0.1134371 ,
# 0.23892179],
# [ 0.11943333, 0.29198961, 0.02605103, 0.26234032, 0.1351348 ,
# 0.16505091],
# [ 0.09561176, 0.34396535, 0.0643941 , 0.16240774, 0.24206137,
# 0.09155967]])
# ```
#
# Running it through `sess.run(tf.nn.top_k(tf.constant(a), k=3))` produces:
#
# ```
# TopKV2(values=array([[ 0.34763842, 0.24879643, 0.12789202],
# [ 0.28086119, 0.27569815, 0.18063401],
# [ 0.26076848, 0.23892179, 0.23664738],
# [ 0.29198961, 0.26234032, 0.16505091],
# [ 0.34396535, 0.24206137, 0.16240774]]), indices=array([[3, 0, 5],
# [0, 1, 4],
# [0, 5, 1],
# [1, 3, 5],
# [1, 4, 3]], dtype=int32))
# ```
#
# Looking just at the first row we get `[ 0.34763842, 0.24879643, 0.12789202]`, you can confirm these are the 3 largest probabilities in `a`. You'll also notice `[3, 0, 5]` are the corresponding indices.
# Class probabilities for each web image; argsort gives class indices in
# ascending probability order, so the last 5 columns are the top-5 classes.
score = model.predict(real_time_images)
sorted_indices = np.argsort(score)
# +
pos = np.arange(0, 5)
for image, score_value, so in zip(real_time_images, score, sorted_indices):
    fig, (ax0, ax1) = plt.subplots(ncols=2, nrows=1, figsize=(10,3), constrained_layout=True)
    ax0.imshow(image.astype(np.uint8))
    ax0.axis('off')
    # Top-5 class ids (most probable last), used as bar labels.
    test_names = so[-5:]
    ax1.barh(pos, score_value[so][-5:], align='center', height=0.5, tick_label=test_names)
# -
# ### Project Writeup
#
# Once you have completed the code implementation, document your results in a project writeup using this [template](https://github.com/udacity/CarND-Traffic-Sign-Classifier-Project/blob/master/writeup_template.md) as a guide. The writeup can be in a markdown or pdf file.
# > **Note**: Once you have completed all of the code implementations and successfully answered each question above, you may finalize your work by exporting the iPython Notebook as an HTML document. You can do this by using the menu above and navigating to
# **File -> Download as -> HTML (.html)**. Include the finished document along with this notebook as your submission.
# ---
#
# ## Step 4 (Optional): Visualize the Neural Network's State with Test Images
#
# This Section is not required to complete but acts as an additional exercise for understanding the output of a neural network's weights. While neural networks can be a great learning device they are often referred to as a black box. We can understand what the weights of a neural network look like better by plotting their feature maps. After successfully training your neural network you can see what its feature maps look like by plotting the output of the network's weight layers in response to a test stimuli image. From these plotted feature maps, it's possible to see what characteristics of an image the network finds interesting. For a sign, maybe the inner network feature maps react with high activation to the sign's boundary outline or to the contrast in the sign's painted symbol.
#
# Provided for you below is the function code that allows you to get the visualization output of any tensorflow weight layer you want. The inputs to the function should be a stimuli image, one used during training or a new one you provided, and then the tensorflow variable name that represents the layer's state during the training process, for instance if you wanted to see what the [LeNet lab's](https://classroom.udacity.com/nanodegrees/nd013/parts/fbf77062-5703-404e-b60c-95b78b2f3f9e/modules/6df7ae49-c61c-4bb2-a23e-6527e69209ec/lessons/601ae704-1035-4287-8b11-e2c2716217ad/concepts/d4aca031-508f-4e0b-b493-e7b706120f81) feature maps looked like for it's second convolutional layer you could enter conv2 as the tf_activation variable.
#
# For an example of what feature map outputs look like, check out NVIDIA's results in their paper [End-to-End Deep Learning for Self-Driving Cars](https://devblogs.nvidia.com/parallelforall/deep-learning-self-driving-cars/) in the section Visualization of internal CNN State. NVIDIA was able to show that their network's inner weights had high activations to road boundary lines by comparing feature maps from an image with a clear path to one without. Try experimenting with a similar test to show that your trained network's weights are looking for interesting features, whether it's looking at differences in feature maps from images with or without a sign, or even what feature maps look like in a trained network vs a completely untrained one on the same sign image.
#
# <figure>
# <img src="visualize_cnn.png" width="380" alt="Combined Image" />
# <figcaption>
# <p></p>
# <p style="text-align: center;"> Your output should look something like this (above)</p>
# </figcaption>
# </figure>
# <p></p>
#
# +
# Build a multi-output model that exposes the activations of the first six
# layers, then run one validation image through it to collect feature maps.
test_image = X_valid[150]
layer_outputs = [layer.output for layer in model.layers[:6]]
activation_model = keras.Model(inputs=model.input, outputs=layer_outputs)
activations = activation_model.predict(np.array([test_image]))
# -
# predicting images: add a batch dimension to the single test image.
x = np.expand_dims(test_image, axis=0)
images = np.vstack([x])
# NOTE(review): predict_classes is a legacy Sequential-only API removed in
# newer TF releases; np.argmax(model.predict(images), axis=-1) is the
# equivalent — confirm the TF version pinned for this notebook.
classes = model.predict_classes(images, batch_size=10)
# Feature maps produced by the third layer (index 2) for the test image.
layer_activation = activations[2]
print(layer_activation.shape)
plt.imshow(test_image, cmap="gray")
plt.axis("off")
# +
import cv2

# Plot every feature map of the selected activation in a 2-row grid.
count = layer_activation.shape[-1]
fig1, f1_axes = plt.subplots(ncols=count//2, nrows=2, figsize=(5, 3), constrained_layout=True)
f1_axes = f1_axes.flatten()
for axis, channel in zip(f1_axes, range(count)):
    feature_map = layer_activation[0, :, :, channel]
    # Min-max rescale each map to [0, 1] so imshow uses the full gray range.
    scaled = cv2.normalize(feature_map, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    axis.axis('off')
    axis.imshow(scaled, cmap="gray")
# +
# Same visualization for a deeper layer (index 4), in a 4-row grid.
layer_activation = activations[4]
print(layer_activation.shape)
count = layer_activation.shape[-1]
fig1, f1_axes = plt.subplots(ncols=count//4, nrows=4, figsize=(5,4), constrained_layout=True)
f1_axes = f1_axes.flatten()
for channel in range(count):
    feature_map = layer_activation[0, :, :, channel]
    # Rescale to [0, 1] per map before display.
    scaled = cv2.normalize(feature_map, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
    f1_axes[channel].axis('off')
    f1_axes[channel].imshow(scaled, cmap="gray")
# -
| Traffic_Sign_Classifier.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: conda_tensorflow_p36
# language: python
# name: conda_tensorflow_p36
# ---
# # Working with TFRecord Datasets
#
# 1. [Introduction](#Introduction)
# 1. [Prerequisites](#Prerequisites)
# 1. [Converting a dataset from CSV to TFrecords](#Converting-a-dataset-from-CSV-to-TFrecords)
# 1. [Upload dataset to S3](#Upload-dataset-to-S3)
# 1. [Construct a DNNClassifier](#Construct-a-DNNClassifier)
# 1. [Train a Model](#Train-a-Model)
# 1. [Run Batch Transform](#Run-Batch-Transform)
# 1. [Build a container for transforming TFRecord input](#Build-a-container-for-transforming-TFRecord-input)
# 1. [Push container to ECR](#Push-container-to-ECR)
# 1. [Create a model with an inference pipeline](#Create-a-model-with-an-inference-pipeline)
# 1. [Run a batch transform job](#Run-a-batch-transform-job)
# 1. [Inspect batch transform output](#Inspect-batch-transform-output)
# ## Introduction
#
# TFRecord is a standard TensorFlow data format. It is a record-oriented binary file format that allows for efficient storage and processing of large datasets. In this notebook, we’ll demonstrate how to take an existing CSV dataset and convert it to TFRecord files. We’ll also build a TensorFlow training script that accepts serialized tf.Example protos (the payload of our TFRecords) as input during training. Then, we'll run a training job using the TFRecord dataset we've generated as input. Finally, we'll demonstrate how to run a batch transform job with an inference pipeline so that we can pass the TFRecord dataset as input.
# ## Prerequisites
#
# Let's start by specifying:
# * The S3 bucket and prefixes you'd like to use for training and batch transform data.
# * The IAM role that will be used for training and batch transform jobs, as well as ECR repository creation and image upload.
# +
import boto3
import sagemaker
import tensorflow as tf
# S3 bucket/prefixes used throughout the notebook; replace the placeholder
# with a bucket the execution role can read and write.
bucket = '<your_bucket_name>'
training_prefix = 'training'
batch_input_prefix = 'batch_input'
batch_output_prefix ='batch_output'
# SageMaker session and the IAM role used for training/transform jobs.
sess = sagemaker.Session()
role = sagemaker.get_execution_role()
# -
# ## Converting a dataset from CSV to TFRecords
#
# First, we'll take an existing CSV dataset (located in `./dataset-csv/`) and convert it to the TFRecords file format:
# +
import os

csv_root = './dataset-csv/'
tfrecord_root = './dataset-tfrecord/'
test_csv_file = 'iris_test.csv'
train_csv_file = 'iris_train.csv'
test_tfrecord_file = 'iris_test.tfrecords'
train_tfrecord_file = 'iris_train.tfrecords'

def _floatlist_feature(value):
    """Wrap a scalar in a single-element float_list tf.train.Feature."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[float(value)]))

def _int64list_feature(value):
    """Wrap a scalar int in a single-element int64_list tf.train.Feature."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))

# create the tfrecord dataset dir
if not os.path.isdir(tfrecord_root):
    os.mkdir(tfrecord_root)

for input_file, output_file in [(test_csv_file, test_tfrecord_file), (train_csv_file, train_tfrecord_file)]:
    # create the output file
    open(tfrecord_root + output_file, 'a').close()
    with tf.python_io.TFRecordWriter(tfrecord_root + output_file) as writer:
        with open(csv_root + input_file, 'r') as f:
            f.readline()  # skip the CSV header line
            for line in f:
                cols = line.split(',')
                feature = {
                    'sepal_length': _floatlist_feature(cols[0]),
                    'sepal_width': _floatlist_feature(cols[1]),
                    'petal_length': _floatlist_feature(cols[2]),
                    'petal_width': _floatlist_feature(cols[3]),
                }
                # BUG FIX: the original compared the *file object* `f` to the
                # filename string (always False), so the training set was
                # written WITHOUT labels. Compare the filename instead; only
                # the training file carries the 'label' feature.
                if input_file == train_csv_file:
                    feature['label'] = _int64list_feature(int(cols[4].rstrip()))
                example = tf.train.Example(
                    features=tf.train.Features(
                        feature=feature
                    )
                )
                writer.write(example.SerializeToString())
# -
# ### Upload dataset to S3
#
# Next, we'll upload the TFRecord datasets to S3 so that we can use it in training and batch transform jobs.
# +
def upload_to_s3(bucket, key, file):
    """Upload the local file at *file* to s3://bucket/key."""
    s3 = boto3.resource('s3')
    # FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(file, "rb") as data:
        s3.Bucket(bucket).put_object(Key=key, Body=data)

upload_to_s3(bucket, training_prefix + '/' + train_tfrecord_file, tfrecord_root + train_tfrecord_file)
upload_to_s3(bucket, batch_input_prefix + '/' + test_tfrecord_file, tfrecord_root + test_tfrecord_file)
# -
# ## Construct a DNN Classifier
#
# In `./dnn-classifier/train.py` we've defined a neural network classifier using TensorFlow's DNNClassifier. We can take a look at the train script to see how the network and input functions are defined:
# !cat ./dnn-classifier/train.py
# ## Train a Model
#
# Next, we'll kick off a training job using the training script defined above.
# +
from sagemaker.tensorflow import TensorFlow

train_data_location = 's3://{}/{}'.format(bucket, training_prefix)
instance_type = 'ml.c4.xlarge'
# Script-mode TF estimator; train.py consumes the TFRecord channel below.
# NOTE(review): train_instance_* / script_mode are SageMaker SDK v1
# parameter names — confirm the SDK version pinned for this notebook.
estimator = TensorFlow(entry_point='train.py',
                       source_dir='dnn-classifier',
                       model_dir='/opt/ml/model',
                       train_instance_type=instance_type,
                       train_instance_count=1,
                       role=sagemaker.get_execution_role(), # Passes to the container the AWS role that you are using on this notebook
                       framework_version='1.11.0', # Uses TensorFlow 1.11
                       py_version='py3',
                       script_mode=True)
# The 'training' channel maps to SM_CHANNEL_TRAINING inside the container.
inputs = {'training': train_data_location}
estimator.fit(inputs)
# -
# ## Run Batch Transform
# ### Build a container for transforming TFRecord input
#
# The SageMaker TensorFlow Serving container uses the TensorFlow ModelServer RESTful API to serve predict requests. In the next step, we'll create a container to transform mini-batch TFRecord payloads into JSON objects that can be forwarded to the TensorFlow serving container. To do this, we've created a simple Python Flask app that does the transformation, the code for this container is available in the `./tfrecord-transformer-container/` directory. First, we'll build the container:
# !docker build -t tfrecord-transformer ./tfrecord-transformer-container/
# ### Push container to ECR
#
# Next, we'll push the docker container to an ECR repository in your account. In order to push the container to ECR, the execution role attached to this notebook should have permissions to create a repository, set a repository policy, and upload an image.
# +
account_id = boto3.client('sts').get_caller_identity().get('Account')
region = boto3.session.Session().region_name

ecr_repository = 'tfrecord-transformer'
tag = ':latest'
# FIX: the original line ended with a stray comma, making uri_suffix a
# 1-element tuple instead of a string.
uri_suffix = 'amazonaws.com'
# BUG FIX: `region == 'cn-north-1' or 'cn-northwest-1'` is always truthy
# (a non-empty string), so every region got the China suffix. Use a proper
# membership test so only the China regions do.
if region in ('cn-north-1', 'cn-northwest-1'):
    uri_suffix = 'amazonaws.com.cn'
transformer_repository_uri = '{}.dkr.ecr.{}.{}/{}'.format(account_id, region, uri_suffix, ecr_repository + tag)
# docker login
# !$(aws ecr get-login --region $region --registry-ids $account_id --no-include-email)
# create ecr repository
# !aws ecr create-repository --repository-name $ecr_repository
# attach policy allowing sagemaker to pull this image
# !aws ecr set-repository-policy --repository-name $ecr_repository --policy-text "$( cat ./tfrecord-transformer-container/ecr_policy.json )"
# !docker tag {ecr_repository + tag} $transformer_repository_uri
# !docker push $transformer_repository_uri
# -
# ### Create a model with an inference pipeline
#
# Next, we'll create a SageMaker model with the two containers chained together (TFRecord transformer -> TensorFlow Serving).
# +
from sagemaker.tensorflow.serving import Model
from sagemaker.utils import name_from_base

client = boto3.client('sagemaker')
model_name = name_from_base('tfrecord-to-tfserving')
# First container in the inference pipeline: converts TFRecord payloads
# into JSON requests for TensorFlow Serving.
transform_container = {
    "Image": transformer_repository_uri
}
# Second container: TensorFlow Serving, loading the artifacts trained above.
tf_serving_model = Model(model_data=estimator.model_data,
                         role=sagemaker.get_execution_role(),
                         image=estimator.image_name,
                         framework_version=estimator.framework_version,
                         sagemaker_session=estimator.sagemaker_session)
tf_serving_container = tf_serving_model.prepare_container_def(instance_type)
# Pipeline model: requests flow transformer -> TF Serving, in list order.
model_params = {
    "ModelName": model_name,
    "Containers": [
        transform_container,
        tf_serving_container
    ],
    "ExecutionRoleArn": sagemaker.get_execution_role()
}
client.create_model(**model_params)
# -
# ### Run a batch transform job
#
# Next, we'll run a batch transform job using our inference pipeline model. We'll specify `SplitType=TFRecord` and `BatchStrategy=MultiRecord` to specify that our dataset will be split by TFRecord boundaries, and multiple records will be batched in a single request up to the `MaxPayloadInMB=1` limit.
# +
input_data_path = 's3://{}/{}'.format(bucket, batch_input_prefix)
output_data_path = 's3://{}/{}'.format(bucket, batch_output_prefix)
# Batch transform over the pipeline model: inputs are split on TFRecord
# boundaries and multiple records are batched per request, up to
# max_payload (1 MB).
transformer = sagemaker.transformer.Transformer(
    model_name = model_name,
    instance_count = 1,
    instance_type = instance_type,
    strategy = 'MultiRecord',
    max_payload = 1,
    output_path = output_data_path,
    assemble_with= 'Line',
    base_transform_job_name='tfrecord-transform',
    sagemaker_session=sess,
)
transformer.transform(data = input_data_path,
                      split_type = 'TFRecord')
# Block until the transform job finishes.
transformer.wait()
# -
# ### Inspect batch transform output
#
# Finally, we can inspect the output files of our batch transform job to see the predictions.
# The transform job writes one output object per input file, named
# "<input_file>.out" under the job's output path.
output_uri = transformer.output_path + '/' + test_tfrecord_file + '.out'
# !aws s3 cp $output_uri -
| sagemaker_batch_transform/working_with_tfrecords/working-with-tfrecords.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="QQrfFlDKMG4F"
# ## 데이터 로딩
# + id="_YFt1q5gtyN8"
import pandas as pd
# + colab={"base_uri": "https://localhost:8080/"} id="SWWssKdbt6bF" outputId="0b5d1199-d615-4241-fc8e-16e9dc666a28"
# auto-mpg.csv ships without a header row, so read it positionally first.
df = pd.read_csv('./auto-mpg.csv', header=None)
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 419} id="sOBYhrZAuXuK" outputId="0067edcf-5697-4638-b562-bde73b6ae11f"
# Assign the UCI auto-mpg column names.
df.columns = ['mpg','cylinders','displacement','horsepower','weight',
              'acceleration','model year','origin','name']
df
# + colab={"base_uri": "https://localhost:8080/"} id="xgrpTkPZu9tL" outputId="6094270a-2a18-45f3-b3d2-35e03e1cc2e7"
df.info()
# + colab={"base_uri": "https://localhost:8080/", "height": 173} id="QVBi3dwsu_-y" outputId="8bd7ecdb-dc3a-478f-fac3-f89cf68acabc"
# horsepower is object-typed at this point: it contains '?' placeholders.
df[['horsepower','name']].describe(include='all')
# + [markdown] id="gCUQRzFiMmU6"
# ## replace()
# + colab={"base_uri": "https://localhost:8080/"} id="oI1RggSdvLMS" outputId="b678ffdb-e1a2-453e-a2ae-ce8953b7b814"
df['horsepower'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 322} id="3yTF9xfKv2fS" outputId="d3fd341e-4b7f-412f-da32-e4d553af6601"
df['horsepower'].astype('float64') # included only to confirm this direct cast fails: '?' cannot become a float
# + colab={"base_uri": "https://localhost:8080/"} id="icawiRHUyAXZ" outputId="9877b6c6-3fad-4404-a265-580c3e828748"
df['horsepower'].unique()
# + colab={"base_uri": "https://localhost:8080/"} id="B7MyeTFryTCK" outputId="f05cdbce-6646-427e-948e-9a99c2e4344b"
# Replace '?' with None so numeric statistics can be computed afterwards.
# NOTE(review): pandas treats replace(..., value=None) specially in some
# versions (it can be read as method-based filling) — verify the result
# actually contains missing values rather than padded ones.
df_horsepower = df['horsepower'].replace(to_replace='?', value=None, inplace=False)
# type(df_horsepower)
df_horsepower.unique()
# + id="VeKkMpoG0jLE"
df_horsepower = df_horsepower.astype('float')
# + colab={"base_uri": "https://localhost:8080/"} id="PGJRUAr_01iR" outputId="73171f87-db02-41a4-a55c-35dd112dc4ee"
df_horsepower.mean()
# + colab={"base_uri": "https://localhost:8080/"} id="pNYiw3rh0-66" outputId="874bffd6-e6aa-4e84-9cf9-3a38de1d4bc5"
# Fill missing horsepower with 104 (the rounded mean computed above).
df['horsepower'] = df_horsepower.fillna(104)
df.info()
# + id="Po0y9vkp1Y_q" colab={"base_uri": "https://localhost:8080/"} outputId="b0f32952-8db3-4a85-9a70-fb389dfb68fd"
df['name'].unique()
# + [markdown] id="C_ZMnLcAL3oW"
# ## 분류와 연속 컬럼 구분
# + colab={"base_uri": "https://localhost:8080/", "height": 297} id="FQ0Uw50u1wla" outputId="62bc0be0-f934-4ceb-f7fb-e3ad425cc2a6"
df.head(8) # acceleration has fractional values, so it can be treated as continuous
# + [markdown] id="91ubQ7RdM_e5"
# ### check columns
# - 연속형: displacement, horsepower, weight, acceleration, mpg,
# - 분류형: model year, name, cylinders, origin
# - 중립형:
# + colab={"base_uri": "https://localhost:8080/"} id="mrzQeuGZWKzw" outputId="e038c8f5-32ae-4abf-88c0-077b2e1ebef2"
df['name'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="ZPxbrF2XLwFC" outputId="f6885a0a-dfb3-4d14-9142-9a4823016d0f"
df['mpg'].describe(include='all')
# + colab={"base_uri": "https://localhost:8080/"} id="wL_Q2mi8Ohnt" outputId="a37d54db-1360-48bf-c0fb-677c88b9fe9f"
df['mpg'].value_counts() # mpg has fractional values, so it is continuous
# + colab={"base_uri": "https://localhost:8080/"} id="4ZtTxQdRPDi4" outputId="40e9d6d5-8c74-4662-8a54-fa6c7c371432"
df['cylinders'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="YjUotM5sP9eh" outputId="f7dd52bb-500a-4a19-8d6b-84547cbc311a"
df['origin'].value_counts()
# + [markdown] id="g7095VDIQRjZ"
# ## 정규화 단계
# x와 y형으로 나눈다.
# + id="EnolhnhhQIU7"
# Target / feature split: continuous columns will be standardized,
# categorical ones one-hot encoded later.
Y = df['mpg']
X_continues = df[['displacement', 'horsepower', 'weight', 'acceleration']]
X_category = df[['model year', 'cylinders', 'origin']]
# + id="1PVxdHS3W7qR"
from sklearn import preprocessing
# + colab={"base_uri": "https://localhost:8080/"} id="iEzEXeuWYgk4" outputId="bd6f5d7e-486e-473c-f603-498d1239ceea"
scaler = preprocessing.StandardScaler()
type(scaler)
# + colab={"base_uri": "https://localhost:8080/"} id="-D3TI-sGY8jp" outputId="ea706f68-b01b-4e38-812a-1f4706dbee18"
# Fit mean/std per column on the continuous features only.
scaler.fit(X_continues)
# + id="O3_qfddrZbcn"
X = scaler.transform(X_continues)
# + id="iseKxVmnZwzd"
from sklearn.linear_model import LinearRegression
# + colab={"base_uri": "https://localhost:8080/"} id="k69WCKn7aMOG" outputId="2a097c14-9498-4697-a7a5-f012b1a8050f"
lr = LinearRegression()
type(lr)
# + colab={"base_uri": "https://localhost:8080/"} id="DetVv6biaQdV" outputId="9a8759ad-110f-49c3-b76c-dfd41b244730"
lr.fit(X,Y)
# + colab={"base_uri": "https://localhost:8080/"} id="TYzcihwMafLY" outputId="90fb72e0-34f4-47e1-dd41-e35013773682"
# R^2 on the training data itself (no hold-out split at this stage).
lr.score(X,Y)
# + colab={"base_uri": "https://localhost:8080/", "height": 80} id="pQpsA0Sxq12F" outputId="9ee2a239-c4b0-47d1-bf5f-b400555e20d8"
# First raw row — source of the hand-built sample used in the next cell.
df.head(1)
# + [markdown] id="YOWYFLZ-q7rU"
# ### X_continues = df[['displacement', 'horsepower', 'weight', 'acceleration']]
# 307.0, 130.0, 3504.0, 12.0
#
# + colab={"base_uri": "https://localhost:8080/"} id="v21rRXHusOfE" outputId="c9d06843-edb3-4f72-8c43-694662ed2d12"
# Standardize a single hand-built sample (the first raw row) with the
# already-fitted scaler, then predict its mpg below.
x_customer = scaler.transform([[307.0, 130.0, 3504.0, 12.0]])
x_customer
# + colab={"base_uri": "https://localhost:8080/"} id="OEbBKGZOso30" outputId="c6984803-0769-447a-ffcc-bc06cd250258"
lr.predict(x_customer)
# + id="nA9oJpBFq430"
# lr.predict([[307.0, 130.0, 3504.0, 12.0]])
# + [markdown] id="jpGrhfZ3nny-"
# ## pickle
# + id="gftvTuqNaw73"
import pickle
# Persist the fitted regressor next to the notebook.
pickle.dump(lr, open('./autompg_lr.pkl','wb'))
# + colab={"base_uri": "https://localhost:8080/"} id="vsn4QYhseqqG" outputId="c427268f-4e67-4590-bc11-fba3b3b3f14f"
# FIX: the model is dumped to ./autompg_lr.pkl above but was being listed
# and re-loaded from ./saves/autompg_lr.pkl — use the same path for both.
# !ls -l ./autompg_lr.pkl
# + colab={"base_uri": "https://localhost:8080/"} id="Ddy7Bxvwglfu" outputId="accdf93e-ce4b-44c4-b022-93d9ea225613"
pickle.load(open('./autompg_lr.pkl', 'rb'))
# + id="ZkWP6cpZgtp3"
pickle.dump(scaler, open('./autompg_standardscaler.pkl','wb'))
# + [markdown] id="3WV84ZGEfsNC"
# # One hot encoding
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="fcDDr7lutu-J" outputId="efe6acb8-78fc-4d8a-e6ae-aaa5b3a83b89"
X_category.head(3)
# + colab={"base_uri": "https://localhost:8080/"} id="REZH8wPjfus0" outputId="ceda5777-b51c-4e20-9247-0abb1bc90685"
X_category['origin'].value_counts()
# + id="s1hExn2HhHcN"
# data, prefix=None
# One-hot encode origin: columns become origin_1, origin_2, origin_3.
df_origin = pd.get_dummies(X_category['origin'], prefix='origin')
# + id="Z6xXSBCTlI3w"
# One-hot encode cylinders the same way.
df_cylinders = pd.get_dummies(X_category['cylinders'], prefix='cylinders')
# + colab={"base_uri": "https://localhost:8080/"} id="xKl-p7-ymWix" outputId="2f6555b8-180e-49c9-f997-fac329ed8aa8"
df_origin.shape, df_cylinders.shape
# + colab={"base_uri": "https://localhost:8080/", "height": 142} id="0MDOqziWmfwQ" outputId="8765df76-1232-443d-bbdb-2d1622d97f55"
X_continues.head(3)
# + colab={"base_uri": "https://localhost:8080/", "height": 204} id="3_H_YyyWm37R" outputId="2910fffa-68cc-45e9-82f2-1051ffb5b1fc"
# X_continues + df_cylinders + df_origin
# objs, axis=0
# Column-wise concat of continuous features and both dummy frames.
X = pd.concat([X_continues,df_cylinders,df_origin], axis='columns')
X.head(5)
# + id="Pq0gwbuNvAoR"
# NOTE(review): this standardizes the one-hot dummy columns along with the
# continuous ones — harmless for trees, but worth confirming it is intended.
scaler_xgb = preprocessing.StandardScaler()
scaler_xgb.fit(X)
X = scaler_xgb.transform(X)
# + colab={"base_uri": "https://localhost:8080/"} id="PNPKCY3S0RHI" outputId="0b46393d-d77c-4fde-f2b0-575b794d814c"
X
# + id="S9cDWrNI38zv"
import pickle
# Persist the scaler so inference code can reproduce the transform.
pickle.dump(scaler_xgb, open('./scaler_xgb.pkl','wb'))
# + colab={"base_uri": "https://localhost:8080/"} id="_c_J0qTonr2Q" outputId="500dfd89-1123-4108-8cf9-b30152fac7d4"
from sklearn.model_selection import train_test_split
# NOTE(review): no random_state is set, so the split (and the scores below)
# differ between runs.
x_train,x_test,y_train,y_test = train_test_split(X, Y)
x_train.shape,x_test.shape,y_train.shape,y_test.shape
# + id="lnkaa4fWpY5Y"
import xgboost
# + colab={"base_uri": "https://localhost:8080/"} id="qnyU7YPYsILP" outputId="6819a592-d113-4b79-b9c8-6aa748e75b54"
# Gradient-boosted tree regressor with default hyperparameters.
xgb = xgboost.XGBRegressor()
xgb
# + colab={"base_uri": "https://localhost:8080/"} id="k3JzWOzqslao" outputId="52443919-8a0c-40bf-9a65-cdf1bb2fb9db"
xgb.fit(x_train, y_train)
# + id="pzJ3b-Un4kX2"
pickle.dump(xgb,open('./xgb_model.pkl','wb'))
# + colab={"base_uri": "https://localhost:8080/"} id="KKjq_UnNs17e" outputId="dad074cd-fbb0-42c7-cb6f-6d719b57e56b"
# R^2 on train vs. held-out test — compare the two to gauge overfitting.
xgb.score(x_train, y_train)
# + colab={"base_uri": "https://localhost:8080/"} id="0FZOkUkTtDcf" outputId="980f2943-f386-49cd-9d22-5ce767229cf2"
xgb.score(x_test, y_test)
| autompg_xgboost.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/KazumaShachou/Python_learning/blob/main/Dictionary.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="FEg9Nsc0M-tY"
alien = {'color': 'blue', 'eyes': 'black', 'points': 5}
# + colab={"base_uri": "https://localhost:8080/"} id="tjzCFGt_BiL3" outputId="1bc5a1aa-4092-4f36-e6b1-808f65458ffb"
print(alien['color'])
print(alien['eyes'])
print(alien['points'])
# + colab={"base_uri": "https://localhost:8080/"} id="TddhCR47Bo52" outputId="1e1c25bf-11ad-4db1-fab6-3019a049ee7e"
novos_pontos = alien['points']
print(f'Você ganhou {novos_pontos} pontos!')
# + colab={"base_uri": "https://localhost:8080/"} id="81e11L79Cy-2" outputId="3f1a7049-3aa8-4051-a195-5dd2e1e882c7"
alien['posição_x'] = 25
alien['posição_y'] = 30
print(alien)
# + colab={"base_uri": "https://localhost:8080/"} id="IXhOkXY0YPwG" outputId="ea95e0fd-8456-4e6b-8fb0-bf1d6be5a1ac"
alien1 = {'cor' : 'cinza'}
print(f"esse alien tem cor {alien1['cor']}")
alien1['cor'] = 'azul'
print(f"esse alien agora tem cor {alien1['cor']}")
# + [markdown] id="aWAU91VAaoSp"
#
# + colab={"base_uri": "https://localhost:8080/"} id="WvjlOM3zZvac" outputId="74cd3661-d554-4be6-f7d9-7babfc08a9ac"
print(alien)
# + colab={"base_uri": "https://localhost:8080/"} id="Gni9OM31XL-V" outputId="f8067697-a6a2-473a-c613-9161ffbea8ba"
del alien['color']
print(alien)
# + id="KIzNmvYnXcUM"
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil' : 'python',
}
# + colab={"base_uri": "https://localhost:8080/"} id="sPJagdIjhohB" outputId="e2857c3d-8a8b-4d2a-bce8-4196a6ebeff6"
language = favorite_languages['jen'].title()
print(f'linguagem favorita da jen é {language}')
# + colab={"base_uri": "https://localhost:8080/"} id="CsZditZXlghv" outputId="63f19f8a-6a8e-41f5-baf5-d8c739c14006"
alien_0 = {'color': 'green', 'speed': 'slow'}
point_value = alien_0.get('points', 'No point value assigned.')
print(point_value)
# + colab={"base_uri": "https://localhost:8080/"} id="whx96I1ltOS1" outputId="f714fc62-b61e-4d47-9720-0b165dda9319"
usuario = {'nickname': 'rai',
'primeiro_nome': 'sora',
'sobrenome': 'shiro'
}
for key, value in usuario.items():
print(f"\nKey: {key}")
print(f"Value: {value}")
# + colab={"base_uri": "https://localhost:8080/"} id="-A1lw3_D_ms7" outputId="bb4f4ffe-ee19-4b1e-9f06-2999e51ff15d"
favorite_languages = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil' : 'python',
}
for nome, linguagem in favorite_languages.items():
print(f'{nome.title()} gosta da linguagem {linguagem.title()}')
# + colab={"base_uri": "https://localhost:8080/"} id="geDJKaLwG6F4" outputId="b63d3e26-365f-409b-b518-c7ce44c9166d"
favoritos = {
'jen': 'python',
'sarah': 'c',
'edward': 'ruby',
'phil' : 'python',
}
friends = ['phil', 'sarah']
for name in favoritos.keys():
print(name.title())
if name in friends: #ESTUDAR MUITO ESSA PARTE
linguagem = favoritos[name].title() #ESTUDAR MUITO ESSA PARTE - Acredito que o [name] ele tira o nome do print e apenas exibe a linguagem
print(f'\t{name.title()}, vejo que você gosta de {linguagem}') #ESTUDAR MUITO ESSA PARTE
# + id="W20ih34bP0_V" colab={"base_uri": "https://localhost:8080/"} outputId="43944a2e-5dc9-4da8-e578-20439fdab50b"
# A list of dicts: each dict describes one alien.
alien_0 = {'color': 'green', 'points': 5}
alien_1 = {'color': 'yellow', 'points': 10}
alien_2 = {'color': 'red', 'points': 15}
aliens = [alien_0, alien_1, alien_2]
for alien in aliens:
    print(alien)
# + colab={"base_uri": "https://localhost:8080/"} id="r9i7EXYdUFDP" outputId="97cb7b0b-1917-4925-b623-7e2f7c46e397"
aliens = []
# make 50 aliens
for alien_number in range(50):
    new_alien = {'color': 'blue', 'speed': 'slow', 'points': 20}
    aliens.append(new_alien)
# show the first 10 aliens
for alien in aliens[:10]:
    print(alien)
print("...")
# how many aliens were created
print(f'total number aliens: {len(aliens)}')
# + id="qiZJVs1uXaRu" colab={"base_uri": "https://localhost:8080/"} outputId="0c5eae0a-40a9-49fe-bec8-3c4a3c36750d"
# Same pattern: build 10 monsters, print the last five.
monstros = []
for monstro in range(10):
    novo_monstro = {'cor': 'vermelha', 'velocidade': 'lenta', 'vida': 1000}
    monstros.append(novo_monstro)
for monstrinhos in monstros[5:]:
    print(monstrinhos)
    print("............................................................")
print(f'total de monstros é : {len(monstros)}')
# + colab={"base_uri": "https://localhost:8080/"} id="QXAYDHwkjjjp" outputId="6a9b2069-4216-407f-a07e-b8b71de9b48c"
# Very interesting: mutating the dicts through the list updates them in place.
bosses = []
for boss_number in range(30):
    new_boss = {'color': 'black', 'speed': 'fast', 'points': 1000, 'life': 100000}
    bosses.append(new_boss)
# Upgrade the first five bosses in place.
for boss in bosses[:5]:
    if boss['color'] == 'black':
        boss['color'] = 'red'
        boss['speed'] = 'very_fast'
        boss['points'] = '5000'
        boss['life'] = 1000000
    # NOTE(review): this elif never runs here -- every boss starts 'black',
    # so the branch above always matches first.
    elif boss['color'] == 'red':
        boss['color'] = 'white'
        boss['speed'] = 'very_fast'
        boss['points'] = '5000'
        boss['life'] = 1000000
for boss in bosses[:10]:
    print(boss)
print('----------------------------------------------------------------------')
# + colab={"base_uri": "https://localhost:8080/"} id="ZAaRVsZupnyG" outputId="6ba51402-ee48-4279-f924-b4d330959250"
favorite_languages = {
'jen': ['python', 'ruby'],
'sarah': ['c'],
'edward': ['ruby', 'go'],
'phil': ['python', 'haskell'],
}
for name, languages in favorite_languages.items():
print(f'\n{name.title()} favorite language are: ')
for language in languages:
print(f"\t{language.title()}")
# + colab={"base_uri": "https://localhost:8080/"} id="0fAbPRtEtF5U" outputId="eeaa0a79-2196-4a20-8006-3e320a7d3279"
# Nested dictionaries: each vtuber name maps to a dict describing her traits.
vtubers = {
    'pekora': {'tipo': 'engraçada', 'gostos': 'cenoura'},
    'watame': {'tipo': 'fofinha', 'gostos': 'rap'},
    'kanata': {'tipo': 'gorila', 'gostos': 'banana'},
}
# Walk the outer dict; user_info is the inner per-vtuber dict.
for username, user_info in vtubers.items():
    print(f"\nUsername: {username}")
    descricao = f"Ser {user_info['tipo']} e gostar de {user_info['gostos']}"
    print(f"\t Suas caracteristicas são {descricao.title()}")
# + id="NORHbtsmwlID"
| Dictionary.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Goal: Get the intersection area and union area between two irregular shapes.
# <p>
# <center>Author: Chieh</center>
# <p>
# > 目標: 可以讓兩個不規則形狀的圖形求得交集和聯集
#
#
# +
import cv2
import numpy as np
from matplotlib import pyplot as plt
# (removed a duplicate `import cv2` -- the module was imported twice)
# %matplotlib inline
# It can output all of lines of array.
np.set_printoptions(threshold=np.inf)
# -
# Load the images
# +
img = cv2.imread('assets/sample1.png')
img2 = cv2.imread('assets/sample2.png')
# plt.imshow(img2)
# cv2.imread returns None on a missing/unreadable file, so fail fast here.
assert img is not None, "Importing img fail."
assert img2 is not None, "Importing img fail."
titles = ['sample1.png','sample2.png']
images = [img,img2]
# Show both inputs side by side; enumerate(..., start=1) gives the
# 1-based subplot index that matplotlib expects.
for i, (image, title) in enumerate(zip(images, titles), start=1):
    plt.subplot(1, 2, i)
    plt.imshow(image)
    plt.title(title)
    plt.xticks([]), plt.yticks([])
plt.show()
# -
# ### Introduce the `cv2.threshold` function
# cv2.threshold (src, thresh, maxval, type)
# > https://blog.csdn.net/JNingWei/article/details/77747959
ret1, th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret2, th2 = cv2.threshold(img2,127,255,cv2.THRESH_BINARY)
th3 = th1+th2
# After we did the cv2.threshold, the array remained the value of 0 and 255.
print("th1",np.unique(th1))
print("th2",np.unique(th2))
# Now we combined th1 and th2, and to see how th3 is going.
titles = ['20190604-093334_78.png','12-20_10-13-05_720.png','Both']
images = [th1,th2,th3]
for i in range(3):
plt.subplot(2,2,i+1),plt.imshow(images[i])
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
# After we combined the th1 and th2, we could see the th3 which got the three kinds of values including 0, 254, 255.
np.max(th3)
np.unique(th3)
# Now let's set the 254-valued pixels to 0.
#
# We make a copy of th3 and modify the copy.
# Rebuild the binarized masks and their (uint8, overflow-prone) sum.
ret1, th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret2, th2 = cv2.threshold(img2,127,255,cv2.THRESH_BINARY)
th3 = th1+th2
th4 = th3.copy()
# Zero the 254-valued pixels in one vectorized step instead of the original
# triple Python loop over every pixel and channel -- identical result,
# orders of magnitude faster on full-size images.
th4[th3 == 254] = 0
np.unique(th4)
# See the result of th4 which is organized by 0 and 255.
plt.imshow(th4)
# So the intersection pixels have value 255, while the union-only (white) pixels have value 254.
# Rebuild the masks and their sum, then isolate the union-only pixels.
ret1, th1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
ret2, th2 = cv2.threshold(img2,127,255,cv2.THRESH_BINARY)
th3 = th1+th2
th5 = th3.copy()
# Zero the 255-valued (intersection) pixels with a vectorized boolean mask
# instead of the original triple Python loop -- same result, much faster.
th5[th3 == 255] = 0
np.unique(th5)
# OK, so we can get the Union area (254).
plt.imshow(th5)
# # Compute the area
titles = ['Union (th3)','Intersection (th5)']
images = [th3,th5]
for i in range(2):
plt.subplot(1,2,i+1),plt.imshow(images[i])
plt.title(titles[i])
plt.xticks([]),plt.yticks([])
plt.show()
# ## Steps
# 1. We need to use `cv2.cvtColor` function to let th5 be a 2D array.
# 2. (Optional) We can apply `cv2.threshold` again so the array contains only 0 and 255. This matters for the union array, whose values are not binary.
# 3. We use `cv2.findContours` to find the contour of image.
# 4. Use `cv2.contourArea` to compute the area of contour.
# # Intersection Area
# if your array (Such as th5) already is binary, you can skip the second step. Directly go the step from 1 to 3.
np.unique(th5)
# +
th5_gray = cv2.cvtColor(th5, cv2.COLOR_BGR2GRAY) # th5_gray is 2D array.
contours5, hierarchy5 = cv2.findContours(
    th5_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# NOTE(review): only the first contour is measured -- assumes the
# intersection is a single connected blob; confirm for multi-blob inputs.
cnt5 = contours5[0]
area5 = cv2.contourArea(cnt5)
print("Intersection (th5) area is {}.".format(area5))
# -
# # Union Area
# Let's see the Union area.
#
# **Be careful about Union (th3) which is not binary value.**
np.unique(th3)
# 1. We skip the step of `cv2.threshold`, and directly find the contour.
# +
th3_gray = cv2.cvtColor(th3, cv2.COLOR_BGR2GRAY)
contours3, hierarchy3 = cv2.findContours(th3_gray, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
# NOTE(review): again only contours3[0] is measured (single-blob assumption).
cnt3 = contours3[0]
area3 = cv2.contourArea(cnt3)
print("Union (th3) area is {}.".format(area3))
# -
# 2. We did the step of `cv2.threshold` again, and then find the contour.
# +
th3_gray_thr = cv2.cvtColor(th3, cv2.COLOR_BGR2GRAY)
ret3_thr, th3_gray_binary_thr = cv2.threshold(th3_gray_thr,127,255,cv2.THRESH_BINARY)
contours3_thr, hierarchy3_thr = cv2.findContours(th3_gray_binary_thr, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
cnt3_thr = contours3_thr[0]
area3_thr = cv2.contourArea(cnt3_thr)
print("Union (th3) area is {}.".format(area3_thr))
# -
print("This is first case, np.unique(th3_gray) : {}.".format(np.unique(th3_gray)))
print("This is second case, np.unique(th3_gray_thr) : {}.".format(np.unique(th3_gray_thr)))
print("This is second case, np.unique(th3_gray_binary_thr) : {}.".format(np.unique(th3_gray_binary_thr)))
# As the result above, it looks no difference.
| Get the intersection area and union area between two irregular shapes.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3.9.7 ('torch-gym')
# language: python
# name: python3
# ---
# +
import gym
import pandas as pd
from stable_baselines3 import PPO
from stable_baselines3.ppo.policies import MlpPolicy
from stable_baselines3.ppo.policies import CnnPolicy
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.env_util import make_vec_env
# -
# "A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track. The system is controlled by applying a force of +1 or -1 to the cart. The pendulum starts upright, and the goal is to prevent it from falling over. A reward of +1 is provided for every timestep that the pole remains upright. The episode ends when the pole is more than 15 degrees from vertical, or the cart moves more than 2.4 units from the center." [CartPole-v1](https://gym.openai.com/envs/CartPole-v1/),
#
# [Max episode length is 500](https://github.com/openai/gym/blob/master/gym/envs/classic_control/cartpole.py), therefore the max reward is also 500.
# +
# Create new model
# 10 parallel CartPole environments speed up rollout collection.
env = make_vec_env('CartPole-v1', n_envs=10)
# PPO with every hyper-parameter spelled out explicitly so they are easy
# to tweak; values follow the stable-baselines3 signature.
model = PPO(
    policy=MlpPolicy,
    env=env,
    learning_rate=0.0003,
    n_steps=2048,
    batch_size=64,
    n_epochs=10,
    gamma=0.99,
    gae_lambda=0.95,
    clip_range=0.2,
    clip_range_vf=None,
    ent_coef=0.0,
    vf_coef=0.5,
    max_grad_norm=0.5,
    use_sde=False,
    sde_sample_freq=-1,
    target_kl=None,
    tensorboard_log=None,
    create_eval_env=False,
    policy_kwargs=None,
    verbose=1,
    seed=None,
    device='auto',
    _init_setup_model=True
)
# Load existing model
# model = PPO.load("./models/cartpole-v1/cartpole-v1/cartpole_v1_ppo_{timesteps}")
# +
# Train model
timesteps = 500000
log_dir = "./out/training-log/"
log_path = log_dir + f"cartpole-v1/"
save_dir = "./models/cartpole-v1/"
# Train agent
model.learn(
    total_timesteps=timesteps,
    callback=None,
    log_interval=-1,
    eval_env=gym.make('CartPole-v1'),
    # presumably 10000 total steps / 10 envs = evaluate every 1000 per-env
    # steps -- confirm against the stable-baselines3 eval_freq semantics.
    eval_freq=10000/10,
    n_eval_episodes=5,
    tb_log_name='PPO',
    eval_log_path=log_path,
    reset_num_timesteps=False
)
# +
# Display log
import pandas as pd
from utils.utils import load_log
# Load logs and combine into one dataframe
log_dir = "./out/training-log/"
log_path = log_dir + f"cartpole-v1/"
files = [
log_path + "evaluations_to_510K.npz",
]
data = pd.concat([load_log(x) for x in files], axis='index')
data.sort_values(by='timesteps', inplace=True)
data
# +
# Show log as graphic
from utils.utils import plot_log
plot_log(data, title='CartPole-v1 mean reward', axis=[0, 500000, 0, 550])
# -
# Save model
save_dir = "./models/cartpole-v1/"
name = "cartpole_v1_ppo_510K"
model.save(save_dir + name)
| workspace/ppo/test_ppo_cartpole_v1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# Standard imports
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
import os
import sys
import re
import glob
import suftware as su
from scipy.stats import norm
from scipy.stats import poisson
e = np.exp(1)
pi = np.pi
# -
in_dir = '20.08.16_mpsa_data'
# +
x_sample = f'{in_dir}/brca2_lib1_rep1.csv'
y_sample = f'{in_dir}/brca2_lib1_rep2.csv'
x_data = pd.read_csv(x_sample, index_col='ss')
y_data = pd.read_csv(y_sample, index_col='ss')
# +
# Join the two replicates on splice-site sequence; keep only shared sites.
df = pd.merge(left=x_data, right=y_data, left_index=True, right_index=True, how='inner')
df = df.rename(columns={'log_psi_x':'x','dlog_psi_x':'dx','log_psi_y':'y','dlog_psi_y':'dy'})
# Center, filter to x>1 and y>1, then re-center: dropping points shifts the
# mean, so the centering must be redone after the filter.
df['x'] -= df['x'].mean()
df['y'] -= df['y'].mean()
ix = (df['x']>1) & (df['y']>1)
df = df[ix]
df['x'] -= df['x'].mean()
df['y'] -= df['y'].mean()
print(f'Showing data for {sum(ix)} splice sites.')
# Replicate-vs-replicate scatter with error bars plus the y=x diagonal.
fig, ax = plt.subplots(figsize=[5,5])
lims = [-3,6]
ax.errorbar(x=df['x'],y=df['y'],xerr=df['dx'],yerr=df['dy'],elinewidth=2,linewidth=0,alpha=.1)
ax.plot(lims,lims,'--k')
ax.set_xlim(lims)
ax.set_ylim(lims)
# +
# Interesting, this looks like a Cauchy distribution
z = (df['x']-df['y'])/np.sqrt(df['dx']**2+df['dy']**2)
sns.distplot(z,hist=True)
from scipy.stats import cauchy, norm, laplace, t
z_grid = np.linspace(-4,4,1000)
plt.plot(z_grid, t.pdf(z_grid, scale=.8, df=3.5))
plt.plot(z_grid, norm.pdf(z_grid, scale=1))
print(f'std(z) = {np.std(z):.4f}')
#plt.yscale('log')
# +
# Compute the entropy of z-values
import suftware as sw
dist = sw.DensityEstimator(z.values)
dist.plot()
stats = dist.get_stats()
H_z = -stats.loc['posterior mean','entropy']
dH_z = stats.loc['posterior RMSD','entropy']
print(f'H[z] = {H_z:.4f} +- {dH_z:.4f}')
# -
# What is the entropy of a normal distribution?
H_normal = 0.5*np.log2(2*e*pi)
print(f'H_normal = {H_normal:.4f}')
# +
# Maybe I should use quantile normalization
| mavenn/development/20.08.15_analyze_cleaned_data.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # 모음 자료형
# (sec:string)=
# ## 문자열
# (sec:list)=
# ## 리스트
# - 변수 재할당을 다시 한 번 설명할 것.
# - {numref}`%s절 <sec:variable_reassignment>` 참고
# - 아래 코드 참고
#
# ```python
# >>> x = [0, 1, 2]
# >>> y = x
# >>> x[0] = 5
# >>> y[0]
# 5
# ```
# ### 영화 감독 봉준호
# 봉준호 영화 감독의 영화를 담고 있는 리스트가 아래와 같이 있다.
# Director Bong Joon-ho's films as a triply nested list: each inner list
# pairs an earlier film with its release year.
movie_Bong = ["기생충", 2019, ["설국열차", 2013, ["살인의 추억", 2003]]]
# The goal is to print every item, i.e. the following six lines:
#
# ```
# 기생충
# 2019
# 설국열차
# 2013
# 살인의 추억
# 2003
# ```
# A plain loop only walks the top level, so nested lists print unexpanded.
for entry in movie_Bong:
    print(entry)
# 그런데 위와 같이 하면 중첩으로 되어 있는 영화들을 제대로 풀어헤칠 수 없다.
# 2중 `for` 반복문을 활용해보자.
#
# **주의:** 아래 코드에서 `isinstance(item, list)`는 `item` 변수가 가리키는 항목이
# 리스트 자료형 여부를 확인한다.
# Flatten one level: expand items that are lists, print the rest as-is.
for outer in movie_Bong:
    if not isinstance(outer, list):
        print(outer)
    else:
        for inner in outer:
            print(inner)
# 여전히 삼중 리스트의 모든 항목을 나열하진 못한다.
# 3중 `for` 반복문을 활용해보자.
# Hand-unrolled traversal of three nesting levels; each level repeats the
# same "is it a list?" test before descending.
for level1 in movie_Bong:
    if not isinstance(level1, list):
        print(level1)
        continue
    for level2 in level1:
        if not isinstance(level2, list):
            print(level2)
            continue
        for level3 in level2:
            print(level3)
# 그런데 프로그램을 이렇게 구현하면 안된다.
# 만약에 영화목록이 4중, 5중으로 구성된 리스트로 작성되었다면
# 위 프로그램은 4중, 5중 `for` 반복문으로 수정해야 하고,
# 그러면서 프로그램의 길이와 복잡도가 기하급수적으로 증가하기 때문이다.
#
# 처리해야 하는 데이터에 따라 프로그램이 수정되거나 복잡도가 증가하지 않는
# 프로그램을 구현해야 한다.
#
# 다시 한 번 위 세 개의 프로그램을 살펴보자.
# 세 개의 프로그램은 사실상 아래 명령문을 반복해서 사용한다.
#
# ```python
# for 항목 in 리스트:
# if isinstance(항목, list):
# 명령문
# else:
# print(항목)
# ```
#
# 위 명령문은 리스트의 항목이 또 다른 리스트이면 그 리스트의 항목들을
# 대상으로 동일한 확인작업을 수행하며,
# 더 이상 리스트가 다른 리스트의 항목으로 포함되지 않을 때 까지 반복된다.
# 즉, 모든 중첩 리스트가 해체될 까지 리스트 여부를 반복하며,
# 리스트가 아니면 해당 항목을 화면에 출력한다.
#
# 이런 반복작업을 **재귀**(recursion)라 부르며,
# 반복되는 작업에 이름을 주면, 위 세 개의 코드를 하나의 함수로 정의할 수 있다.
# 예를 들어, 아래와 같이 앞서 언급된 명령문에 `printItems`이란 이름을 주어 함수로 정의해보자.
def printItems(aList):
    """Print every non-list element of an arbitrarily nested list, depth-first.

    Lists encountered at any depth are expanded recursively; all other
    values are printed one per line, in left-to-right order.
    """
    for element in aList:
        if not isinstance(element, list):
            print(element)
        else:
            printItems(element)
# `printItems` 함수는 좀 이상하다.
# 정의가 끝나지 않았는데 자신을 자신 본체에서 사용한다.
# (4번 줄 참조)
#
# 실제로 `printItems` 함수를 호출하면 실행과정 중에
# 자신을 또다시 호출한다.
# 단, 사용되는 인자가 다르며, 애초에 사용된 리스트의 항목이면서
# 또다른 리스트가 인자로 사용된다.
# 이런 함수를 자기 자신을 호출한다는 의미로 **재귀함수**(recursive function)라 부른다.
#
# 사실 임의로 중첩된 리스트를 인자로 받아도 중첩을 모두 풀어버린다.
# The recursive version handles any nesting depth without code changes.
printItems(movie_Bong)
# ## 튜플
# ## 사전
# ## 집합
# ## 조건제시법
# ## 연습문제
# 1. `printItems` 수정하여 `movie_Bong`에 포함된 항목들을 아래와 같이
# 출력하도록 하는 `printItems2` 함수를 구현하라.
#
# 기생충
# 2019
# 설국열차
# 2013
# 살인의 추억
# 2003
#
# **힌트**
# * `printItems` 함수의 인자를 두 개로 수정한다.
# 하나는 리스트의 인자를 다루며, 다른 하나는 들어쓰기 정도를 다루는
# 인자를 하나 받는다.
#
# ```python
# def printItems2(aList, level):
# 명령문
# ```
#
# * 위에서 `level` 매개변수의 인자는 탭키를 사용하는 횟수를 나타내도록 한다.
# 그러면 `printItems2(movie_Bong, 0)`을 실행하면 원하는 결과가 나와야 한다.
# * 탭 출력은 `print('\t')`를 이용하면 된다.
# <br><br>
# 1. 위 과제에서 구현한 `printItems2` 함수를 아래와 같이 수정하라.
# * 인자수를 세 개로 늘린다.
#
# ```python
# def printItems3(aList, level, indent=False):
# 명령문
# ```
#
# * `indent` 매개변수의 키워드인자 값이 `True`이면 연습4에서 처럼 들여쓰기를 하고,
# `False`이면 들여쓰기를 하지 않아야 한다.
# <br><br>
# 1. `printItems` 함수를 재귀가 아닌 `while` 함수를 이용하여 구현하라.
#
# **힌트:** `while` 반복문에 사용되는 조건식을 선택하는 게 핵심이다.
# 재귀로 구현된 함수로부터 이에대한 힌트를 얻을 수 있다.
| notebooks/_build/jupyter_execute/ch11.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + jupyter={"source_hidden": true} papermill={"duration": 0.021263, "end_time": "2021-03-22T19:42:07.987969", "exception": false, "start_time": "2021-03-22T19:42:07.966706", "status": "completed"} tags=["parameters"]
# Default parameter values; papermill overrides them via the
# injected-parameters cell that follows.
a = 1
b = 2
# + papermill={"duration": 0.010737, "end_time": "2021-03-22T19:42:08.005068", "exception": false, "start_time": "2021-03-22T19:42:07.994331", "status": "completed"} tags=["injected-parameters"]
# Parameters
a = 5
# + jupyter={"source_hidden": true} papermill={"duration": 0.009283, "end_time": "2021-03-22T19:42:08.018737", "exception": false, "start_time": "2021-03-22T19:42:08.009454", "status": "completed"} tags=[]
print(f"a*b={a*b}")
# + papermill={"duration": 2.023764, "end_time": "2021-03-22T19:42:10.046365", "exception": false, "start_time": "2021-03-22T19:42:08.022601", "status": "completed"} tags=[]
import time
# Print 1 and 2 with a one-second pause between iterations.
for i in range(1, 3):
    print(i)
    time.sleep(1)
# + jupyter={"source_hidden": true} papermill={"duration": 0.013821, "end_time": "2021-03-22T19:42:10.069622", "exception": false, "start_time": "2021-03-22T19:42:10.055801", "status": "completed"} tags=[]
# This cell contains commented Python code
| tests/tasks/jupyter/sample_notebook.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # Image Embedding Example Using Fiftyone
#
# This is an example showing how to visualize custom embedding, based on the input images, using Fiftyone and UMAP.
#
# * **required environment**: `machine-learning`
# +
import cv2
import numpy as np
import matplotlib.pyplot as plt
import fiftyone as fo
import fiftyone.zoo as foz
import fiftyone.brain as fob
# -
dataset = foz.load_zoo_dataset('mnist')
test_split = dataset.match_tags('test')
print(test_split)
# ## Generating the embeddings
#
# Here we generate the embeddings based on the images. Images are loaded, and stacked vertically in order to achieve the following embedding space: `num_samples x num_embedding_dims`.
# +
# Peek at one raw image and record its shape.
img = cv2.imread(test_split.values("filepath")[0], cv2.IMREAD_UNCHANGED)
plt.imshow(img, cmap='gray')
plt.title(f'Image shape: {img.shape}')
# Generate embeddings
# Each image is flattened with ravel() into one row, giving a
# (num_samples, num_pixels) embedding matrix.
embeddings = np.array([
    cv2.imread(f, cv2.IMREAD_UNCHANGED).ravel()
    for f in test_split.values("filepath")])
print(f'Size of the embedding: {embeddings.shape}')
# ## Calculate 2D representation of the embedding
#
# Here we use [UMAP](https://umap-learn.readthedocs.io/en/latest/) (Uniform Manifold Approximation and Projection for Dimension Reduction) to calculate a 2D visualization of the embedding. For more information regarding the `compute_visualization()` method, take a look at the documentation https://voxel51.com/docs/fiftyone/api/fiftyone.brain.html#fiftyone.brain.compute_visualization.
#
# Main parameters for the `compute_visualization()` method:
#
# * **embeddings**. pre-computed embeddings to use. Can be any of the following
# * a `num_samples x num_embedding_dims` array of embeddings
# * if `patches_field` is specified, a dict mapping sample IDs to num_patches x num_embedding_dims arrays of patch embeddings
# * the name of a dataset field containing the embeddings to use
#
#
# Compute 2D representation
results = fob.compute_visualization(
test_split,
embeddings = embeddings,
num_dims = 2,
method = "umap",
brain_key = "mnist_test",
verbose = True,
seed = 51,
)
print(type(results))
print(results.points.shape)
# +
# Launch App instance
# session = fo.launch_app(view=test_split)
# +
# Plot embeddings colored by ground truth label
plot = results.visualize(labels="ground_truth.label")
plot.show(height=720)
# Comment out the following line in order to get interactive figure for the embedding visualization
plot.freeze()
# Attach plot to session
# session.plots.attach(plot)
| Fiftyone_image_embedding_example.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Lists
# ### Basic Operations
# +
l=[1,2,3]
print(len(l)) #length
print("\n")
print(l*4) #repetition
print("\n")
print(l+[4,5,6]) #concatenation
print(str([1,2])+'34') # Same as '[1,2]'+'34'
print([1,2]+list("34")) # Same as [1,2]+["3","4"]
print("\n")
l=['dogs','cats','hens','camels']
del l[0] #Deletion of one item
print(l)
del l[1:] #Deletion of an entire section
print(l)
# -
# ### List Iterations and Comprehensions
# +
# To check the membership of an element in the list.
print(3 in [1,2,3])
print(4 in [1,2,3])
print("\n")
# Iteration
for x in [1,2,3,4,5]:
    print(x,end=' ')
print("\n")
# List Comprehension
res=[c*5 for c in "AYUSH"]
print(res)
#or
# Equivalent loop form of the comprehension above.
res=[]
for c in "AYUSH":
    res.append(c*5)
print(res)
print("\n")
print(list(zip(['a','b','c'],[1,2,3]))) #Zips together keys and values and make a list
# -
# #### All about map() Function
# The map() function returns a map object(which is iterable) of the results after applying the given function to each item of a given iterable(list,tuples,etc).
#
# The syntax is map(function,iterable)
# +
print(list(map(abs,[-1,-2,0,1,2])))
print("\n")
def addition(n):
return n+n
num=[1,2,3,4]
res=list(map(addition,num))
print(res)
print("\n")
#or
num=[1,2,3,4]
res=list(map(lambda x:x+x,num))
print(res)
print("\n")
num1=[1,2,3,4]
num2=[5,6,7,8]
res=list(map(lambda x,y:x+y, num1,num2))
print(res)
print("\n")
#Listifying the list of strings individually.
l=['dogs','cats','hens']
res=list(map(list,l))
print(res)
# -
# **lambda()** - It is used to define an anonymous function in Python. This function can have any number of arguments but only one expression, which is evaluated and returned.
# ### Indexing,Slicing and Matrices
# +
#Indexing and Slicing
l=['dogs','cats','hens']
print(l[2],l[-2])
print(l[1:])
print("\n")
#Index and slice assignments
l[1]='horse' #Index assignment
print(l)
l[0:2]=['camel','owl'] #Slice assignment(delete+insert)
print(l)
l[1:2]=['donkey','crow'] #Replacement/insertion
print(l)
l[1:1]=['zebra','crocodile'] #Insertion(replace nothing)
print(l)
l[1:3]=[] #Deletion(insert nothing)
print(l)
print("\n")
l=[1]
l[:0]=[2,3,4] #Insert all at first
print(l)
l[len(l):]=[5,6,7] #Insert all at end
print(l)
print("\n")
#Matrices
matrix=[[1,2,3],[4,5,6],[7,8,9]]
print(matrix[1])
print(matrix[1][1],matrix[2][0])
# -
# ### List Method Calls
# +
#append and sort method calls
l=['dogs','hens','cats']
l.append('horse') #append adds the element at the end of the list
print(l)
l.sort()
'''Sorts the list alphabetically,i.e, c>d>h=h. Now since the third
and fourth element have the same starting letter 'h', it will compare
the next letter. We see that e>o, therefore, hens comes before horse'''
print(l)
l.append('Dogs')
l.sort()
'''D>c>d hence 'Dogs' will be placed first followed by cats and
then dogs'''
print(l)
print("\n")
l=['abc','ABD','aBe']
l.sort() #Sort with mixed case
print(l)
l.sort(key=str.lower) #Normalize to lowercase
print(l)
l.sort(key=str.lower,reverse=True) #Change sort order
print(l)
print("\n")
l=['abc','ABD','aBe']
print(sorted(l,key=str.lower,reverse=True))
l=['abc','ABD','aBe']
print(sorted([x.lower() for x in l],reverse=True))
'''Notice the last example here—we can convert to lowercase prior
to the sort with a list comprehension, but the result does not
contain the original list’s values as it does with the key argument.
The latter is applied temporarily during the sort, instead of
changing the values to be sorted altogether'''
print("\n")
#extend,pop,reverse,index,remove and count method calls
l=[1,2]
l.extend([3,4,5])
print(l)
print(l.pop())
print(l)
l.reverse()
print(l)
print("\n")
l=['dogs','cats','hens']
print(l.index('cats')) #Returns the index of a particular object in the list
l.insert(1,'dogs') #Insert at position(without deletion)
print(l)
l.remove('cats') #Delete by value
print(l)
print(l.count('dogs')) #Number of occurrences of a particular element
print("\n")
# -
# # Dictionaries
# ### Basic Operations
# +
D={'dogs':2,'cats':1,'hens':3} #Dictionary
print(D['dogs']) #Fetch a value by key
print(D)
print(len(D))
print("\n")
# Key Membership
print('cats' in D)
print('camels' in D)
print("\n")
D['hens']=['grill','roast','fry'] # Value in the form of list
print(D)
print(D['hens'][1])
print("\n")
del D['cats'] # Delete entry
print(D)
print("\n")
D['pigs']='pork' # Add new entry
print(D)
print("\n")
# Value in the form of dictionary (nested dictionary)
D['cows']={'color':'black','meat':'beef','price':12345}
print(D['cows']['price'])
# -
# ### Dictionary Methods
# +
print(list(D.keys())) # Returns the key values in the form of list
print(list(D.values())) # Returns the values in the form of list
print(list(D.items())) # Returns the items(key+values) in the form of list
print("\n")
print(D.get('pigs')) # get() returns the value corresponding to the specified key
print(D.get('camels')) # Returns None if no such key exists in the dictionary
print(D.get('monkey',9))
print("\n")
D1={'crow':4,'crocodile':2}
D.update(D1) # update() merges the keys and values of the dictionary into another
print(D)
print("\n")
D.pop('hens') # pop() deletes a key from the dictionary
print(D)
print("\n")
# +
table = {'1975': 'Holy Grail','1979': 'Life of Brian',
'1983': 'The Meaning of Life'}
year = '1983'
movie = table[year]
print(movie)
for year in table:
print(year + '\t' + table[year])
# -
table = {'Holy Grail':'1975','Life of Brian':'1979',
'The Meaning of Life': '1983'}
[title for (title, year) in table.items() if year == '1975']
# ### Dictionary Comprehensions
# +
print(dict(zip(['a','b','c'],[1,2,3]))) # Zips together keys and values and make a dict
print({k:v for (k,v) in zip(['a','b','c'],[1,2,3])})
print("\n")
print({x:x**2 for x in [1,2,3,4]})
print({c:c*4 for c in 'SPAM'})
print({c.lower(): c + '!' for c in ['DOGS','CATS','HENS']})
# +
D1=dict.fromkeys(['a','b','c'],0) # Initialize dictionary from keys
print(D1)
D2={k:0 for k in ['a','b','c']} # Same, but with a comprehension
print(D2)
print("\n")
D3=dict.fromkeys('dogs') # Sets default value as None
print(D3)
D4={k:None for k in ['dogs']}
print(D3)
# -
# # Dictionaries Verses Lists
# NOTE: the next assignment raises IndexError -- a list cannot be assigned
# past its current end; the cell deliberately shows the failure before
# contrasting it with the dict version just after.
l=[]
l[99]='dogs'
print(l)
d={}
d[99]='dogs'
print(d)
# Using Dictionaries for sparse data structures: Tuple keys
Matrix = {}
Matrix[(2, 3, 4)] = 88
Matrix[(7, 8, 9)] = 99
X = 2; Y = 3; Z = 4
print(Matrix[(X, Y, Z)])
print(Matrix)
# Here, we’ve used a dictionary to represent a three-dimensional array that is empty except for the two positions (2,3,4) and (7,8,9) . The keys are tuples that record the coordinates of nonempty slots. Rather than allocating a large and mostly empty three-dimensional matrix to hold these values, we can use a simple two-item dictionary. In this scheme, accessing an empty slot triggers a nonexistent key exception, as these slots are not physically stored:
# Intentional demonstration: accessing an unset coordinate raises KeyError.
print(Matrix[(2,3,6)])
# Although both are flexible collections of other objects, lists assign items to positions, and dictionaries assign them to more mnemonic keys. Because of this, dictionary data often carries more meaning to human readers.
#
# In practice, dictionaries tend to be best for data with labeled components, as well as
# structures that can benefit from quick, direct lookups by name, instead of slower linear
# searches. As we’ve seen, they also may be better for sparse collections and collections
# that grow at arbitrary positions.
# # Tuples
# Tuples construct simple groups of objects. They work exactly like lists, except that tuples can’t be changed in place (they’re immutable) and are usually written as a series of items in parentheses, not square brackets. Although they don’t support as many methods, tuples share most of their properties with lists.
# +
t1=(1,2)+(3,4) # Concatenation
print(t1)
print(t1[0],t1[1:3]) # Indexing and slicing
print("\n")
t2=(1,2)*4 # Repetition
print(t2)
# -
t=('dogs','cats','hens','camels')
tmp=list(t)
tmp.sort()
print(tmp)
t=tuple(tmp)
print(t)
# List comprehensions can be used to convert tuples. Foe example:
t=(1,2,3,4,5)
l=[x*5 for x in t]
print(l)
t=(1,2,3,2,4,2)
print(t.index(2)) # Offset of first appearance of 2
print(t.index(2,4)) # Offset of first appearance after offset 4
print(t.count(2))
t=(1,[2,3],4)
# NOTE: the next line raises TypeError (tuples are immutable); the notebook
# shows the error on purpose before mutating the nested list in place.
t[1]='spam' # This fails because we cannot change tuple itself
t[1][0]='spam' # This works because we can change mutables inside
print(t)
| Lists, Dictionaries and Tuples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/tanyadixit21/Deep-Learning-RNN-tasks/blob/master/RNN_introduction.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="0cdQvSrtGJj4" colab_type="text"
# In this notebook we discuss different RNN architectures and see many optimization techniques as well.
#
# For data, we have many options like the nltk.corpus package
# https://www.nltk.org/book/ch02.html
#
# We will use the brown dataset.
#
# https://www.nltk.org/book/ch05.html
#
# + id="R9NHGifxKpti" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="9b1e9b85-fa65-4e37-e2b3-82c3e5b25186"
# !pip install sklearn
# + id="Fr0mZQw4GDKI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 102} outputId="12244435-06f3-4e4f-8278-776215653a13"
import nltk
import sys
import numpy as np
nltk.download('brown')
nltk.download('universal_tagset')
# + id="yIayQ7FnG5NP" colab_type="code" colab={}
# Brown corpus as a sequence of sentences, each a list of (word, POS-tag) pairs.
data = nltk.corpus.brown.tagged_sents(tagset='universal') #this will give sentences, use nltk.corpus.brown.tagged_words(tagset='universal') for random words
# + id="PB4J2T5yG5Qr" colab_type="code" colab={}
# Universal tagset plus two synthetic tokens: #EOS# (padding) and #UNK# (out-of-vocabulary).
all_tags = ['#EOS#','#UNK#','ADV', 'NOUN', 'ADP', 'PRON', 'DET', '.', 'PRT', 'VERB', 'X', 'NUM', 'CONJ', 'ADJ']
# + id="YWPvgIfGG5T6" colab_type="code" colab={}
# dtype=object keeps the ragged list-of-sentences as a 1-D object array;
# modern NumPy (>=1.24) raises on implicit ragged-array creation without it.
data = np.asarray([[(word.lower(), tag) for word, tag in sentence] for sentence in data], dtype=object)
# + id="faGS1QjRG5XJ" colab_type="code" colab={}
from sklearn.model_selection import train_test_split
train_data,test_data = train_test_split(data,test_size=0.25,random_state=42) #split into train and test dataset
# + id="WoGm6c3CG5Z_" colab_type="code" colab={}
from collections import Counter
# + id="KNsx0qxiMf-T" colab_type="code" colab={}
# Count corpus-wide word frequencies to pick the vocabulary below.
word_counter = Counter()
for sentence in data: #since data is an array of sentences
    words, tags = zip(*sentence) #extract the words and tags individually
    word_counter.update(words)
# + id="t28bLblNMgA2" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 34} outputId="e8b841f4-b052-48c4-ebab-a081b3e9a2cf"
# Vocabulary: the 20k most frequent words, prefixed by the special tokens so
# that #EOS# maps to id 0 (used as padding) and #UNK# to id 1.
all_words = ['#EOS#','#UNK#'] + list(list(zip(*word_counter.most_common(20000)))[0])#to make it an array
#let's measure what fraction of data words are in the dictionary
print("Coverage = %.5f"%(float(sum(word_counter[w] for w in all_words)) / sum(word_counter.values())))
# + id="J4lYwivWQRu2" colab_type="code" colab={}
from collections import defaultdict
# Out-of-vocabulary words fall back to id 1 (#UNK#); known words get their index.
word_to_id = defaultdict(lambda:1 , {word:i for i,word in enumerate(all_words)}) #we use default value as 1 as the id for words not in dictionary
tag_to_id = {tag:i for i,tag in enumerate(all_tags)}
# + [markdown] id="ZHbhgBoDRN05" colab_type="text"
# Till this point, we have our words, our tags and our ids. Now we need to create matrices for input as well as output.
# + id="7ry9ph11MgNF" colab_type="code" colab={}
def to_matrix(lines,token_to_id,max_len=None,pad=0,dtype='int32',time_major=False):
    """Convert a list of token sequences into a padded 2-D id matrix.

    Sequences longer than the target width are truncated; shorter ones are
    right-padded with `pad`. With time_major=True the transpose [time, batch]
    is returned instead of [batch, time].
    """
    # Width defaults to the longest sequence when max_len is absent/falsy.
    width = max_len or max(map(len, lines))
    out = np.empty([len(lines), width], dtype)
    out.fill(pad)
    for row, line in enumerate(lines):
        ids = [token_to_id[tok] for tok in line][:width]
        out[row, :len(ids)] = ids
    return out.T if time_major else out
# + id="-0D0YIG1MgQI" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 493} outputId="46daf8c2-353f-4454-f975-cdbb8beafb70"
batch_words,batch_tags = zip(*[zip(*sentence) for sentence in data[-3:]]) #3 sentences for testing
print("Word ids:")
print(to_matrix(batch_words,word_to_id))
print(to_matrix(batch_words,word_to_id).shape)
print("Tag ids:")
print(to_matrix(batch_tags,tag_to_id))
print(to_matrix(batch_tags,tag_to_id).shape)
# + id="MoP6HWvFMgMD" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 105} outputId="d974c225-4eb0-4ef3-d206-683c1b65daf5"
import keras
import keras.layers as L
# Unidirectional tagger: embedding -> SimpleRNN -> per-timestep softmax over tags.
model = keras.models.Sequential()
model.add(L.InputLayer([None],dtype='int32'))  # variable-length sequences of int token ids
model.add(L.Embedding(len(all_words),50))  # 50-dim word embeddings
model.add(L.SimpleRNN(64,return_sequences=True))  # one 64-dim output per timestep
#add top layer that predicts tag probabilities
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)  # same dense head applied at every timestep
model.add(stepwise_dense)
# + [markdown] id="iyIzQXHNaeKg" colab_type="text"
# #Batch Training
#
# Training: in this case we don't want to prepare the whole training dataset in advance. The main reason is that the length of every batch depends on the maximum sentence length within that batch. This leaves us two options: use custom training code, or use generators.
#
# Keras models have a model.fit_generator method that accepts a python generator yielding one batch at a time. But first we need to implement such generator:
# + id="02MgO0obMgLI" colab_type="code" colab={}
from keras.utils.np_utils import to_categorical
BATCH_SIZE=32
def generate_batches(sentences,batch_size=BATCH_SIZE,max_len=None,pad=0):
    """Endlessly yield (word-id matrix, one-hot tag tensor) mini-batches.

    Sentences are reshuffled on every pass; each batch is padded only to the
    longest sentence it contains (unless max_len is given).
    """
    assert isinstance(sentences,np.ndarray),"Make sure sentences is a numpy array"
    while True:
        # Fresh random order of sentence indices for every epoch.
        order = np.random.permutation(np.arange(len(sentences)))
        for offset in range(0, len(order) - 1, batch_size):
            chosen = order[offset:offset + batch_size]
            word_seqs, tag_seqs = [], []
            for sent in sentences[chosen]:
                w, t = zip(*sent)
                word_seqs.append(w)
                tag_seqs.append(t)
            word_mat = to_matrix(word_seqs, word_to_id, max_len, pad)
            tag_mat = to_matrix(tag_seqs, tag_to_id, max_len, pad)
            # One-hot encode tags: shape [batch, time] -> [batch, time, n_tags].
            tags_1hot = to_categorical(tag_mat, len(all_tags)).reshape(tag_mat.shape + (-1,))
            yield word_mat, tags_1hot
# + [markdown] id="79xYIFRidFe_" colab_type="text"
# https://adventuresinmachinelearning.com/keras-lstm-tutorial/
# + id="lDt5AXp5MgKJ" colab_type="code" colab={}
def compute_test_accuracy(model):
    """Return the fraction of non-padding test tokens tagged correctly by `model`."""
    words_all, tags_all = zip(*[zip(*sentence) for sentence in test_data])
    words_all, tags_all = to_matrix(words_all, word_to_id), to_matrix(tags_all, tag_to_id)
    # Predicted tag probabilities have shape [batch, time, n_tags];
    # argmax over the last axis gives hard tag ids.
    probabilities = model.predict(words_all, verbose=1)
    predictions = probabilities.argmax(axis=-1)
    # Accuracy over real tokens only: word id 0 marks padding positions.
    correct = np.sum(np.logical_and((predictions == tags_all), (words_all != 0)))
    total = np.sum(words_all != 0)
    return float(correct) / total
class EvaluateAccuracy(keras.callbacks.Callback):
    """Keras callback: print test-set tagging accuracy at the end of each epoch."""
    def on_epoch_end(self,epoch,logs=None):
        # Flush before/after so the message is not interleaved with the progress bar.
        sys.stdout.flush()
        print("\nMeasuring validation accuracy...")
        acc = compute_test_accuracy(self.model)
        print("\nValidation accuracy: %.5f\n"%acc)
        sys.stdout.flush()
# + id="p4rQk3Y_MgDP" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 785} outputId="c2c9c620-df76-4138-87b5-1e2f1ad27c7c"
model.compile('adam','categorical_crossentropy')
# steps_per_epoch must be an integer: '/' yields a float, which newer Keras
# versions reject; '//' gives the intended whole number of batches.
model.fit_generator(generate_batches(train_data),len(train_data)//BATCH_SIZE,
                    callbacks=[EvaluateAccuracy()], epochs=5,)
# + id="7wpWaRSta8Ce" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="f1f18eb9-b1a6-4b3a-caf2-eecc0196bfee"
acc = compute_test_accuracy(model)
print("Final accuracy: %.5f"%acc)
# + id="9hyNdqZUa8Np" colab_type="code" colab={}
# Bidirectional variant: forward and backward SimpleRNN outputs are concatenated.
birnn = keras.models.Sequential()
birnn.add(L.InputLayer([None],dtype='int32'))  # variable-length int token ids
birnn.add(L.Embedding(len(all_words),50))
birnn.add(L.Bidirectional(L.SimpleRNN(64,return_sequences=True), merge_mode='concat', weights=None))
#add top layer that predicts tag probabilities
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
birnn.add(stepwise_dense)
# + id="EmLaGVRla8P5" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 714} outputId="2f21ed5e-87d4-45c4-fd27-d9a805ca792c"
birnn.compile('adam','categorical_crossentropy')
# Integer floor division: steps_per_epoch must be an int (see the first fit cell).
birnn.fit_generator(generate_batches(train_data),len(train_data)//BATCH_SIZE,
                    callbacks=[EvaluateAccuracy()], epochs=5,)
# + id="Oy_nTSIqa8Ss" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 51} outputId="349c2dec-f4bd-4f79-de95-1f402ff7ffc3"
# Evaluate the model that was just trained (birnn) — the original accidentally
# re-evaluated the earlier SimpleRNN `model`, reporting the wrong accuracy.
acc = compute_test_accuracy(birnn)
print("Final accuracy: %.5f"%acc)
# + id="Ux92jXxhkkm7" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 85} outputId="1dbfba45-5d50-4e3d-e05d-ad5a44dba39d"
# !pip install theano
# + id="1F8dLqRCg94K" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 554} outputId="7b8a03e9-be06-4e76-b467-cbf624e60352"
bi_lstm = keras.models.Sequential()
bi_lstm.add(L.InputLayer([None],dtype='int32'))
bi_lstm.add(L.Embedding(len(all_words),50))
# return_sequences=True is required: the TimeDistributed softmax head below
# needs one output per timestep, not just the final LSTM state (without it
# the layer emits a 2-D tensor and the tagger cannot produce per-word tags).
bi_lstm.add(L.LSTM(64, recurrent_initializer='orthogonal', return_sequences=True))
# NOTE(review): despite the name, this layer is not wrapped in L.Bidirectional;
# wrap it if a bidirectional LSTM was actually intended.
#add top layer that predicts tag probabilities
stepwise_dense = L.Dense(len(all_tags),activation='softmax')
stepwise_dense = L.TimeDistributed(stepwise_dense)
bi_lstm.add(stepwise_dense)
# + id="kmHJb5ALg97w" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 214} outputId="58f6071f-aa2f-491f-8296-39f98cc2383b"
bi_lstm.compile('adam','categorical_crossentropy')
# Integer floor division: steps_per_epoch must be an int (see the first fit cell).
bi_lstm.fit_generator(generate_batches(train_data),len(train_data)//BATCH_SIZE,
                      callbacks=[EvaluateAccuracy()], epochs=5,)
# + id="rlPnQtxPg9-2" colab_type="code" colab={}
| RNN_introduction.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + [markdown] id="zHz2jSzL9pRL"
# ## Preprocessing
# + colab={"base_uri": "https://localhost:8080/", "height": 325} id="n21np-9Y9pRS" outputId="2ac99fa4-c846-4473-b1f2-610c604f1999"
# Import our dependencies
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import pandas as pd
import tensorflow as tf
# Import and read the charity_data.csv.
import pandas as pd  # NOTE(review): pandas is already imported above; harmless duplicate
application_df = pd.read_csv("../Resources/charity_data.csv")
application_df.head()
# + id="WXVVqP8H9pRV"
# Drop the non-beneficial ID columns, 'EIN' and 'NAME'.
# YOUR CODE GOES HERE
application_df = application_df.drop(columns = ['EIN', 'NAME'])
# + colab={"base_uri": "https://localhost:8080/"} id="-7DnoVPxTeCI" outputId="4bd71d7d-aa9e-40e8-e8ef-98737bcafd43"
# Determine the number of unique values in each column.
# NOTE(review): this loop duplicates the .nunique() call in the next cell.
for x in application_df.columns:
    print(x, len(application_df[x].unique()))
# + colab={"base_uri": "https://localhost:8080/"} id="DJ21PMyp9pRW" outputId="e9febf4d-e363-4cd2-8b0c-ca7289ebec1f"
application_df.nunique()
# + colab={"base_uri": "https://localhost:8080/"} id="OIngJXXz9pRX" outputId="44ad325c-7fbc-46a6-d2bd-72ce6756c64c"
# Look at APPLICATION_TYPE value counts for binning
counts = application_df['APPLICATION_TYPE'].value_counts()
counts
# + colab={"base_uri": "https://localhost:8080/"} id="tlrHG7CyB5aQ" outputId="40f8765e-7850-4d82-8257-613c713d73ca"
# Choose a cutoff value and create a list of application types to be replaced
# use the variable name `application_types_to_replace`
# Rare application types (fewer than 500 rows) are collapsed into "Other".
application_types_to_replace = list(counts[counts<500].index)
application_types_to_replace
# + colab={"base_uri": "https://localhost:8080/"} id="BybZ6e0s9pRX" outputId="8104d61b-a0ff-4512-cae4-eb8d2f2860d7"
# Replace in dataframe
for app in application_types_to_replace:
    application_df['APPLICATION_TYPE'] = application_df['APPLICATION_TYPE'].replace(app,"Other")
# Check to make sure binning was successful
application_df['APPLICATION_TYPE'].value_counts()
# + id="DYPOBMtR9pRY"
# Look at CLASSIFICATION value counts for binning
countsbinning = application_df['CLASSIFICATION'].value_counts()
# + colab={"base_uri": "https://localhost:8080/"} id="0tkVmLs59pRZ" outputId="28315f13-07d3-4d4c-ede6-51251ea944dc"
# You may find it helpful to look at CLASSIFICATION value counts >1
countsclassification = countsbinning[countsbinning>1]
countsclassification
# + colab={"base_uri": "https://localhost:8080/"} id="7xIL9ORqC4pO" outputId="8ad43f8b-3174-4bfc-e5ea-5009f2130127"
# Choose a cutoff value and create a list of classifications to be replaced
# use the variable name `classifications_to_replace`
# Rare classifications (fewer than 100 rows) are collapsed into "Other".
classifications_to_replace = list (countsbinning[countsbinning<100].index)
classifications_to_replace
# + colab={"base_uri": "https://localhost:8080/"} id="jXn32dzf9pRb" outputId="a1232e1a-a874-4b4c-e442-ebce3152a091"
# Replace in dataframe
for cls in classifications_to_replace:
    application_df['CLASSIFICATION'] = application_df['CLASSIFICATION'].replace(cls,"Other")
# Check to make sure binning was successful
application_df['CLASSIFICATION'].value_counts()
# + colab={"base_uri": "https://localhost:8080/", "height": 242} id="0lM-ku9j9pRc" outputId="61ff3ac5-c78c-4bed-8b0a-e8b9f64418e0"
# Convert categorical data to numeric with `pd.get_dummies`
application_df = pd.get_dummies(application_df,dtype=float)
application_df.head()
# + colab={"base_uri": "https://localhost:8080/"} id="8aC6XYq19pRc" outputId="c0e71df2-f054-4126-83f1-8bc67c8d4139"
# Split our preprocessed data into our features and target arrays
# Target: the binary IS_SUCCESSFUL flag.
y = application_df['IS_SUCCESSFUL'].values
y
# + colab={"base_uri": "https://localhost:8080/"} id="JZQXbOCGHXX5" outputId="cd2e25cd-10fc-4d7a-d759-3d57db73eb5b"
# drop 'IS SUCCESSFUL'
X = application_df.drop('IS_SUCCESSFUL', axis=1).values
X
# + id="SaG-HmZmH42P"
# Split the preprocessed data into a training and testing dataset
X_train, X_test, y_train, y_test = train_test_split(X,y,random_state = 42)
# + id="VmNLUOl69pRd"
# Create a StandardScaler instances
scaler = StandardScaler()
# Fit the StandardScaler
# Fit on the training split only, so no test-set statistics leak into scaling.
X_scaler = scaler.fit(X_train)
# Scale the data
X_train_scaled = X_scaler.transform(X_train)
X_test_scaled = X_scaler.transform(X_test)
# + [markdown] id="sbJ1sngg9pRe"
# ## Compile, Train and Evaluate the Model
# + colab={"base_uri": "https://localhost:8080/"} id="lhP640e19pRe" outputId="eb1495e9-1c71-40a0-f49c-76f4ab6038c6"
# Define the model - deep neural net, i.e., the number of input features and hidden nodes for each layer.
number_input_features = len( X_train_scaled[0])
hidden_nodes_layer1=7
hidden_nodes_layer2=14
# NOTE(review): hidden_nodes_layer3 is defined but no third hidden layer is
# added below — either add that layer or drop the constant once intent is confirmed.
hidden_nodes_layer3=21
# Fix: the Sequential model was instantiated twice in a row; once is enough.
nn = tf.keras.models.Sequential()
# First hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer1, input_dim=number_input_features, activation='relu'))
# Second hidden layer
nn.add(tf.keras.layers.Dense(units=hidden_nodes_layer2, activation='relu'))
# Output layer
nn.add(tf.keras.layers.Dense(units=1, activation='sigmoid'))
# Check the structure of the model
nn.summary()
# + id="ZDyi8gUB9pRf"
# Compile the model
nn.compile(loss = 'binary_crossentropy', optimizer = 'adam', metrics=['accuracy'])
# + colab={"base_uri": "https://localhost:8080/"} id="43aY5Byv9pRf" outputId="a9ffed90-48cb-4c2e-86d4-22c6a897311b"
# Train the model
# 15% of the training split is held out for per-epoch validation metrics.
fit_model = nn.fit(X_train_scaled,y_train,validation_split=0.15, epochs=100)
# + colab={"base_uri": "https://localhost:8080/"} id="fa94y6Ck9pRg" outputId="cb1f02b9-5b80-4c2c-aead-7044155069d6"
# Evaluate the model using the test data
model_loss, model_accuracy = nn.evaluate(X_test_scaled,y_test,verbose=2)
print(f"Loss: {model_loss}, Accuracy: {model_accuracy}")
# + colab={"base_uri": "https://localhost:8080/", "height": 17} id="8UHbL_UW9pRh" outputId="ea255530-f9d1-431b-a232-95a1f7000daf"
# Export our model to HDF5 file
# NOTE(review): google.colab is only available inside a Colab runtime.
from google.colab import files
nn.save('/content/Model_1.h5')
files.download('/content/Model_1.h5')
| Starter_Code.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import os, sys, shutil, glob, string, ntpath
# +
path = '/home/mapuser/ai_imagery/NearMaps/bldgs/'
# NOTE(review): the assignment above is immediately overwritten — dead code.
path = '/home/mapuser/ai_imagery/NearMaps/bldgs_0_35000/'
# Building-id tiles and their corresponding *_mask.png files.
pattern = '{}*bldid*.png'.format(path)
bldgs = glob.glob(pattern)
pattern_i = '{}*_mask.png'.format(path)
masks = glob.glob(pattern_i)
# -
# Sanity check: show a few of the matched mask paths.
for el in masks[:5]:
    print(el)
# +
# Move every *_mask.png into a sibling 'bldgs_masks/' folder, creating it on demand.
for mask in masks:
    head, tail = ntpath.split(mask)
    rooter = head.split('/')
    aaa = rooter[:-1]  # parent directory of the folder holding the mask
    rr = '/'.join(aaa)
    tail_png = '{}.png'.format(tail.split('.')[0])  # normalize extension to a single '.png'
    dst_folder = '{}/bldgs_masks/'.format(rr)
    print(dst_folder)
    maper = '{}{}'.format(dst_folder,tail_png)
    try:
        if os.path.isdir(dst_folder):
            pass
        else:
            print('{} folder does not exists'.format(dst_folder))
            os.makedirs(dst_folder)
    except Exception as ex:
        print(ex)
    finally:
        # NOTE(review): 'finally' runs even when makedirs failed above, so this
        # move can still raise if the destination folder could not be created.
        mask_dst_fldr = maper
        shutil.move(mask, mask_dst_fldr)
# print(tail_png, maper)
# -
# http://christopherlovell.co.uk/blog/2016/04/27/h5py-intro.html
# +
# Pair each mask with its source image and move both into a per-image folder.
# NOTE(review): as shown, this cell references names not defined anywhere in
# this file (mask_files, dst_folder, srs_imgs, and the un-imported logging
# module) — it appears to rely on earlier interactive session state; confirm
# those bindings before running.
for mask in mask_files:
    # Get Mask Base File
    file_mask_name = os.path.basename(mask)
    # Generate Prototype name
    proto_name = file_mask_name.replace('_mask.png', '')
    if os.path.isdir(dst_folder + proto_name):
        # Destination folder already exists: move the mask, then its image.
        mask_dst_fldr = dst_folder + proto_name + '/' + file_mask_name
        shutil.move(mask, mask_dst_fldr)
        # print(mask, mask_dst_fldr)
        print('duplicate but move')
        srs_img = srs_imgs + proto_name + '.png'
        if os.path.isfile(srs_img):
            img = os.path.basename(srs_img)
            dst_img = dst_folder + proto_name + '/' + img
            print(srs_img, dst_img)
            shutil.move(srs_img, dst_img)
        else:
            err_msg = 'missing {}'.format(srs_img)
            logging.error(err_msg)
    else:
        # Fresh destination: create the folder first, then move mask + image.
        try:
            os.makedirs(dst_folder + proto_name)
            mask_dst_fldr = dst_folder + proto_name + '/' + file_mask_name
            shutil.move(mask, mask_dst_fldr)
            srs_img = srs_imgs + proto_name + '.png'
            if os.path.isfile(srs_img):
                img = os.path.basename(srs_img)
                dst_img = dst_folder + proto_name + '/' + img
                shutil.move(srs_img, dst_img)
            else:
                err_msg = 'missing {}'.format(srs_img)
                logging.error(err_msg)
        except Exception as ex:
            logging.error(str(ex))
# -
| books_201806/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Sample supervised segmentation on Gray images
# Image segmentation is widely used as an initial phase of many image processing tasks in computer vision and image analysis. Many recent segmentation methods use superpixels, because they reduce the size of the segmentation problem by an order of magnitude. In addition, features on superpixels are much more robust than features on pixels only. We use spatial regularization on superpixels to make segmented regions more compact. The segmentation pipeline comprises: (i) computation of superpixels; (ii) extraction of descriptors such as color and texture; (iii) soft classification, using a standard classifier for supervised learning; (iv) final segmentation using Graph Cut. We use this segmentation pipeline on four real-world applications in medical imaging. We also show that unsupervised segmentation is sufficient for some situations, and provides similar results to those obtained using trained segmentation.
#
# <NAME>., <NAME>., <NAME>., & <NAME>. (2017). **Supervised and unsupervised segmentation using superpixels, model estimation, and Graph Cut.** Journal of Electronic Imaging.
# %matplotlib inline
import os, sys, glob, time
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from skimage.segmentation import mark_boundaries
sys.path += [os.path.abspath('.'), os.path.abspath('..')] # Add path to root
import imsegm.utilities.data_io as tl_data
import imsegm.pipelines as segm_pipe
# ## Load image
path_dir = os.path.join(tl_data.update_path('data_images'), 'drosophila_ovary_slice')
path_images = os.path.join(path_dir, 'image')
print ([os.path.basename(p) for p in glob.glob(os.path.join(path_images, '*.jpg'))])
# loading images
# Only the first channel ([:, :, 0]) is kept — grayscale input for the pipeline.
path_img = os.path.join(path_images, 'insitu7545.jpg')
img = np.array(Image.open(path_img))[:, :, 0]
path_img = os.path.join(path_images, 'insitu4174.jpg')
img2 = np.array(Image.open(path_img))[:, :, 0]
# loading annotations
# insitu7545 has a label annotation and serves as the single training example.
path_annots = os.path.join(path_dir, 'annot_struct')
path_annot = os.path.join(path_annots, 'insitu7545.png')
annot = np.array(Image.open(path_annot))
# Show that training example with annotation and testing image
# Figure size preserves the image aspect ratio (longest side scaled to 8).
FIG_SIZE = (8. * np.array(img.shape[:2]) / np.max(img.shape))[::-1]
fig = plt.figure(figsize=FIG_SIZE * 3)
_= plt.subplot(1,3,1), plt.imshow(img, cmap=plt.cm.Greys_r), plt.contour(annot, colors='y')
_= plt.subplot(1,3,2), plt.imshow(annot, cmap=plt.cm.jet)
_= plt.subplot(1,3,3), plt.imshow(img2, cmap=plt.cm.Greys_r)
# ## Segment Image
# Set segmentation parameters:
# Superpixel parameters: approximate superpixel size and regularisation strength.
sp_size = 25
sp_regul = 0.2
# Per-superpixel features: colour statistics plus 'tLM' texture means.
dict_features = {'color': ['mean', 'std', 'median'], 'tLM': ['mean']}
# Train the classifier
# A single annotated image (img, annot) is used as training data here.
classif, list_slic, list_features, list_labels = segm_pipe.train_classif_color2d_slic_features([img], [annot],
          sp_size=sp_size, sp_regul=sp_regul, dict_features=dict_features, pca_coef=None)
# Perform the segmentation with trained classifier
# dict_debug is filled with intermediate results for the visualisation below.
dict_debug = {}
seg, _ = segm_pipe.segment_color2d_slic_features_model_graphcut(img2, classif, sp_size=sp_size, sp_regul=sp_regul,
             gc_regul=1., dict_features=dict_features, gc_edge_type='model', debug_visual=dict_debug)
fig = plt.figure(figsize=FIG_SIZE)
plt.imshow(img2, cmap=plt.cm.Greys_r)
plt.imshow(seg, alpha=0.6, cmap=plt.cm.jet)
_= plt.contour(seg, levels=np.unique(seg), colors='w')
# ## Visualise intermediate steps
print ('debug fields: %s' % repr(dict_debug.keys()))
plt.figure(), plt.imshow(mark_boundaries(img2, dict_debug['slic'])), plt.title('SLIC')
plt.figure(), plt.imshow(dict_debug['slic_mean']), plt.title('SLIC mean')
plt.figure(), plt.imshow(dict_debug['img_graph_edges']), plt.title('graph edges')
for i, im_u in enumerate(dict_debug['imgs_unary_cost']):
    plt.figure(), plt.title('unary cost: %i' % i), plt.imshow(im_u)
# plt.figure(), plt.imshow(dict_debug['img_graph_segm'])
| notebooks/segment-2d_slic-fts-classif-gc.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# <h3>Data set #6:<br>
# Investment in tangible fixed assets at current prices (2015) <br>
# http://osp.stat.gov.lt/en/statistiniu-rodikliu-analize?portletFormName=visualization&hash=061b4c9b-03d7-4303-84b0-2a39bc730e00</h3>
#pulling csv data from file
import csv
filename='Source_datasets/Exported matrix table_d6.csv'
# Read the entire CSV into a list of row lists.
with open(filename, 'r') as f:
    data = list(csv.reader(f))
    #print (data)
    #print (f)
# Echo every row for inspection.
for row_num in range(0,len(data)):
    print (data[row_num])
# +
#remove aggregated rows, as aggregation will take place later e.g. in pivotjs
keysList=['Republic','county']
tobedeletedList=[]
updated_lengh=len(data)
# Collect indices of rows whose first cell mentions an aggregation keyword.
for row_num in range(0,updated_lengh):
    for key in keysList:
        #print (key)
        if key in data[row_num][0]:
            tobedeletedList.append(row_num)
            #tobedeletedList.append(row_num+1)#for 2015 rows
print (tobedeletedList)
# Blank the aggregated rows here; the actual deletion happens in the next cell.
for aggregated_row_num in tobedeletedList:
    #print (data[aggregated_row_num])
    data[aggregated_row_num]=['']
#tobedeletedList[:] = [x - 1 for x in tobedeletedList]
#print (tobedeletedList)
for row_num in range(0,len(data)):
    print (data[row_num])
# +
#permanent clean of unwanted rows in dataset
# Rows with fewer than 3 cells (the blanked rows above) are removed for good.
tobedeletedList=[]
for row_num in range (1,len(data)):
    if (len(data[row_num])<3):#regarding temporary header size
        #print ("row num{}:{}".format(i,data[row_num]))
        tobedeletedList.append(row_num)
print (tobedeletedList)
# After each deletion every later index shifts left by one, so the whole
# (ascending) index list is decremented in lockstep while being iterated.
for aggregated_row_num in tobedeletedList:
    #print (data[aggregated_row_num])
    del data[aggregated_row_num]
    tobedeletedList[:] = [x - 1 for x in tobedeletedList]
    #print (tobedeletedList)
for row_num in range(0,len(data)):
    print (data[row_num])
# -
#add missing country values
# Every data row (header excluded) gets the country name appended.
for row in data[1:]:
    row.append('Republic of Lithuania')
for row in data:
    print (row)
# +
#add missing county values
# Each half-open index span [start, stop) of rows belongs to one county.
_county_spans = [
    ((1, 6), 'Alytus county'),
    ((6, 14), 'Kaunas county'),
    ((14, 21), 'Klaipeda county'),
    ((21, 27), 'Marijampole county'),
    ((27, 33), 'Panevezys county'),
    ((33, 40), 'Siauliai county'),
    ((40, 44), 'Taurage county'),
    ((44, 48), 'Telsiai county'),
    ((48, 54), 'Utena county'),
    ((54, 62), 'Vilnius county'),
]
for (start, stop), county in _county_spans:
    for row in data[start:stop]:
        row.append(county)
for row in data:
    print (row)
# -
#Adding column headers instead of the malformed header
headerList=['Municipality','Time_period','Investment_in_tangible_fixed_assets_at_current_prices_EUR_thousand','Country','County']
# Replace the malformed first row with the clean header in a single step
# (equivalent to deleting row 0 and inserting headerList at position 0).
data[0] = headerList
for row in data:
    print (row)
import csv
#print (data)
# newline='' is required when handing a file object to csv.writer on Python 3;
# without it the writer emits spurious blank lines on Windows (csv module docs).
with open("Ready_datasets/readyDataSet6.csv", "w", newline="") as f:
    writer = csv.writer(f)
    writer.writerows(data)
| Lithuanean_datasets_adaptation_d6.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: ssdeep
# language: python
# name: ssdeep
# ---
# # ssdeep hash: scalable pairwise comparisons in pure Python
#
#
# This notebook contains the following:
# * A class called ```DocHash``` which pairs an input text string/document with an index, and an ssdeep hash value;
# * A random document generator for testing the performance of ```DocHash``` comparison algorithms, below.
#
# Three algorithms are implemented to test the scalability of pairwise ```DocHash``` comparisons:
# * A naive pairwise ```DocHash``` instance comparator, comparing all n(n-1)/2 pairs in a list of n instances, to construct a baseline of both correct matches, and a time to compute all pairs.
# * A slightly improved naive pairwise test, which tests the optimisation that ssdeep hash pairs should only be compared if the chunksizes are equal, double, or half (see https://www.intezer.com/blog/malware-analysis/intezer-community-tip-ssdeep-comparisons-with-elasticsearch/)
# * An efficient ```DocHash``` ssdeep hash matching algorithm which constructs a set of all matching/similar pairs (documentation below).
# !apt install build-essential libffi-dev python3 python3-dev python3-pip libfuzzy-dev
# !pip3 install ssdeep
# +
import ssdeep
class DocHash:
    """An indexed document and ssdeep hash pair"""
    def __init__(self, index, doc):
        self.index = index
        self.doc = doc
        self.hash = ssdeep.hash(doc)
        # An ssdeep hash has the form 'chunksize:chunk:double_chunk'.
        self.chunksize = int(self.hash.split(':', 1)[0])
        # Pre-computed 7-gram sets of both chunk parts, used for fast
        # candidate lookup by the matching algorithms below.
        self.chunk_ngrams = set(self._ngrams(self.hash.split(':', 2)[1], ngram_len=7))
        self.double_chunk_ngrams = set(self._ngrams(self.hash.split(':', 2)[2], ngram_len=7))
    def get_index(self):
        """Position of this document within the owning list."""
        return self.index
    def get_doc(self):
        """The raw document text."""
        return self.doc
    def get_hash(self):
        """The full ssdeep hash string."""
        return self.hash
    def get_chunksize(self):
        """The leading chunksize field of the ssdeep hash."""
        return self.chunksize
    def _ngrams(self, s, ngram_len=7):
        """All character n-grams of length ngram_len in s, or [s] if s is shorter."""
        if len(s) < ngram_len:
            return [s]
        else:
            # generate all ngrams of length ngraph_len from s
            return [s[i:i+ngram_len] for i in range(0, len(s)) if i + ngram_len <= len(s)]
    def get_chunk_ngrams(self):
        return self.chunk_ngrams
    def get_double_chunk_ngrams(self):
        return self.double_chunk_ngrams
    def compare(self, that):
        """ssdeep similarity score (0-100) between this hash and that's hash."""
        return ssdeep.compare(self.hash, that.get_hash())
    def comparable(self, that):
        """True when the two chunksizes are equal or differ by exactly a factor of 2."""
        # Order the pair so c0 <= c1; then one check covers double and half.
        c0 = self.chunksize
        c1 = that.chunksize
        if(c0 > c1):
            c0 = that.chunksize
            c1 = self.chunksize
        # Rule: Only compare hashes that have chunksize equal, double or half of the chunksize of the other
        return c0 == c1 or c0 * 2 == c1
    def __repr__(self):
        return str((self.doc, self.hash))
# +
# Computes a list of DocHash instances based on randomly generated text.
import string
import random
# seed the random generator for reproducibility
random.seed(42)
# test parameters
NUM_DOCS = 10000
SEED_STRING = "This is a seed sentence which will be randomly permuted for input to the ssdhash comparison test."
MIN_SENTENCE_LENGTH = 1
MIN_SENTENCES_PER_DOC = 10
MAX_SENTENCES_PER_DOC = 100
MAX_PERMUTATIONS = 5
# Randomly overwrite 'permutations' character positions of the input string s.
def random_permute(s, permutations):
    """Return s with `permutations` randomly chosen positions replaced by
    random lowercase letters (iterative equivalent of the recursive form)."""
    remaining = permutations
    while remaining > 0:
        # Pick a position, then draw the replacement letter — the same RNG
        # call order as the recursive version, so seeded runs reproduce.
        pos = random.randrange(len(s))
        letter = ''.join(random.choices(string.ascii_lowercase, k=1))
        s = s[:pos] + letter + s[pos + 1:]
        remaining -= 1
    return s
# the random document generator
docs = []
for i in range(0, NUM_DOCS):
    # Each document: a random number of sentences, each a randomly truncated,
    # randomly permuted copy of SEED_STRING.
    doc = ''.join([random_permute(SEED_STRING[:random.randint(MIN_SENTENCE_LENGTH, len(SEED_STRING))], random.randint(1, MAX_PERMUTATIONS)) for i in range(MIN_SENTENCES_PER_DOC, random.randint(MIN_SENTENCES_PER_DOC + 1, MAX_SENTENCES_PER_DOC))])
    docs.append(DocHash(i, doc))
# print some stats for the randomly generated list of documents
# Histogram of ssdeep chunksizes across the generated corpus.
csmap = {}
for d in docs:
    if d.chunksize not in csmap:
        csmap[d.chunksize] = 0
    csmap[d.chunksize] += 1
print('Chunksizes {(chunksize: count)}: %s' % (csmap))
# +
# Test 1: Naively compare all n(n-1)/2 pairs to compute a full set of comparisons, together with the set of
# all (i,j) document list index pairs for matching/similar documents.
import time
all_doc_pair_count = 0
naive_similar_doc_pairs = set()
start_time = time.time()
# O(n^2) baseline: every unordered pair is compared exactly once.
for i in range(0, len(docs)):
    d0 = docs[i]
    for j in range(i+1, len(docs)):
        d1 = docs[j]
        all_doc_pair_count += 1
        compscore = d0.compare(d1)
        # Any positive score counts as a similar pair.
        if(compscore > 0 and compscore <= 100):
            naive_similar_doc_pairs.add((i,j))
            #print("%s, %s: %s" % (docs[i], docs[j], compscore))
print("(All, Similar) pairs: (%.0f, %s) computed in %.3f seconds" % (all_doc_pair_count, len(naive_similar_doc_pairs), time.time() - start_time))
# +
# Test 2: Naively compare all n(n-1)/2 pairs to compute a full set of comparisons, but skip any pairs for which
# the relative chunk sizes are not equal, double, or half each other.
all_doc_pair_count = 0
faster_similar_doc_pairs = set()
start_time = time.time()
for i in range(0, len(docs)):
    d0 = docs[i]
    for j in range(i+1, len(docs)):
        d1 = docs[j]
        all_doc_pair_count += 1
        # Pre-filter: skip pairs whose chunksizes are not equal/double/half
        # (see DocHash.comparable), avoiding the expensive ssdeep.compare.
        if d0.comparable(d1):
            compscore = d0.compare(d1)
            if(compscore > 0 and compscore <= 100):
                faster_similar_doc_pairs.add((i,j))
                #print("%s, %s: %s" % (docs[i], docs[j], compscore))
print("(All, Similar) pairs: (%.0f, %s) computed in %.3f seconds" % (all_doc_pair_count, len(faster_similar_doc_pairs), time.time() - start_time))
# Correctness check against the Test-1 baseline.
if naive_similar_doc_pairs == faster_similar_doc_pairs:
    print("Naive/Faster pair sets match!")
else:
    print("Naive/Faster pair sets DON'T match!")
# +
# get_matching_pairs() is an optimised method of computing matching/similar DocHash instances. Given a list of
# DocHash instances as input, this method will compute all matching/similar pairs by building a lookup map based
# on chunked ngrams (of length 7) which appear common to at least two DocHash instances in the input list.
def get_matching_pairs(document_list, minimum_ssdeep_match_score=1):
    """Compute all matching/similar pairs of DocHash instances.

    Optimised alternative to the naive O(n^2) sweep: builds an inverted
    index from chunk ngrams (length-7 ngrams of the ssdeep chunk and
    double-chunk parts) to the document indices that contain them, discards
    ngrams seen in only one document, and then runs the expensive ssdeep
    comparison only on candidate pairs that share at least one of the
    remaining ngrams and pass the chunk-size compatibility test.

    Parameters
    ----------
    document_list : list of DocHash
        Documents to compare. Each instance's get_index()/.index is assumed
        to be its position in this list -- TODO confirm with DocHash.
    minimum_ssdeep_match_score : int, optional
        Lowest ssdeep comparison score (1..100) that counts as a match.

    Returns
    -------
    set of (int, int)
        Index pairs (i, j), with i < j, of matching/similar documents.
    """
    # Forward lookup: chunk ngram -> set of indices of documents containing it.
    all_chunk_index_lookup_table = {}
    for d in document_list:
        index = d.get_index()
        for c in d.get_chunk_ngrams().union(d.get_double_chunk_ngrams()):
            all_chunk_index_lookup_table.setdefault(c, set()).add(index)
    # An ngram associated with a single document can never yield a candidate
    # pair, so keep only the ngrams shared by at least two documents; these
    # act as a filter when collecting comparison candidates below.
    repeated_ngram_chunks = {
        k for k, indices in all_chunk_index_lookup_table.items() if len(indices) > 1
    }
    # Rebuild the forward lookup restricted to the repeated ngrams, keeping
    # the map (and the candidate sets derived from it) as small as possible.
    all_chunk_index_lookup_table = {}
    for d in document_list:
        index = d.get_index()
        for c in d.get_chunk_ngrams().union(d.get_double_chunk_ngrams()) & repeated_ngram_chunks:
            all_chunk_index_lookup_table.setdefault(c, set()).add(index)
    # Finally, with the minimised lookup map, compare each document against
    # its candidates only.
    similar_doc_pairs = set()
    for d0 in document_list:
        # Union of the index sets of every repeated ngram that d0 contains.
        potential_match_indices = set()
        for c in d0.get_chunk_ngrams().union(d0.get_double_chunk_ngrams()) & repeated_ngram_chunks:
            potential_match_indices.update(all_chunk_index_lookup_table[c])
        for i in potential_match_indices:
            # Compute each unordered pair only once, e.g. (1, 2) but not (2, 1);
            # this also skips comparing a document with itself.
            if i <= d0.index:
                continue
            d1 = document_list[i]
            # Cheap chunk-size test first, expensive ssdeep comparison second.
            if d0.comparable(d1):
                compscore = d0.compare(d1)
                if minimum_ssdeep_match_score <= compscore <= 100:
                    similar_doc_pairs.add((d0.index, d1.index))
    # Return all index pairs whose score reached the requested threshold.
    return similar_doc_pairs
# +
# Test 3: Run get_matching_pairs() over the same test documents and compare
# its runtime and results against the naive methods above.
start_time = time.time()
optim_similar_doc_pairs = get_matching_pairs(docs, minimum_ssdeep_match_score=1)
elapsed = time.time() - start_time
total_pairs = (len(docs) * (len(docs) - 1)) / 2
print("(All, Similar) pairs: (%.0f, %s) computed in %.3f seconds" % (total_pairs, len(optim_similar_doc_pairs), elapsed))
print("Naive/Faster/Optim similar pair sets match!" if naive_similar_doc_pairs == optim_similar_doc_pairs
      else "Naive/Faster/Optim similar pair sets DON'T match!")
| SSDeep.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # EPL Historical Data | Clustering clubs
# +
# load libraries
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# %matplotlib inline
# -
# reading data
# The JSON is keyed by player name, so transpose to one row per player and
# promote the player name into an 'index' column.
df = pd.read_json('ml_python_epl_data_17_18.json')
df = df.T
df.reset_index(level=0, inplace = True)
cols = list(df.columns)
cols
df.describe()
df.describe(include='all')
col_types = list(df.dtypes)
len(col_types), set(col_types)
# ##### all columns are of type object
df.head()
# ##### sorting by goals scored
# +
# NOTE(review): the sorted frame is only displayed, not assigned back to df.
df.sort_values(by=['Goals'], ascending=False)
# -
players = df['index'].unique()
# ##### check if any player field has missing values
for i, player in enumerate(players):
    if player == '':
        print(i)
df.iloc[[14]]
# ##### drop row 14 - player name is blank
df.drop(df.index[[14]], inplace=True)
df.iloc[[14]]
# ##### filling na values
# na values
df.fillna(0, inplace=True)
# +
# Per-column flag: True if the column still contains any nulls.
tx = df.isnull().any()
tx.describe()
# +
##### if any column still has got na values it should have displayed True
# -
set(tx)
# ##### Accurate Long Balls True
df['Accurate Long Balls'].isnull().sum()
df['Throw Outs'].isnull().sum()
df['Throw Outs'].unique()
# +
# df['Accurate Long Balls'] = df['Accurate Long Balls'].fillna(0)
# df['Accurate Long Balls'].isnull().sum()
##### replace ',''s from columns
# df['Accurate Long Balls'] = df['Accurate Long Balls'].str.replace(',','')
# df['Accurate Long Balls'] = pd.to_numeric(df['Accurate Long Balls'])
# df['Accurate Long Balls'].isnull().sum()
# df['Accurate Long Balls'].iloc[[0,1,2,3,4]]
# df['Accurate Long Balls'].head(5)
##### clearances column
# df['Clearances'].describe()
# df['Clearances'].unique()
# -
# ##### helper function to remove ',', '.' and '%' from values in columns
# +
def _strip_char_to_numeric(df, col, char):
    """Remove every *literal* occurrence of char from string column col,
    fill NaNs in the whole frame with 0 (mirrors the original behaviour),
    and return the column coerced to numeric."""
    # regex=False is essential: with the regex default, a pattern like '.'
    # matches every character and blanks the whole column.
    df[col] = df[col].str.replace(char, '', regex=False)
    df.fillna(0, inplace=True)
    return pd.to_numeric(df[col])

def remove_commas(df, col):
    """Strip thousands separators (',') from df[col] and return it as numeric."""
    return _strip_char_to_numeric(df, col, ',')

def remove_dots(df, col):
    """Strip literal '.' characters from df[col] and return it as numeric.

    Bug fix: the original called str.replace('.', '') without regex=False,
    so under the (older pandas) regex default the dot matched every
    character and emptied the values.
    """
    return _strip_char_to_numeric(df, col, '.')

def remove_percentages(df, col):
    """Strip '%' signs from df[col] and return it as numeric."""
    return _strip_char_to_numeric(df, col, '%')
# +
# Columns whose string values use ',' as a thousands separator, and columns
# stored as percentage strings ('NN%').
num_cols_with_commas = ['Accurate Long Balls','Clearances','Crosses','Duels Lost','Shots',
                        'Duels Won','Goal Kicks','Headed Clearance','Passes','Recoveries','Throw Outs']
# num_cols_with_dots = ['Throw Outs']
num_cols_with_percentages = ['Cross Accuracy %','Shooting Accuracy %','Tackle Success %']
# -
# Strip the separators/signs and coerce each affected column to numeric.
for col in num_cols_with_commas:
    df[col] = remove_commas(df, col)
for col in num_cols_with_percentages:
    df[col] = remove_percentages(df, col)
# +
df['Throw Outs'].isnull().sum()
df['Throw Outs'].unique()
set(df.isnull().any())
# -
df['Throw Outs'].unique()
# for col in num_cols_with_dots:
#     df[col] = remove_dots(df, col)
# ##### convert columns with numeric values to type int
# +
# All columns except the categorical ones ('index', 'Club', 'Position')
# should now hold numeric-convertible values.
numeric_cols = cols.copy()
numeric_cols.remove('index')
numeric_cols.remove('Club')
numeric_cols.remove('Position')
#numeric_cols.remove('Throw Outs')
#numeric_cols.remove('Cross Accuracy %')
#numeric_cols.remove('Crosses')
#numeric_cols.remove('Duels Lost')
#numeric_cols.remove('Duels Won')
#numeric_cols.remove('Goal Kicks')
#numeric_cols.remove('Headed Clearance')
#numeric_cols.remove('Passes')
#numeric_cols.remove('Recoveries')
#numeric_cols.remove('Shooting Accuracy %')
#numeric_cols.remove('Shots')
#numeric_cols.remove('Tackle Success %')
numeric_cols
# -
# Convert each candidate column to numeric. The original wrapped the whole
# loop in one try with a bare except:, so the first failure silently aborted
# every remaining conversion (and would even swallow KeyboardInterrupt);
# converting per column keeps going and reports only the columns that
# genuinely cannot be converted.
for i, col in enumerate(numeric_cols):
    print(i, col)
    try:
        df[col] = df[col].apply(pd.to_numeric)
    except (ValueError, TypeError):
        print('%s cannot convert to numeric' % (col))
# +
# Inspect the resulting dtypes after conversion.
tx = df.dtypes
set(list(tx))
# -
df.describe()
for col in cols:
    print(col, df[col].dtype)
df['Team Play']
# NOTE(review): CategoricalIndex of a whole DataFrame looks like a leftover
# experiment -- its result is discarded and probably isn't meaningful here.
pd.CategoricalIndex(df)
# #### Correlation
# Pairwise correlations of the numeric columns. (NOTE(review): on pandas >= 2.0
# this needs numeric_only=True if non-numeric columns remain -- verify version.)
correlations = df.corr()
# +
# Generate a mask for the upper triangle
# np.bool was removed in NumPy 1.24; the builtin bool is the correct dtype.
mask = np.zeros_like(correlations, dtype=bool)
# array([[False, False],
#       [False, False]])
mask[np.triu_indices_from(mask)] = True
# array([[ True,  True],
#       [False,  True]])
# +
# Set up the matplotlib figure
f, ax = plt.subplots(figsize=(17, 13))
# Generate a custom diverging colormap
cmap = sns.diverging_palette(220, 10, as_cmap=True)
# Draw the heatmap with the mask and correct aspect ratio
sns.heatmap(correlations, mask=mask, cmap=cmap, vmax=.3, center=0,
            square=True, linewidths=.5, cbar_kws={"shrink": .5})
# -
# ### Visualizations!!
#
# ---
#
# #### Univariate analysis
# ---
# Number of players per club.
plt.figure(figsize=(17,7))
df['Club'].value_counts().plot.bar()
plt.title('Clubs')
plt.xlabel('Club')
plt.ylabel('Number of players')
# NOTE(review): legend() with no labeled artists just emits a warning.
plt.legend()
len(df['index'].unique()), len(df['Club'].unique())
# ##### Out of the data for 626 players from 81 clubs, Brighton and Hove Albion has the max number of players - 35, followed by Watford, Arsenal, Liverpool and Newcastle United
#
# +
# Distribution of goal tallies across players.
plt.figure(figsize=(17,7))
df['Goals'].value_counts().plot.bar()
plt.title('Goals scored by players')
plt.xlabel('Goals')
plt.ylabel('Count')
# for a,b in zip(df.Goals.values, df.index.values):
#     plt.text(0,0,b)
plt.legend()
# plt.savefig('dd.png')
# -
# ##### Maximum number of goals scored by a player is 200; whereas more than 300 players (nearly half) have not scored a single goal!
#
df[['Goals','Own Goals']].hist()
len(numeric_cols)
len(df.columns)
cat_cols = ['index','Club','Position']
len(cat_cols)
df['Throw Outs'].unique()
# One-hot encode the player position and append the dummy columns to df.
pd.get_dummies(df['Position']).head(7)
# +
df = pd.concat([df, pd.get_dummies(df['Position'])], axis=1)
df.head()
# -
# Treat the new position dummies as numeric features for clustering below.
numeric_cols.append('Defender')
numeric_cols.append('Forward')
numeric_cols.append('Goalkeeper')
numeric_cols.append('Midfielder')
len(numeric_cols)
# +
# numeric_cols.append('Team Play')
# numeric_cols.remove('Attack')
# numeric_cols.remove('Defence')
# numeric_cols.remove('Discipline')
#
# len(numeric_cols)
# -
# ### Clustering
from sklearn.cluster import KMeans
from mpl_toolkits.mplot3d import Axes3D
# Feature matrix: all numeric columns (plus the position dummies), NaNs zeroed.
df.fillna(0, inplace=True)
X_epl = df[numeric_cols].values
X_epl.shape
# +
# df.fillna(0, inplace=True)
# +
# df.isnull().any()
# -
#KMeans
# Cluster the players into 3 groups; labels_ holds the cluster id per player.
km = KMeans(n_clusters=3)
km.fit(X_epl)
# NOTE(review): the predict() result is discarded; labels_ below is equivalent.
km.predict(X_epl)
labels = km.labels_
# +
#Plotting
fig = plt.figure(1, figsize=(17,11))
# Direct Axes3D(fig, ...) instantiation no longer attaches the axes to the
# figure (deprecated in Matplotlib 3.4, removed later); create the axes
# through the figure instead -- extra kwargs are forwarded to Axes3D.
ax = fig.add_axes([0, 0, .95, 1], projection='3d', elev=88, azim=-14)
# np.float was removed in NumPy 1.24 -- the builtin float is equivalent.
ax.scatter(X_epl[:, 3], X_epl[:, 0], X_epl[:, 2], cmap='rainbow',
           c=labels.astype(float), edgecolor="k", s=50)
ax.set_xlabel("")
ax.set_ylabel("")
ax.set_zlabel("")
plt.title("K Means", fontsize=14)
# -
# ##### ValueError: Colormap blue is not recognized. Possible values are: Accent, Accent_r, Blues, Blues_r, BrBG, BrBG_r, BuGn, BuGn_r, BuPu, BuPu_r, CMRmap, CMRmap_r, Dark2, Dark2_r, GnBu, GnBu_r, Greens, Greens_r, Greys, Greys_r, OrRd, OrRd_r, Oranges, Oranges_r, PRGn, PRGn_r, Paired, Paired_r, Pastel1, Pastel1_r, Pastel2, Pastel2_r, PiYG, PiYG_r, PuBu, PuBuGn, PuBuGn_r, PuBu_r, PuOr, PuOr_r, PuRd, PuRd_r, Purples, Purples_r, RdBu, RdBu_r, RdGy, RdGy_r, RdPu, RdPu_r, RdYlBu, RdYlBu_r, RdYlGn, RdYlGn_r, Reds, Reds_r, Set1, Set1_r, Set2, Set2_r, Set3, Set3_r, Spectral, Spectral_r, Wistia, Wistia_r, YlGn, YlGnBu, YlGnBu_r, YlGn_r, YlOrBr, YlOrBr_r, YlOrRd, YlOrRd_r, afmhot, afmhot_r, autumn, autumn_r, binary, binary_r, bone, bone_r, brg, brg_r, bwr, bwr_r, cividis, cividis_r, cool, cool_r, coolwarm, coolwarm_r, copper, copper_r, cubehelix, cubehelix_r, flag, flag_r, gist_earth, gist_earth_r, gist_gray, gist_gray_r, gist_heat, gist_heat_r, gist_ncar, gist_ncar_r, gist_rainbow, gist_rainbow_r, gist_stern, gist_stern_r, gist_yarg, gist_yarg_r, gnuplot, gnuplot2, gnuplot2_r, gnuplot_r, gray, gray_r, hot, hot_r, hsv, hsv_r, icefire, icefire_r, inferno, inferno_r, jet, jet_r, magma, magma_r, mako, mako_r, nipy_spectral, nipy_spectral_r, ocean, ocean_r, pink, pink_r, plasma, plasma_r, prism, prism_r, rainbow, rainbow_r, rocket, rocket_r, seismic, seismic_r, spring, spring_r, summer, summer_r, tab10, tab10_r, tab20, tab20_r, tab20b, tab20b_r, tab20c, tab20c_r, terrain, terrain_r, viridis, viridis_r, vlag, vlag_r, winter, winter_r
#
# # to be continued!
| machinelearning/ml_python_epl_data_17_18.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Overview
#
# We're now switching focus away from the Network Science (for a little bit), beginning to think about _Natural Language Processing_ instead. In other words, today will be all about teaching your computer to "understand" text. This ties in nicely with our work on Reddit, because submissions and comments often contain text. We've looked at the network so far - now, let's see if we can include the text. Today is about
#
# * Installing the _natural language toolkit_ (NLTK) package and learning the basics of how it works (Chapter 1)
# * Figuring out how to make NLTK to work with other types of text (Chapter 2).
# > **_Video Lecture_**. [Intro to Natural Language processing](https://www.youtube.com/watch?v=Ph0EHmFT3n4). Today is all about working with NLTK, so not much lecturing - we will start with a perspective on text analysis by Sune (you will hear him talking about Wikipedia data here and there. Everything he says applies to Reddit data as well!)
from IPython.display import YouTubeVideo
YouTubeVideo("Ph0EHmFT3n4",width=800, height=450)
# # Installing and the basics
#
# > _Reading_
# > The reading for today is Natural Language Processing with Python (NLPP) Chapter 1, Sections 1.1, 1.2, 1.3\. [It's free online](http://www.nltk.org/book/).
#
# > *Exercises*: NLPP Chapter 1\.
# >
# > * First, install `nltk` if it isn't installed already (there are some tips below that I recommend checking out before doing installing)
# > * Second, work through chapter 1. The book is set up as a kind of tutorial with lots of examples for you to work through. I recommend you read the text with an open IPython Notebook and type out the examples that you see. ***It becomes much more fun if you add a few variations and see what happens***. Some of those examples might very well be due as assignments (see below the install tips), so those ones should definitely be in a `notebook`.
#
# ### NLTK Install tips
#
# Check to see if `nltk` is installed on your system by typing `import nltk` in a `notebook`. If it's not already installed, install it as part of _Anaconda_ by typing
#
# conda install nltk
#
# at the command prompt. If you don't have them, you can download the various corpora using a command-line version of the downloader that runs in Python notebooks: In the iPython notebook, run the code
#
# import nltk
# nltk.download()
#
# Now you can hit `d` to download, then type "book" to fetch the collection needed today's `nltk` session. Now that everything is up and running, let's get to the actual exercises.
# !pip install nltk
import nltk
nltk.download()
# + jupyter={"outputs_hidden": true}
from nltk.book import *
# -
# > *Exercises: NLPP Chapter 1 (the stuff that might be due in an upcoming assignment).
# >
# > The following exercises from Chapter 1 are what might be due in an assignment later on.
# >
# > * Try out the `concordance` method, using another text and a word of your own choosing.
# > * Also try out the `similar` and `common_context` methods for a few of your own examples.
# > * Create your own version of a dispersion plot ("your own version" means another text and different word).
# > * Explain in your own words what aspect of language _lexical diversity_ describes.
# > * Create frequency distributions for `text2`, including the cumulative frequency plot for the 75 most common words.
# > * What is a bigram? How does it relate to `collocations`. Explain in your own words.
# > * Work through ex 2-12 in NLPP's section 1.8\.
# > * Work through exercise 15, 17, 19, 22, 23, 26, 27, 28 in section 1.8\.
# # Working through the chapters examples
# 1. Try out the concordance method, using another text and a word of your own choosing.
# This enables us to see words in context.
text1.concordance("Moby")
# 2. Also try out the similar and common_contexts methods for a few of your own examples.
text1.similar("home")
# + jupyter={"outputs_hidden": true}
# 2. continued
text2.similar("sweet")
text2.similar("very")
text2.common_contexts(["sweet", "very"])
# -
# 3. Create your own version of a dispersion plot ("your own version" means another text and different word)
text4.dispersion_plot(["God", "democracy", "freedom", "duties", "constitution"])
# 4. Explain in your own words what aspect of language lexical diversity describes.
# Lexical diversity measures how varied a text's vocabulary is: the ratio of distinct word types to the total number of tokens. A value near 1 means words are rarely repeated, while a low value means a small vocabulary is reused heavily.
# +
# Extra: Generate random text inspired by the words and style of writing in another text
# text2.generate() # Not possible for me, since I have NLTK 3.0 I believe
# len(text3) # Counts both words and punctuation such as . , : and so on -- i.e. it counts "tokens",
# which are sequences of characters (words) and things like :), each of which also counts as 1.
# To get just the vocabulary, i.e. the number of distinct words, use:
# print(sorted(set(text3)))
# print(len(sorted(set(text3))))
# Count specific words
text3.count("smote")
100 * text4.count('a') / len(text4)
# Make this easier with helper functions
def lexical_diversity(text):
    """Fraction of tokens that are distinct: vocabulary size / total length."""
    vocabulary = set(text)
    return len(vocabulary) / len(text)
def percentage(count, total):
    """Express count as a percentage of total."""
    return (count * 100) / total
# -
# Bug fix: the book example was pasted with the interactive prompt's "..."
# continuation marker, which is a syntax error inside the list literal.
saying = ['After', 'all', 'is', 'said', 'and', 'done',
          'more', 'is', 'said', 'than', 'done']
# Deduplicate, then sort (uppercase letters sort before lowercase).
tokens = set(saying)
tokens = sorted(tokens)
print(tokens)
tokens[-4:]
# Fraction of tokens that are the word "a" in text4, plus text3's diversity.
total = len(text4)
count = text4.count("a")
print(percentage(count, total) ,lexical_diversity(text3))
# Although it has 44,764 tokens, this book has only 2,789 distinct words, or "word types." A word type is the form or spelling of the word independently of its specific occurrences in a text — that is, the word considered as a unique item of vocabulary.
#
# the lexical richness of the text = number of distinct words / total number of words
# print(lexical_diversity(text3), lexical_diversity(text5), percentage(4, 5), percentage(text4.count('a'), len(text4)))
sent1
len(sent1)
# +
# 5. Create frequency distributions for text2, including the cumulative frequency plot for the 75 most common words.
import matplotlib.pylab as plt
import numpy as np
fdist1 = FreqDist(text2)
print(fdist1)
most_common_75 = fdist1.most_common(75)
# print(most_common_75)
fdist1['whale']
# Bug fix: plt.plot(text2) tried to plot the raw token stream. The exercise
# asks for a cumulative frequency plot of the 75 most common words, which
# FreqDist provides directly.
fdist1.plot(75, cumulative=True)
# #### 6. What is a bigram? How does it relate to collocations. Explain in your own words.
# A bigram is a pair of adjacent tokens in a text. Collocations are word pairs that occur together unusually often (e.g. "red wine"), and they are typically found by looking for frequent bigrams of comparatively infrequent words.
# # Working through ex 2-12 in NLPP's section 1.8.
# ### exercise 2
# Given an alphabet of 26 letters, there are 26 to the power 10, or 26 ** 10, ten-letter strings we
# can form. That works out to 141167095653376. How many hundred-letter strings are possible?
#
# That will be 26^100 = 314293064158293883017435778850162642728266998876247525637417317539899590842
# 0104023465432599069702289330964075081611719197835869803511992549376
#
# ### exercise 3
# The Python multiplication operation can be applied to lists. What happens when you type ['Monty', 'Python'] * 20, or 3 * sent1?
# +
# ['Monty', 'Python'] * 20 # Så er der 20 monty og 20 python.
# 3 * sent1
# Resultatet:
# ['Call',
# 'me',
# 'Ishmael',
# '.',
# 'Call',
# 'me',
# 'Ishmael',
# '.',
# 'Call',
# 'me',
# 'Ishmael',
# '.']
# -
# +
# 8. Work through exercise 15, 17, 19, 22, 23, 26, 27, 28 in section 1.8.
# -
# # Working with NLTK and other types of text
#
# Chapter 2 in NLPP1e is all about getting access to nicely curated texts that you can find built into NLTK.
# >
# > Reading: NLPP Chapter 2.1 - 2.4\.
# >
# > *Exercises*: NLPP Chapter 2\.
# >
# > * Solve exercise 4, 8, 11, 15, 16, 17, 18 in NLPP1e, section 2.8\. As always, I recommend you write up your solutions nicely in a `notebook`.
#
| lectures/Week5.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
###import important modules
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# +
## read data and print top-5 rows
data = pd.read_csv("archive/food_coded.csv", delimiter=',')
data.head()
# -
### get the information/details of the dataset like total values, data type, and the shape of the complete dataset
### which is rows*cols - 125*61 here
# Bug fix: DataFrame.info() prints its report itself and returns None, so
# wrapping it in print() only added a stray "None" line to the output.
data.info()
print(data.shape)
data.describe() ###statistical analysis of the dataset
data.isnull().sum() ###check for the null values and take their sum
# +
###draw the various plots to understand the data distribution
# -
| geolocation_analysis/Untitled.ipynb |
-- ---
-- jupyter:
-- jupytext:
-- text_representation:
-- extension: .hs
-- format_name: light
-- format_version: '1.5'
-- jupytext_version: 1.14.4
-- kernelspec:
-- display_name: Haskell - haskell
-- language: haskell
-- name: ihaskell_haskell
-- ---
-- +
import Graphics.Rendering.Chart
import Data.Colour
import Data.Colour.Names
import Data.Default.Class
import Graphics.Rendering.Chart.Backend.Cairo
import Control.Lens
-- Helper that forces a line plot's colour to solid blue.
-- NOTE(review): defined but not used by 'chart' below.
setLinesBlue :: PlotLines a b -> PlotLines a b
setLinesBlue = plot_lines_style . line_color .~ opaque blue
-- Amplitude-modulated sinusoid rendered twice: as a continuous blue line
-- (x sampled every 0.5) and as red points (x sampled every 7).
chart = toRenderable layout
  where
    -- the AM signal: a slow sine envelope modulating a faster carrier
    am :: Double -> Double
    am x = (sin (x*3.14159/45) + 1) / 2 * (sin (x*3.14159/5))
    sinusoid1 = plot_lines_values .~ [[ (x,(am x)) | x <- [0,(0.5)..400]]]
              $ plot_lines_style . line_color .~ opaque blue
              $ plot_lines_title .~ "am"
              $ def
    sinusoid2 = plot_points_style .~ filledCircles 2 (opaque red)
              $ plot_points_values .~ [ (x,(am x)) | x <- [0,7..400]]
              $ plot_points_title .~ "am points"
              $ def
    layout = layout_title .~ "Amplitude Modulation"
           $ layout_plots .~ [toPlot sinusoid1,
                              toPlot sinusoid2]
           $ def
-- Evaluate to display the rendered chart in the notebook.
chart
-- -
| jupyter/plot-test.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
import numpy
from scipy import ndimage, signal
from galpy.util import bovy_conversion, bovy_plot
import gd1_util
from gd1_util import R0, V0
import seaborn as sns
# %pylab inline
# # Some peppering of a GD-1 like stream
def nsubhalo(m):
    # Subhalo number density at mass m (Msun), normalized to 0.3 at
    # 10^6.5 Msun -- presumably kpc^-3; TODO confirm against the paper.
    return 0.3*(10.**6.5/m)
def rs(m):
    # Subhalo scale radius in units of R0: 1.05 kpc at 10^8 Msun, scaling as m^0.5.
    return 1.05/R0*(m/10.**8.)**0.5
def dNencdm(m,Xrs=3.):
    # Expected number of stream-subhalo encounters for mass m within an impact
    # parameter of Xrs scale radii (uses the module-level sdf_pepper).
    return sdf_pepper.subhalo_encounters(sigma=120./220.,nsubhalo=nsubhalo(m),bmax=Xrs*rs(m))
# Single time
# Smooth (unperturbed) GD-1-like stream model, and a "peppered" version with a
# single impact time. NOTE: Python 2 notebook (print-statement syntax).
sdf_smooth= gd1_util.setup_gd1model()
print "Stream length in arc degree and physical kpc", sdf_smooth.length(ang=True), sdf_smooth.length(phys=True)
sdf_pepper= gd1_util.setup_gd1model(timpact=[1./bovy_conversion.time_in_Gyr(V0,R0)])
# Power spectrum of the stream *density* after peppering with subhalos of a
# single mass per curve (10^5.5..10^8.5 Msun): median and interquartile band
# over ntrials realizations, compared with a mock noise floor (black/grey).
figsize(8,8)
smooth_len= 0
ntrials= 101
apars= numpy.linspace(0.2,sdf_smooth.length(),201)
#Run one simulation to setup power-spectrum x axis
sdf_pepper.simulate(rate=1.,sample_GM=lambda: 10.**-2./bovy_conversion.mass_in_1010msol(V0,R0),
                    sample_rs=rs)
px= sdf_pepper.csd(apars=apars)[0]
ppy= numpy.zeros((len(px),ntrials))
ppy_err= numpy.zeros((len(px),ntrials))
all_ppy= []
for kk,m in enumerate([10.**5.5,10.**6.5,10.**7.5,10.**8.5]):
    for ii in range(ppy.shape[1]):
        # Simulate
        sdf_pepper.simulate(rate=dNencdm(m,Xrs=3.),
                            sample_GM=lambda: m/10.**10./bovy_conversion.mass_in_1010msol(V0,R0),
                            sample_rs=lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.),
                            Xrs=3.)
        ppy[:,ii]= ndimage.filters.gaussian_filter1d(sdf_pepper.csd(d1='density',
                                                                    d2='density',
                                                                    apars=apars)[1].real,
                                                     smooth_len)
        if kk == 0:
            # Mock density-noise realization used for the grey error band.
            mock_dens= 0.1*numpy.random.normal(size=len(apars))
            ppy_err[:,ii]= signal.csd(mock_dens,mock_dens,
                                      fs=1./(apars[1]-apars[0]),scaling='spectrum')[1].real
    loglog(px,numpy.sqrt(numpy.median(ppy,axis=1)),lw=2.,color=sns.color_palette()[kk],zorder=kk+6)
    # Sort the trials per frequency so columns give quantiles directly.
    ppy.sort(axis=1)
    fill_between(px,numpy.sqrt(ppy[:,int(numpy.round(0.25*ntrials))]),
                 numpy.sqrt(ppy[:,int(numpy.round(0.75*ntrials))]),
                 color=sns.color_palette()[kk],zorder=kk+2,
                 alpha=0.5)
    # NOTE(review): this appends a reference to the same reused ppy array each
    # iteration; ppy.copy() is probably intended if all_ppy is used later.
    all_ppy.append(ppy)
# Also plot error
loglog(px,numpy.sqrt(numpy.median(ppy_err,axis=1)),lw=2.,color='k',zorder=1)
ppy_err.sort(axis=1)
fill_between(px,numpy.sqrt(ppy_err[:,int(numpy.round(0.25*ntrials))]),
             numpy.sqrt(ppy_err[:,int(numpy.round(0.75*ntrials))]),
             color='0.5',zorder=0,
             alpha=0.5)
ylim(0.001,1.)
xlim(0.1,100.)
# Same experiment as above, but for the power spectrum of the mean parallel
# frequency (meanOmega) along the stream, with a correspondingly smaller
# mock noise level.
figsize(8,8)
smooth_len= 0
ntrials= 101
apars= numpy.linspace(0.2,sdf_smooth.length(),201)
#Run one simulation to setup power-spectrum x axis
sdf_pepper.simulate(rate=1.,sample_GM=lambda: 10.**-2./bovy_conversion.mass_in_1010msol(V0,R0),
                    sample_rs=rs)
px= sdf_pepper.csd(apars=apars)[0]
ppy= numpy.zeros((len(px),ntrials))
ppy_err= numpy.zeros((len(px),ntrials))
all_ppy= []
for kk,m in enumerate([10.**5.5,10.**6.5,10.**7.5,10.**8.5]):
    for ii in range(ppy.shape[1]):
        # Simulate
        sdf_pepper.simulate(rate=dNencdm(m,Xrs=3.),
                            sample_GM=lambda: m/10.**10./bovy_conversion.mass_in_1010msol(V0,R0),
                            sample_rs=lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.),
                            Xrs=3.)
        ppy[:,ii]= ndimage.filters.gaussian_filter1d(sdf_pepper.csd(d1='meanOmega',
                                                                    d2='meanOmega',
                                                                    apars=apars)[1].real,
                                                     smooth_len)
        if kk == 0:
            mock_mO= 0.0005*numpy.random.normal(size=len(apars))
            ppy_err[:,ii]= signal.csd(mock_mO,mock_mO,
                                      fs=1./(apars[1]-apars[0]),scaling='spectrum')[1].real
    loglog(px,numpy.sqrt(numpy.median(ppy,axis=1)),lw=2.,color=sns.color_palette()[kk],zorder=kk+6)
    ppy.sort(axis=1)
    fill_between(px,numpy.sqrt(ppy[:,int(numpy.round(0.25*ntrials))]),
                 numpy.sqrt(ppy[:,int(numpy.round(0.75*ntrials))]),
                 color=sns.color_palette()[kk],zorder=kk+2,
                 alpha=0.5)
    # NOTE(review): appends the same reused ppy array object (see above).
    all_ppy.append(ppy)
# Also plot error
loglog(px,numpy.sqrt(numpy.median(ppy_err,axis=1)),lw=2.,color='k',zorder=1)
ppy_err.sort(axis=1)
fill_between(px,numpy.sqrt(ppy_err[:,int(numpy.round(0.25*ntrials))]),
             numpy.sqrt(ppy_err[:,int(numpy.round(0.75*ntrials))]),
             color='0.5',zorder=0,
             alpha=0.5)
ylim(0.000001,0.001)
xlim(0.1,100.)
# Cross power spectrum between density and meanOmega (magnitude of the
# complex CSD), again per subhalo mass with a mock noise comparison.
figsize(8,8)
smooth_len= 0
ntrials= 101
apars= numpy.linspace(0.2,sdf_smooth.length(),201)
#Run one simulation to setup power-spectrum x axis
sdf_pepper.simulate(rate=1.,sample_GM=lambda: 10.**-2./bovy_conversion.mass_in_1010msol(V0,R0),
                    sample_rs=rs)
px= sdf_pepper.csd(apars=apars)[0]
ppy= numpy.zeros((len(px),ntrials))
ppy_err= numpy.zeros((len(px),ntrials))
all_ppy= []
for kk,m in enumerate([10.**5.5,10.**6.5,10.**7.5,10.**8.5]):
    for ii in range(ppy.shape[1]):
        # Simulate
        sdf_pepper.simulate(rate=dNencdm(m,Xrs=3.),
                            sample_GM=lambda: m/10.**10./bovy_conversion.mass_in_1010msol(V0,R0),
                            sample_rs=lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.),
                            Xrs=3.)
        ppy[:,ii]= ndimage.filters.gaussian_filter1d(numpy.absolute(sdf_pepper.csd(d1='density',
                                                                                   d2='meanOmega',
                                                                                   apars=apars)[1]),
                                                     smooth_len)
        if kk == 0:
            mock_dens= 0.1*numpy.random.normal(size=len(apars))
            mock_mO= 0.0005*numpy.random.normal(size=len(apars))
            ppy_err[:,ii]= numpy.absolute(signal.csd(mock_dens,mock_mO,
                                                     fs=1./(apars[1]-apars[0]),scaling='spectrum')[1])
    loglog(px,numpy.sqrt(numpy.median(ppy,axis=1)),lw=2.,color=sns.color_palette()[kk],zorder=kk+6)
    ppy.sort(axis=1)
    fill_between(px,numpy.sqrt(ppy[:,int(numpy.round(0.25*ntrials))]),
                 numpy.sqrt(ppy[:,int(numpy.round(0.75*ntrials))]),
                 color=sns.color_palette()[kk],zorder=kk+2,
                 alpha=0.5)
    # NOTE(review): appends the same reused ppy array object (see above).
    all_ppy.append(ppy)
# Also plot error
loglog(px,numpy.sqrt(numpy.median(ppy_err,axis=1)),lw=2.,color='k',zorder=1)
ppy_err.sort(axis=1)
fill_between(px,numpy.sqrt(ppy_err[:,int(numpy.round(0.25*ntrials))]),
             numpy.sqrt(ppy_err[:,int(numpy.round(0.75*ntrials))]),
             color='0.5',zorder=0,
             alpha=0.5)
ylim(0.00001,0.01)
xlim(0.1,100.)
# Sanity check of the frequency response: perturb the tangential velocity by
# 1 km/s (1/V0 in natural units) and inspect the relative and absolute change
# of the corresponding frequency (element indx of actionsFreqs) for a
# GD-1-like orbit in a flattened logarithmic halo.
from galpy.potential import LogarithmicHaloPotential
from galpy.actionAngle import actionAngleIsochroneApprox
from galpy.orbit import Orbit
lp= LogarithmicHaloPotential(normalize=1.,q=0.9)
aAI= actionAngleIsochroneApprox(pot=lp,b=0.8)
obs= Orbit([1.56148083,0.35081535,-1.15481504,0.88719443,
            -0.47713334,0.12019596])
indx= 3
print (aAI.actionsFreqs(obs.R(),obs.vR(),obs.vT()+1./gd1_util.V0,
                        obs.z(),obs.vz(),obs.phi())[indx]/\
       aAI.actionsFreqs(obs.R(),obs.vR(),obs.vT(),obs.z(),obs.vz(),obs.phi())[indx]-1.)
print ((aAI.actionsFreqs(obs.R(),obs.vR(),obs.vT()+1./gd1_util.V0,
                         obs.z(),obs.vz(),obs.phi())[indx]\
        -aAI.actionsFreqs(obs.R(),obs.vR(),obs.vT(),obs.z(),obs.vz(),obs.phi())[indx])/sdf_smooth._progenitor_Omega[indx-3])
# Pepper the stream with a single subhalo mass and inspect the perturbed
# density and mean frequency against the unperturbed model.
# NOTE(review): the names dens_1e5 / mO_1e6 don't match m = 10^5.5 used here.
m=10.**5.5
sdf_pepper.simulate(rate=dNencdm(m,Xrs=3.),
                    sample_GM=lambda: m/10.**10./bovy_conversion.mass_in_1010msol(V0,R0),
                    sample_rs=lambda x: rs(x*bovy_conversion.mass_in_1010msol(V0,R0)*10.**10.),
                    Xrs=3.)
print len(sdf_pepper._timpact)
apars= numpy.linspace(0.,sdf_pepper.length(),201)
xs= apars
dens_1e5= numpy.array([sdf_pepper.density_par(x) for x in xs])
# Left: normalized perturbed vs unperturbed density; right: their ratio.
figsize(12,6)
subplot(1,2,1)
plot(xs,dens_1e5/numpy.sum(dens_1e5)/(xs[1]-xs[0]),lw=4.)
plot(xs,sdf_pepper._dens_unp/numpy.sum(sdf_pepper._dens_unp)/(xs[1]-xs[0]),lw=4.)
xlabel(r'$\theta_\parallel$')
subplot(1,2,2)
plot(xs,(dens_1e5/numpy.sum(dens_1e5))/(sdf_pepper._dens_unp/numpy.sum(sdf_pepper._dens_unp)),lw=4.)
xlabel(r'$\theta_\parallel$')
# Ratio of the perturbed to unperturbed mean parallel frequency.
mO_1e6= numpy.array([sdf_pepper.meanOmega(x,oned=True) for x in xs])
mO_unp= sdf_pepper._mO_unp
figsize(6,6)
plot(xs,mO_1e6/mO_unp,lw=4.)
xlabel(r'$\theta_\parallel$')
# 2D distribution p(Omega_parallel, theta_parallel) with the mean overlaid.
mO_1e6= numpy.array([sdf_pepper.meanOmega(x,oned=True) for x in xs])
Opars= numpy.linspace(0.,0.2,101)/bovy_conversion.freq_in_Gyr(V0,R0)
papars= numpy.linspace(0.,1.2,101)
y= numpy.array([sdf_pepper.pOparapar(Opars,a) for a in papars])
figsize(6,6)
bovy_plot.bovy_dens2d(y.T,
                      origin='lower',
                      cmap='afmhot_r',colorbar=True,
                      vmin=0.,
                      xrange=[papars[0],papars[-1]],
                      yrange=[Opars[0]*bovy_conversion.freq_in_Gyr(V0,R0),
                              Opars[-1]*bovy_conversion.freq_in_Gyr(V0,R0)],
                      zlabel=r'$p(\Omega\parallel,\theta_\parallel)$')
plot(xs,mO_1e6*bovy_conversion.freq_in_Gyr(V0,R0),lw=4.)
xlabel(r'$\theta_\parallel$')
ylabel(r'$\Omega_\parallel\,(\mathrm{Gyr}^{-1})$')
sdf_smooth.length(ang=True), sdf_smooth.length(phys=True), sdf_smooth.length()
# Inspect the frequency kicks of the individual impacts.
plot(sdf_pepper._sgapdfs_uniq[0]._kick_dOaparperp[:,2])
for ii in range(len(sdf_pepper._timpact)):
    plot(sdf_pepper._sgapdfs[ii]._kick_dOaparperp[:,2],color='k',alpha=0.1)
plot(sdf_pepper._sgapdfs_uniq[0]._kick_dOaparperp[:,2]/10.)
ylim(-0.0002,0.0002)
xlim(200,250)
sdf_pepper._sgapdfs_uniq[0]._nKickPoints
# Expected encounter counts per decade of subhalo mass.
dNencdm(10.**5.5), dNencdm(10.**6.5), dNencdm(10.**7.5), dNencdm(10.**8.5)
1./px/numpy.pi*180.
dNencdm(10.**5.5)
| dev/galpyGD1likePepper.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %matplotlib inline
#
#
# # Tutorial 05: Kernels and averages
#
# Simulating swarming models requires expensive mean-field convolution operations of the form:
#
# \begin{align}J^i = \frac{1}{N}\sum_{j=1}^N K(|X^j-X^i|) U^j,\end{align}
# for $1\leq i\leq N$, where $(X^i)_{1\leq i \leq N}$ are the positions of the particles, $(U^j)_{1\leq j\leq N}$ are given vectors and $K$ is an **observation kernel**. Typically, $K(|X^i-X^j|)$ is equal to 1 if $X^i$ and $X^j$ are at distance smaller than a fixed interaction distance and 0 otherwise. Other kernels are defined in the module :mod:`sisyphe.kernels`. Below, we show a simple application case.
#
# ## Linear local averages
#
# First, some standard imports...
#
#
#
# +
import time
import math
import torch
from matplotlib import pyplot as plt
# Run on GPU when available. NOTE(review): torch.cuda.FloatTensor / .type(dtype)
# is the legacy tensor-type API; modern code would pass device= instead.
use_cuda = torch.cuda.is_available()
dtype = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor
# -
# Let the $N$ particles be uniformly scattered in a box of size $L$ with interaction radius $R$.
#
#
#
# +
# N particles uniformly scattered in a periodic box of side L, interacting
# within radius R.
N = 100000
L = 1.
R = .15
pos = L*torch.rand((N,2)).type(dtype)
# -
# We can also assume that the particles have a bounded cone of vision around an axis (defined by a unit vector). The default behaviour is a full vision angle equal to $2\pi$ in which case the axis is a :data:`None` object. Here we take a cone of vision with angle $\pi/2$ around an axis which is sampled uniformly. For the :class:`sisyphe.particles.KineticParticles`, the default axis is the velocity.
#
#
# Vision cone of angle pi/2 around a uniformly sampled unit axis per particle.
angle = math.pi/2
axis = torch.randn(N,2).type(dtype)
# Normalize each row so the axes are unit vectors.
axis = axis/torch.norm(axis,dim=1).reshape((N,1))
# Let us create an instance of a particle system with these parameters.
#
#
# +
from sisyphe.particles import Particles
particles = Particles(
    pos = pos,
    interaction_radius = R,
    box_size = L,
    vision_angle = angle,
    axis = axis)
# -
# <div class="alert alert-info"><h4>Note</h4><p>By default, the system and the operations below are defined with periodic boundary conditions.</p></div>
#
#
# As a simple application, we can compute the number of neighbours of each particle and print the number of neighbours of the first particle. This operation is already implemented in the method :func:`number_of_neighbours() <sisyphe.particles.Particles.number_of_neighbours>`. It simply corresponds to the average:
#
# \begin{align}N^i_\mathrm{neigh} = \sum_{j=1}^N K(|X^j-X^i|).\end{align}
#
#
# +
# Neighbour count of every particle: N_i = sum_j K(|X^j - X^i|).
Nneigh = particles.number_of_neighbours()
Nneigh0 = int(Nneigh[0].item())  # neighbour count of particle 0
print("The first particle sees " + str(Nneigh0) + " other particles.")
# -
# For custom objects, the mean-field average can be computed using the method :func:`linear_local_average() <sisyphe.particles.Particles.linear_local_average>`. As an example, let us compute the center of mass of the neighbours of each particle. First we define the quantity $U$ that we want to average. Here, since we are working on a torus, there are two: the sine and the cosine of the spatial coordinates.
#
#
# Embed positions on the torus (cos/sin of rescaled coordinates) so that
# averages respect the periodic boundary conditions.
cos_pos = torch.cos((2*math.pi / L) * particles.pos)
sin_pos = torch.sin((2*math.pi / L) * particles.pos)
# Then we compute the two mean field averages, i.e. the standard convolution over the $N$ particles. The center of mass along each dimension is the argument of the complex number whose coordinates are the average cosine and sine.
#
#
# +
# Mean-field averages of the torus embeddings over each vision cone.
average_cos, average_sin = particles.linear_local_average(cos_pos, sin_pos)
# Recover the angular mean per coordinate, then map back to [0, L).
center_x = torch.atan2(average_sin[:,0], average_cos[:,0])
center_x = (L / (2*math.pi)) * torch.remainder(center_x, 2*math.pi)
center_y = torch.atan2(average_sin[:,1], average_cos[:,1])
center_y = (L / (2*math.pi)) * torch.remainder(center_y, 2*math.pi)
# (N, 2) tensor: periodic centre of mass of each particle's neighbours.
center_of_mass = torch.cat((center_x.reshape((N,1)), center_y.reshape((N,1))),
                           dim=1)
# -
# In the method :func:`linear_local_average() <sisyphe.particles.Particles.linear_local_average>`, the default observation kernel is a :class:`LazyTensor` of size $(N,N)$ whose $(i,j)$ component is equal to 1 when particle $j$ belongs to the cone of vision of particle $i$ and 0 otherwise. To retrieve the indexes of the particles which belong to the cone of vision of the first particle, we can use the `K-nearest-neighbours reduction <https://www.kernel-operations.io/keops/_auto_tutorials/knn/plot_knn_mnist.html#sphx-glr-auto-tutorials-knn-plot-knn-mnist-py>`_ provided by the `KeOps <https://www.kernel-operations.io/keops/index.html>`_ library.
#
#
# +
from sisyphe.kernels import lazy_interaction_kernel
# Lazy (N, N) kernel: entry (i, j) is 1 when particle j lies inside
# particle i's vision cone, 0 otherwise.
interaction_kernel = lazy_interaction_kernel(
    particles.pos,
    particles.pos,
    particles.R,
    particles.L,
    boundary_conditions = particles.bc,
    vision_angle = particles.angle,
    axis = particles.axis)
# Flip 0/1 so neighbours get the SMALLEST values, then use a K-smallest
# reduction to retrieve the indices of particle 0's neighbours.
K_ij = 1. - interaction_kernel
neigh0 = K_ij.argKmin(Nneigh0, dim=1)[0]
print("The indexes of the neighbours of the first particles are: ")
print(neigh0)
# -
# Finally, a fancy display of what we have computed. We plot the full particle system in black, the first particle in orange, its neighbours in blue and the center of mass of the neighbours in red.
#
#
# +
# Scatter plot: all particles (black), neighbours of particle 0 (blue),
# particle 0 itself (orange) and its neighbours' centre of mass (red).
xall = particles.pos[:,0].cpu().numpy()
yall = particles.pos[:,1].cpu().numpy()
x = particles.pos[neigh0,0].cpu().numpy()
y = particles.pos[neigh0,1].cpu().numpy()
x0 = particles.pos[0,0].item()
y0 = particles.pos[0,1].item()
xc = center_of_mass[0,0].item()
yc = center_of_mass[0,1].item()
fig, ax = plt.subplots(figsize=(6,6))
ax.scatter(xall, yall, s=.003, c='black')
ax.scatter(x, y, s=.3)
ax.scatter(x0, y0, s=24)
ax.scatter(xc, yc, s=24, c='red')
ax.axis([0, L, 0, L])
ax.set_aspect("equal")
# -
# ## Nonlinear averages
#
# In some cases, we need to compute a **nonlinear average** of the form
#
# \begin{align}J^i = \frac{1}{N}\sum_{j=1}^N K(|X^j-X^i|) b(U^i,V^j),\end{align}
#
# where $(U^i)_{1\leq i \leq N}$ and $(V^j)_{1\leq j \leq N}$ are given vectors and $b$ is a given function. When the **binary formula** $b$ can be written as a :class:`LazyTensor`, this can be computed with the method :func:`nonlinear_local_average() <sisyphe.particles.Particles.nonlinear_local_average>`.
#
# For instance, let us compute the local mean square distance:
#
# \begin{align}J^i = \frac{\sum_{j=1}^N K(|X^j-X^i|) |X^j-X^i|^2}{\sum_{j=1}^N K(|X^j-X^i|)}.\end{align}
#
# In this case, we can use the function :func:`sisyphe.kernels.lazy_xy_matrix` to define a custom binary formula. Given two vectors $X=(X^i)_{1\leq i\leq M}$ and $Y = (Y^j)_{1\leq j\leq N}$, respectively of sizes $(M,d)$ and $(N,d)$, the $XY$ matrix is a $(M,N,d)$ LazyTensor whose $(i,j,:)$ component is the vector $Y^j-X^i$.
#
#
#
# +
from sisyphe.kernels import lazy_xy_matrix
def b(x,y):
    # Binary formula b(x, y): squared (periodic) pairwise distance,
    # built from the lazy (M, N, d) difference matrix Y^j - X^i.
    K_ij = lazy_xy_matrix(x,y,particles.L)
    return (K_ij ** 2).sum(-1)
x = particles.pos
y = particles.pos
# Rescale by N / Nneigh to turn the 1/N mean-field sum into a local MEAN
# over actual neighbours.
mean_square_dist = N/Nneigh.reshape((N,1)) * particles.nonlinear_local_average(b,x,y)
# -
# Since the particles are uniformly scattered in the box, the theoretical value is
#
# \begin{align}MSD_0 = \frac{\int_0^R \int_0^{\pi/2} r^3 \mathrm{d}r\mathrm{d}\theta}{\int_0^R \int_0^{\pi/2} r \mathrm{d}r\mathrm{d}\theta} = \frac{R^2}{2}\end{align}
#
#
#
# Compare with the closed-form value R^2/2 for uniformly scattered particles.
print("Theoretical value: " + str(R**2/2))
print("Experimental value: " + str(mean_square_dist[0].item()))
| doc/_auto_tutorials/plot_e_kernels.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Training networks of PRC neurons to recognize spoken digits
#
# This notebook contains code for Fig. 8 of [<NAME> _et al_. (2021)](https://doi.org/10.1101/2021.03.25.437091), which shows that multi-layered neural networks containing PRC neurons can be trained to recognize sounds and generalize well to new examples.
#
# To reproduce these findings, first run `heidelberg.py` to train several networks of PRC neurons.
# +
import os
from copy import deepcopy
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.gridspec as gs
from matplotlib.patheffects import Normal, Stroke
import numpy as np
import seaborn as sns
from ezephys import pltools
import heidelberg
# -
# ## Inspect Heidelberg spoken digits data
#
# The dataset we'll use to evaluate the generalization performance of networks containing PRC neurons is the [Heidelberg digits dataset](https://doi.org/10.1109/TNNLS.2020.3044364).
# +
# Heidelberg spoken-digits dataset: train/test HDF5 files of spike rasters.
digits = heidelberg.Data(
    os.path.join('..', 'data', 'shd_train.h5'),
    os.path.join('..', 'data', 'shd_test.h5'),
)
def get_minibatch():
    """Return the first (x, y) minibatch of the training set, unshuffled."""
    with digits as d:
        batches = heidelberg.sparse_data_generator_from_hdf5_spikes(
            d.x_train,
            d.y_train,
            heidelberg.SWEEP_DURATION,
            shuffle=False
        )
        # Only the first batch is needed; don't iterate the rest.
        x, y = next(iter(batches))
    return x, y
# -
x, y = get_minibatch()
# +
def tensor_to_events(tensor):
    # Convert a (units, time) 0/1 raster into per-unit spike-time lists for
    # plt.eventplot. NOTE(review): relies on np.nonzero(row) returning an
    # object with .flatten() -- true for the torch tensors used in this
    # notebook via dispatch, but np.nonzero on a plain 1-D ndarray returns
    # a tuple; confirm before reusing with numpy inputs.
    return [np.nonzero(tensor[i, :]).flatten() for i in range(tensor.shape[0])]
def savefig(fname, **pltargs):
    """Save the current matplotlib figure as both <fname>.png and <fname>.svg."""
    for ext in ('.png', '.svg'):
        plt.savefig(fname + ext, dpi=300, bbox_inches='tight', **pltargs)
# -
# Word labels for the 20 spoken-digit classes; tuple index == class id.
numbers = (
    'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight', 'nine', 'ten',
    'eleven', 'twelve', 'thirteen', 'fourteen', 'fifteen', 'sixteen', 'seventeen', 'eighteen', 'nineteen'
)
# +
# Raster plots of three hand-picked training examples.
plt.figure(figsize=(3, 1.7), dpi=120)
for i, example in enumerate([14, 221, 53]):
    plt.subplot(1, 3, i+1)
    plt.title(f'\"{numbers[y[example]].capitalize()}\"')
    plt.eventplot(tensor_to_events(x.to_dense()[example, ...].T), color='k')
    if i == 0:
        # Only the leftmost panel keeps its y axis.
        plt.yticks([0, 350, 700])
        plt.ylabel('Input units')
        pltools.hide_border('trb', trim=True)
    else:
        pltools.hide_border()
plt.tight_layout()
savefig(os.path.join('..', 'data', 'heidelberg_examples'))
plt.show()
# -
# ## Load training set performance
# +
DATA_FILE_PREFIX = os.path.join('..', 'data', 'heidelberg_training_results_')
# One CSV of per-epoch training metrics per random seed; tag each frame
# with its seed and stack them into a single long DataFrame.
performance = pd.concat(
    [pd.read_csv(f'{DATA_FILE_PREFIX}{i}.csv').assign(seed=i) for i in range(3)]
).reset_index(drop=True)
# -
performance.head()
# Mean over seeds of the final-epoch (199) training metrics, per model.
performance.query('epoch == 199').groupby(['model_name']).mean()
# ## Load test set performance
# +
DATA_FILE_PREFIX = os.path.join('..', 'data', 'heidelberg_test_accuracy_')
# Test-set accuracy per seed, stored as JSON keyed by model name.
test = []
for i in range(3):
    single_seed_performance = pd.read_json(DATA_FILE_PREFIX + str(i) + '.json', orient='index')
    single_seed_performance['seed'] = i
    test.append(single_seed_performance)
test = pd.concat(test)
# The JSON index holds the model names; turn it into a proper column.
test['model_name'] = test.index
test.reset_index(drop=True, inplace=True)
# Long format: one row per (model, seed, initial/final) accuracy.
test = test.melt(['model_name', 'seed'], ['initial', 'final'], var_name='time', value_name='accuracy')
test.head()
# -
test.groupby(['model_name', 'time']).mean()
# ## Extract performance before and after training
#
# Used for a swarmplot.
# +
# Training accuracy at the first and last epoch, relabelled for plotting.
train_plot_data = performance.query('epoch in [0, 199]').copy()
train_plot_data.loc[:, 'Epoch'] = train_plot_data.epoch.astype(str)
train_plot_data.replace('0', 'Before training', inplace=True)
train_plot_data.replace('199', 'After training', inplace=True)
# Final test accuracy, given the same 'Epoch' category column so the two
# frames can be concatenated.
test_plot_data = test.query('time == \"final\"').copy()
test_plot_data.rename(columns={'time': 'Epoch'}, inplace=True)
test_plot_data.replace('final', 'Test data', inplace=True)
swarmplot_data = pd.concat([train_plot_data, test_plot_data])
del train_plot_data, test_plot_data
swarmplot_data.reset_index(inplace=True, drop=True)
swarmplot_data.head()
# -
# ## Prepare the figure
# Fixed colour per model variant so panels stay comparable across figures.
palette = {
    'One compartment': 'gray',
    'No BAP': 'xkcd:ocean',
    'BAP': 'xkcd:cherry',
    'Parallel subunits, no BAP': 'xkcd:iris',
    'Parallel subunits + BAP (full PRC model)': 'xkcd:blood orange'
}
# +
def performance_lineplot(model_name, metric_loss=True, ax=None, **pltargs):
    """Plot one training curve per random seed for the given model.

    model_name  -- value matched against the 'model_name' column of the
                   module-level `performance` DataFrame.
    metric_loss -- True plots 'loss', False plots 'accuracy'.
    ax          -- target Axes; defaults to the current Axes.
    pltargs     -- forwarded to ax.plot (may include 'label').
    """
    if ax is None:
        ax = plt.gca()
    if metric_loss:
        metric = 'loss'
    else:
        metric = 'accuracy'
    # pandas query resolves @model_name from the local scope.
    this_model = performance.query('model_name == @model_name')
    if 'label' not in pltargs:
        pltargs['label'] = model_name
    for seed in this_model.seed.unique():
        this_seed = this_model.query('seed == @seed')
        ax.plot(
            this_seed.epoch,
            this_seed[metric],
            # pop() removes 'label' on the first iteration, so only the
            # first seed's line carries the legend entry.
            label=pltargs.pop('label', None),
            **pltargs
        )
def performance_bandplot(model_name, metric_loss=True, ax=None, **pltargs):
    """Plot the across-seed mean training curve for the given model.

    NOTE(review): despite the name, only the mean line is drawn -- there is
    no shaded band, and `alpha` is popped, capped at 0.8 and then never
    used. Presumably a fill_between band was removed at some point; confirm
    whether the alpha handling should be deleted or the band restored.
    """
    if ax is None:
        ax = plt.gca()
    if metric_loss:
        metric = 'loss'
    else:
        metric = 'accuracy'
    # Computed but unused (see docstring note); popping does strip 'alpha'
    # from pltargs, so the plotted line is fully opaque.
    alpha = min(pltargs.pop('alpha', 1), 0.8)
    this_model = performance.query('model_name == @model_name').sort_values(
        'epoch'
    )
    if 'label' not in pltargs:
        pltargs['label'] = model_name
    # Mean metric per epoch across seeds.
    this_mean = (
        this_model.groupby(['model_name', 'epoch']).mean().sort_values('epoch')
    )
    label = pltargs.pop('label', None)
    maincolor = pltargs.pop('color', None)
    ax.plot(
        this_mean.index.get_level_values('epoch'),
        this_mean[metric],
        color=maincolor,
        label=label,
        **pltargs
    )
# +
plt.figure(figsize=(4, 2), dpi=120)
plt.subplot(121)
# Dashed line at 0.05 = chance level for the 20 digit classes.
plt.axhline(0.05, color='k', ls='--', dashes=(10, 5), lw=0.7, zorder=-1)
for model_name in palette:
    performance_bandplot(
        model_name, metric_loss=False,
        color=palette[model_name], alpha=0.4, clip_on=False
    )
#plt.legend(loc='upper left', bbox_to_anchor=(1, 1))
plt.ylim(0, 1)
plt.ylabel('Accuracy')
plt.xticks([0, 100, 200])
plt.xlabel('Epoch')
pltools.hide_border('tr')
plt.subplot(122)
plt.axhline(0.05, color='k', ls='--', dashes=(10, 5), lw=0.7, zorder=-1)
plt.subplots_adjust(wspace=0.2)
# Per-seed accuracies before/after training and on the test set.
sns.swarmplot(
    x='model_name',
    y='accuracy',
    hue='Epoch',
    clip_on=False,
    data=swarmplot_data,
)
plt.legend(title='', loc='upper left', bbox_to_anchor=(1, 1))
plt.ylim(0, 1)
plt.gca().set_yticklabels([])
plt.ylabel('')
plt.xticks(rotation=45, ha='right')
pltools.hide_border('tr')
savefig(os.path.join('..', 'data', 'heidelberg_performance_comparison'))
plt.show()
# -
| training/heidelberg-results.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/rizavelioglu/hateful_memes-hate_detectron/blob/main/notebooks/%5BGitHub%5Dbenchmarks.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + [markdown] id="kE9_rJZfF_Rf"
# ## **Install MMF**
# + id="CtyIgblgvdoY"
# !git clone https://github.com/facebookresearch/mmf.git
# + id="2eVZPra-wMgt"
import os
# Work from the cloned repo root so the editable install can find setup.py.
os.chdir("mmf")
# !pip install --editable .
# + [markdown] id="FyX6Qos3Olyg"
# ---
# ## **Download the dataset (Phase2) & convert it into *MMF* format**
# + id="pruHlZCZht3p" cellView="form"
#@markdown ---
#@title <h1><b><font color='red'> --Action required!-- </b></font></h1> { run: "auto" }
#@markdown First, please specify the download link and the `.zip` password which both can be taken from [DrivenData](https://www.drivendata.org/competitions/70/hateful-memes-phase-2/data/)
YOUR_LINK_TO_DOWNLOAD_PHASE2_DATA = '' #@param {type:"string"}
PASSWORD_OF_ZIP = '' #@param {type:"string"}
# + id="XutqDqeyJrNm"
# !wget -O XjiOc5ycDBRRNwbhRlgH.zip --no-check-certificate --no-proxy "$YOUR_LINK_TO_DOWNLOAD_PHASE2_DATA"
# + id="aDzGAepgJrNp"
# !mmf_convert_hm --zip_file="XjiOc5ycDBRRNwbhRlgH.zip" --password=$PASSWORD_OF_ZIP --bypass_checksum 1
# + [markdown] id="dQfmpANWZO7i"
# ---
# ## **Finetuning from a pretrained model & Generating Submission for the Challenge**
# https://mmf.sh/docs/tutorials/checkpointing/
#
#
# > **Example**
# https://github.com/apsdehal/hm_example_mmf
#
#
# After we trained the model and evaluated on the validation set, we will generate the predictions on the test set. The prediction file should contain the following three columns:
#
# - Meme identification number, id
# - Probability that the meme is hateful, proba
# - Binary label that the meme is hateful (1) or non-hateful (0), label
#
# > With MMF you can directly generate the predictions in the required submission format with the following command:
#
# **Note**: This command will output where the generated predictions csv file is stored.
# + id="8phSchWH9FN7"
# Free up the disk by removing .zip, .tar files
# !rm -rf /root/.cache/torch/mmf/data/datasets/hateful_memes/defaults/features/features.tar.gz
# !rm -rf /root/.cache/torch/mmf/data/datasets/hateful_memes/defaults/images/XjiOc5ycDBRRNwbhRlgH.zip
# !rm -rf /content/mmf/XjiOc5ycDBRRNwbhRlgH.zip
# !rm -rf /root/.cache/torch/mmf/data/datasets/hateful_memes/defaults/extras.tar.gz
# + [markdown] id="OvN397kXT-S5"
# ### <font color='magenta'> <b> VisualBERT </b> </font>
# + id="9M1a1hRZNgHB"
import os
# Return to /content before launching training from the notebook.
os.chdir("/content")
# !mmf_run config="projects/visual_bert/configs/hateful_memes/from_coco.yaml" \
# model="visual_bert" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# training.tensorboard=True \
# env.tensorboard_logdir="logs/fit/" \
# training.checkpoint_interval=100 \
# training.evaluation_interval=100 \
# checkpoint.max_to_keep=1 \
# training.max_updates=3000 \
# training.log_interval=100 \
# checkpoint.resume_zoo=visual_bert.pretrained.coco.fifty_pc \
# training.lr_ratio=0.3 \
# dataset_config.hateful_memes.annotations.train[0]="hateful_memes/defaults/annotations/train.jsonl" \
# dataset_config.hateful_memes.annotations.val[0]="hateful_memes/defaults/annotations/dev_unseen.jsonl" \
# dataset_config.hateful_memes.annotations.test[0]="hateful_memes/defaults/annotations/test_unseen.jsonl" \
# + [markdown] id="w5BfCb_YdeX4"
# ##### **Visualize losses/accuracy via Tensorboard**
# + id="jeFuYAVzI_Nx"
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# + id="OVB4eQ1gOw4j"
# %tensorboard --logdir logs/fit
# + [markdown] id="82U--UJSAJbI"
# ##### **Running validation using the trained model**
# + id="0Q2jIOaqANR2"
# !mmf_run config="projects/visual_bert/configs/hateful_memes/defaults.yaml" \
# model="visual_bert" \
# dataset=hateful_memes \
# run_type=val \
# checkpoint.resume_file="/content/save/best.ckpt" \
# dataset_config.hateful_memes.annotations.test[0]="hateful_memes/defaults/annotations/test_unseen.jsonl" \
# dataset_config.hateful_memes.annotations.val[0]="hateful_memes/defaults/annotations/dev_unseen.jsonl" \
# dataset_config.hateful_memes.features.train[0]="/content/features" \
# dataset_config.hateful_memes.features.val[0]="/content/features" \
# dataset_config.hateful_memes.features.test[0]="/content/features" \
# + [markdown] id="CBhK1UPCcLCd"
# ##### **Generate predictions for the Challenge**
# + id="XmaeQY_GcS93"
# !mmf_predict config="projects/visual_bert/configs/hateful_memes/defaults.yaml" \
# model="visual_bert" \
# dataset=hateful_memes \
# run_type=test \
# checkpoint.resume_file="/content/mmf/save/best.ckpt"
# + [markdown] id="btDZNM5jhUbk"
# ---
# ### <font color='magenta'> <b> ViLBERT </b> </font>
# + id="qq7UMTzRd7TW"
"""
https://github.com/facebookresearch/mmf/tree/master/projects/pretrain_vl_right
Pretrained Model: Pretrained Key
--------------------------------
Masked COCO 100% : vilbert.pretrained.coco
Masked VQA2 100% : vilbert.pretrained.vqa2
Masked CC 100% : vilbert.pretrained.cc.full
"""
# !export OC_DISABLE_DOT_ACCESS_WARNING=1
# !mmf_run config="projects/vilbert/configs/hateful_memes/from_cc.yaml" \
# model="vilbert" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# training.tensorboard=True \
# env.tensorboard_logdir="logs/fit/" \
# training.checkpoint_interval=500 \
# training.evaluation_interval=200 \
# checkpoint.max_to_keep=1 \
# training.max_updates=10000 \
# training.log_interval=50 \
# checkpoint.resume_zoo=vilbert.pretrained.cc.full \
# training.lr_ratio=0.3 \
# + [markdown] id="DLckFL32OylY"
# ##### **Visualize losses/accuracy via Tensorboard**
# + id="CMKeQv3gOylq"
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# + id="xjBlELRrOylz"
# %tensorboard --logdir logs/fit
# + [markdown] id="FAUlE_OrUNQf"
# ---
# ### <font color='magenta'> <b> MMBT </b> </font>
# + id="i2OcnOjpbffp"
"""
To train MMBT model with Faster RCNN region features on the Hateful Memes dataset, run the following command
"""
# To train MMBT model with grid features on the Hateful Memes dataset, run the following command
# mmf_run config=projects/mmbt/configs/hateful_memes/defaults.yaml run_type=train_val dataset=hateful_memes model=mmbt
# !mmf_run config="projects/mmbt/configs/hateful_memes/with_features.yaml" \
# model="mmbt" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# training.tensorboard=True \
# env.tensorboard_logdir="logs/fit/" \
# training.checkpoint_interval=2000 \
# training.evaluation_interval=2000 \
# + [markdown] id="HpskfGOMdoks"
# ##### **Visualize losses/accuracy via Tensorboard**
# + id="bfD8smLPbn1q"
# Load the TensorBoard notebook extension
# %load_ext tensorboard
# + id="tnndNj19boMO"
# %tensorboard --logdir logs/fit
# + [markdown] id="GJlMuZ_tbvlc"
# ##### **Running validation using the trained model**
# + id="YJJ0WWXybvle"
# !mmf_run config="projects/mmbt/configs/hateful_memes/with_features.yaml" \
# model="mmbt" \
# dataset=hateful_memes \
# run_type=val \
# checkpoint.resume_file="/content/mmf/save/best.ckpt"
# + [markdown] id="SyR4Q5blcnkQ"
# ##### **Generate predictions for the Challenge**
# + id="Fa4VVVvNAZKq"
# !mmf_predict config="projects/mmbt/configs/hateful_memes/with_features.yaml" \
# model="mmbt" \
# dataset=hateful_memes \
# run_type=test \
# checkpoint.resume_file="/content/mmf/save/best.ckpt"
# + [markdown] id="ApuIb9gThV2f"
# ---
# ### <font color='magenta'> <b> ConcatBERT </b> </font>
# + id="ZatY_Cm5uuIz"
import os
os.chdir("/content")
# !mmf_run config="projects/others/concat_bert/hateful_memes/defaults.yaml" \
# model="concat_bert" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# training.tensorboard=True \
# env.tensorboard_logdir="logs/fit/" \
# training.checkpoint_interval=100 \
# training.evaluation_interval=100 \
# checkpoint.max_to_keep=1 \
# training.max_updates=3000 \
# training.log_interval=100 \
# checkpoint.resume_zoo=visual_bert.pretrained.cc.full \
# training.lr_ratio=0.3 \
# dataset_config.hateful_memes.annotations.train[0]="hateful_memes/defaults/annotations/train.jsonl" \
# dataset_config.hateful_memes.annotations.val[0]="hateful_memes/defaults/annotations/dev_unseen.jsonl" \
# dataset_config.hateful_memes.annotations.test[0]="hateful_memes/defaults/annotations/test_unseen.jsonl" \
# + [markdown] id="qDk4s7aohs6w"
# ---
# ### <font color='magenta'> <b> ConcatBOW </b> </font>
# + id="lN-fm7LBhtEL"
# !mmf_run config="projects/others/concat_bow/hateful_memes/defaults.yaml" \
# model="concat_bow" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# + [markdown] id="xV-sj_wdhwCK"
# ---
# ### <font color='magenta'> <b> Late Fusion </b> </font>
# + id="MqIcPT2rhwMw"
# !mmf_run config="projects/others/late_fusion/hateful_memes/defaults.yaml" \
# model="late_fusion" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
# + [markdown] id="jwcXrl0jh51I"
# ---
# ### <font color='magenta'> <b> CNN-LSTM </b> </font>
# + id="0JL7SFdQh58p"
# !mmf_run config="projects/others/cnn_lstm/hateful_memes/defaults.yaml" \
# model="cnn_lstm" \
# dataset=hateful_memes \
# run_type=train_val \
# training.batch_size=32 \
| notebooks/[GitHub]benchmarks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Занятие 6. Линейная регрессия
# ===========
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.style.use('ggplot')
# %matplotlib inline
# ### Предсказание цены домов в Альбукерке
# Load the housing data; the value -9999 encodes "missing" in this file.
df = pd.read_csv('Albuquerque Home Prices.txt', sep='\t')
df = df.replace(-9999, np.nan)
df.head()
# Прежде, чем строить модель, нужно решить вопрос с пропущенными значениями. Сначала посчитаем, сколько строчек их содержат.
# print() calls (valid in Python 2.7 and 3); the original used Python-2
# print statements, which are a SyntaxError under Python 3.
print('Rows in the data frame: {0}'.format(len(df)))
print('Rows without NAN: {0}'.format(len(df.dropna(how='any'))))
# Таким образом, около половины наших данных содержат хотя бы одно пропущенное значение. Учитывая то, что данных относительно мало, выбрасывать все такие строчки, было бы слишком расточительно.
#
# Попробуем копнуть глубже и посмотреть на распределение пропущенных значений по колонкам.
#
# Функция ```DataFrame.apply``` применяет указанную функцию ко всей таблице. Первый аргумент -- применяемая функция, второй аргумент -- направление её применения (0 -- применить к колонкам, 1 -- ко строчкам).
# Count missing values per column (axis=0 applies the lambda column-wise).
df.apply(lambda x: sum(x.isnull()), axis=0)
# Для определения функции здесь использовано так называемое лямбда-выражение (лямбда-функция). Это способ записать короткую функцию в одну строчку, не задавая её имени. В данном случае такое лямбда-выражение эквивалентно следующему определению функции:
#
# `
# def f(x):
# return sum(x.isnull())
# `
# Таким образом, видим, что источниками пропущенных значений являются колонки ```AGE``` и ```TAX```. Раз в колонке ```AGE``` так много пропусков, то проще её будет убрать из анализа, чем пытаться заделать эти дырки.
# Drop AGE entirely: it has too many missing values to repair.
del df['AGE']
# The TAX column, however, can be fixed. First look at its distribution.
df['TAX'].hist()
# The distribution looks roughly normal, so impute missing values with the
# column mean (pandas Series.fillna replaces every NaN with the given value).
df['TAX'] = df['TAX'].fillna(df['TAX'].mean())
# Sanity check: dropna removes nothing now, so no NaNs remain.
len(df.dropna())
# #### Построение модели
# Теперь можно переходить непосредственно к построению модели.
from sklearn.linear_model import LinearRegression
# Predictors: every column except the target PRICE.
X = df.drop('PRICE', axis=1)
y = df['PRICE']
model = LinearRegression()
model.fit(X, y)
# Считаем качество модели (коэффициент $R^2$).
# print() call for Python 2/3 compatibility (was a Py2 print statement).
print('R^2: {0}'.format(model.score(X, y)))
# Выведем регрессионные коэффициенты от метода ```model.coef_``` и свободный член от метода ```model.intercept_```.
# Coefficient table: intercept first, then one row per predictor.
# zip(...) is wrapped in list(...) so the lazy Python-3 zip object is
# materialised explicitly before handing it to the DataFrame constructor.
coef = pd.DataFrame(list(zip(['intercept'] + X.columns.tolist(),
                             [model.intercept_] + model.coef_.tolist())),
                    columns=['predictor', 'coef'])
coef
# К сожалению, в ```sklearn``` не предусмотрена процедура определения статистической значимости регрессионных коэффициентов. Поэтому нужно это делать либо руками, вооружившись знанием о распределении коэффициентов, либо воспользовавшись моделью из пакета ```statsmodels```.
# +
from scipy import stats
def regression_coef(model, X, y):
    """Return a DataFrame of fitted OLS coefficients with two-sided p-values.

    Parameters
    ----------
    model : fitted regressor exposing `intercept_`, `coef_` and `predict`.
    X : pandas DataFrame of predictors (n rows, p columns).
    y : target vector of length n.

    Returns a DataFrame with columns ['predictor', 'coef', 'pvalue'];
    the first row is the intercept.

    Fixes vs. the original: residual degrees of freedom are n - p - 1
    (the intercept is an estimated parameter too; the original divided by
    n - p, biasing the MSE, and used n - 1 for the t distribution).
    """
    coef = pd.DataFrame(list(zip(['intercept'] + X.columns.tolist(),
                                 [model.intercept_] + model.coef_.tolist())),
                        columns=['predictor', 'coef'])
    # Design matrix with a leading column of ones for the intercept.
    X1 = np.append(np.ones((len(X), 1)), X, axis=1)
    b = np.append(model.intercept_, model.coef_)
    # Residual degrees of freedom: n observations minus (p predictors + 1).
    dof = float(X.shape[0] - X.shape[1] - 1)
    MSE = np.sum((model.predict(X) - y) ** 2, axis=0) / dof
    # Standard errors from the diagonal of MSE * (X'X)^-1.
    var_b = MSE * (np.linalg.inv(np.dot(X1.T, X1)).diagonal())
    sd_b = np.sqrt(var_b)
    t = b / sd_b
    # Two-sided p-value under the t distribution with `dof` degrees of freedom.
    coef['pvalue'] = [2 * (1 - stats.t.cdf(np.abs(i), dof)) for i in t]
    return coef
regression_coef(model, X, y)
# -
# Как интерпретировать модель? У нас есть 3 значимых предиктора: ```SQFT```, ```CUST``` и ```TAX```. Значение коэффициента говорит, на сколько вырастет значение сигнала при увеличении предиктора на одну единицу. Смысл первых двух коэффициентов понятен: площадь и специальный вид постройки ожидаемо влияют на цену. Но есть ли смысл включать в предсказание цены дома налог с этого же дома? Ведь налог, как и цена, также начисляется исходя из похожих предикторов. Они должны сильно коррелировать. И действительно. Построим таблицу парных корреляций предикторов.
df.corr()
# Налоги сильно коррелируют с площадью. Предлагается откинуть налоги.
# Refit using only the two predictors that are not collinear with TAX.
X = df[['SQFT', 'CUST']]
model.fit(X, y)
# print() call for Python 2/3 compatibility (was a Py2 print statement).
print(model.score(X, y))
regression_coef(model, X, y)
# ### Полиномиальная регрессия
# Diamond ring data: whitespace-separated file with no header row.
df = pd.read_csv('diamond.dat', header=None, sep='\s+', names=['weight', 'price'])
df.head()
# Будем предсказывать цену кольца с бриллиантом от веса бриллианта. Попробуем предсказать результат не только с помощью самой переменной, но и с помощью её степеней. Первая модель будет зависеть только от самой переменной ```weight```, вторая -- от переменной ```weight^2```, а третья -- от двух этих переменных сразу.
#
# В этих случаях, когда мы хотим попробовать в качестве фичей их степени и произведения фич, в ```sklearn.preprocessing``` есть класс ```PolynomialFeatures```. Метод ```fit_transform``` этого класса сгенерирует из данного множества фич множество одночленов заданной степени. Например, для степени 2 и фич ```a```, ```b``` будут сгенерированы фичи ```[a, b, a^2, b^2, ab]```, а при указанном параметре ```include_bias=True``` ещё и вектор-свободный член из единиц. Для данной задачи, конечно, проще было сгенерить один столбец из квадратов значений колонки ```weight```. Но нам важно ознакомиться с классом ```PolynomialFeatures``` для дальнейшего применения.
from sklearn.preprocessing import PolynomialFeatures
poly = PolynomialFeatures(
    # Maximum polynomial degree
    degree=2,
    # Do not generate the bias (intercept) column
    include_bias=False)
y = df['price']
# Expand the single `weight` column into [weight, weight^2].
X0 = poly.fit_transform(df[['weight']])
X0 = pd.DataFrame(X0, columns=['weight', 'weight^2'])
X0.head()
# Three candidate designs built from the polynomial features.
X0 = [
    # The original weight variable only
    X0[['weight']],
    # weight^2 only
    X0[['weight^2']],
    # Both weight and weight^2
    X0.copy()]
models = [LinearRegression() for _ in X0]
for X, model in zip(X0, models):
    model.fit(X, y)
    # print() call for Python 2/3 compatibility (was a Py2 print statement).
    print(model.score(X, y))
# Получилось, что $R^2$ во всех моделях примерно одинаков. Однако не все они одинаково полезны.
# Coefficient significance for each of the three fitted models.
regression_coef(models[0], X0[0], y)
regression_coef(models[1], X0[1], y)
regression_coef(models[2], X0[2], y)
# Cross-check with statsmodels OLS, which reports p-values directly.
import statsmodels.api as sm
X2 = sm.add_constant(X0[2])
est = sm.OLS(y, X2)
est2 = est.fit()
print(est2.summary())
# ### Прогнозирование временных рядов.
df = pd.read_csv('series_g.csv', sep=';')
df.head()
# Parse the date strings into datetime objects.
# format='%b %Y' means an abbreviated month name (%b) followed by a year (%Y).
df['date'] = pd.to_datetime(df['date'], format='%b %Y')
# Построим графики объёма пассажироперевозок и проверим, какой тип тренда (линейный или нет) и какой тип сезонности ( аддитивный или мультипликативный), наблюдается. По первому графику уже прослеживается линейный тренд и мультипликативная сезонность. Но чтобы окончательно убедиться в последнем, добавим график логарифма от этой же величины. После логирафмирования циклы стали одинаковой высоты, а это и говорит о мультипликативном харакрете сезонности.
# +
# Left: raw passenger volume (linear trend, multiplicative seasonality).
# Right: log10 of the volume -- the seasonal cycles become equal height,
# confirming the multiplicative structure.
fig = plt.figure(figsize=(12, 4))
ax1 = fig.add_subplot(121)
df['series_g'].plot(ax=ax1)
ax1.set_title(u'Объём пассажироперевозок')
ax1.set_ylabel(u'Тысяч человек')
ax2 = fig.add_subplot(122)
pd.Series(np.log10(df['series_g'])).plot(ax=ax2)
ax2.set_title(u'log10 от объёма пассажироперевозок')
ax2.set_ylabel(u'log10 от тысяч человек')
# -
# Вывод: будем строить модель линейной регрессии для приближения логарифма от объёма перевозок. То есть
# $$\log y_i = \beta x_i + c(x_i) + \varepsilon_i,$$
# где $y_i$ -- объём перевозок, $x_i$ -- порядковый номер месяца, $c(x_i)$ -- сезонная составляющая, $\varepsilon_i$ -- случайный шум.
# Для удобства дальнейшего использования создадим дополнительно 12 новых месяцев для построения прогноза на них. Для этого создадим эти 12 новых дат с помощью функции ```pd.date_range```. Данный объект будет объектом класса ```DateTimeIndex``` (наследованный от класса ```Index```), и чтобы объединить их с колонкой ```df['date']```, принадлежащей классу ```datetime64```, придётся привести последнюю к классу ```Index```. Объединим два набора дат и сохраним их в объекте ```new_dates```.
#
# Далее создадим фиктивный датафрейм ```df2```, состоящий из одной колонки с этими новыми датами, и приклеим его к исходному датафрейму ```df``` с помозью функции ```pd.merge```. Эта функция склеивает два датасета по указанному набору колонок (параметр ```on```) и по указанному правилу склейки (параметр ```how```). В ```on``` указываем одну общую колонку ```date```, по которой нужно произвести склейку. В ```how``` указываем ```right```, что означает следующее:
# возьми весь правый датасет и приклей к нему левый датасет по условию совпадения значений колонки ```on```, а в случае если для значений из правой колонки ```on``` не найдётся соотвествующих значений в левой колонке ```on```, то тогда приклей ```NaN``` значения. Вообще говоря, опция ```how``` соответствует опциям ```JOIN``` в языке SQL (```LEFT JOIN```, ```RIGHT_JOIN```, ```INNER JOIN```, ```OUTER_JOIN```).
# Build a monthly date sequence; freq='MS' = first day of each month.
new_dates = pd.date_range('1961-01-01', '1961-12-01', freq='MS')
# Convert df['date'] to an Index and union it with the 12 new months.
new_dates = pd.Index(df['date']) | new_dates
# Single-column frame holding the extended date range.
df2 = pd.DataFrame({'date': new_dates})
# Right-join on 'date': keeps every extended date, filling the unknown
# future rows with NaN (like a SQL RIGHT JOIN).
df = pd.merge(df, df2, on='date', how='right')
# Regression variable: sequential month number; target: log10 of volume.
df['month_num'] = range(1, len(df) + 1)
df['log_y'] = np.log10(df['series_g'])
# Создадим 12 колонок ```season_1```, ```season_2```, ..., ```season_12```, в которые поместим индикаторы соответствующего месяца. Чтобы достать порядковый номер месяца в каждой строчке, применим последовательно пару методов ```dt``` и ```month``` к колонке ```df['date']```. Внутри цикла будем проверять, равен ли очередной месяц текущему значению из цикла.
# range() instead of the Python-2-only xrange() (a NameError under Py3);
# one boolean indicator column per calendar month.
for x in range(1, 13):
    df['season_' + str(x)] = df['date'].dt.month == x
# Правда, для устранения линейной зависимости между колонками, один из сезонных индикаторов придётся исключить. Пусть базовым месяцем будет январь.
# +
# range(2, 13) covers February..December; January is the baseline month,
# dropped to avoid a linearly dependent design. range() replaces the
# Python-2-only xrange().
season_columns = ['season_' + str(x) for x in range(2, 13)]
# Build the design matrix X and target vector y for fitting.
X = df[['month_num'] + season_columns]
y = df['log_y']
# Keep only the rows with known y values (index < 144).
X1 = X[X.index < 144]
y1 = y[y.index < 144]
# -
# Fit the linear trend + monthly seasonality model on the observed rows.
model = LinearRegression()
model.fit(X1, y1)
pred = pd.DataFrame({
    'pred': model.predict(X1),
    'real': y1})
pred.plot()
# Now predict over the full matrix X, including the 12 future months
# (their 'real' values are NaN).
pred = pd.DataFrame({
    'pred': model.predict(X),
    'real': y})
pred.plot()
| ML/Datasets/linear_regression.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
plt.rcParams['axes.facecolor'] = 'lightgray'
sns.set(style="darkgrid")
np.set_printoptions(precision=3)
# +
def boxplot_all_methods(plt_handle, res_all, title='', names=[], color=[]):
    """Draw a seaborn boxplot comparing accuracy distributions of several methods.

    Parameters
    ----------
    plt_handle : matplotlib Axes to draw into.
    res_all : array of shape (nb_methods, nb_repeats), one row per method.
    title : unused; kept for backward compatibility with existing callers.
    names : one label per method (row of ``res_all``), used as column names.
    color : palette forwarded to seaborn.
    """
    # Long format: one (method, accuracy) observation per row, as seaborn expects.
    res_all_df = pd.DataFrame(res_all.T)
    res_all_df.columns = names
    res_all_df_melt = res_all_df.melt(var_name='methods', value_name='accuracy')
    res_all_mean = np.mean(res_all, axis=1)
    print(res_all_df.shape, res_all_mean.shape, res_all_df_melt.shape)
    # Dashed reference lines at the mean accuracy of methods at index 2 (blue)
    # and index 1 (red) -- per names_short these are Src[1] and Tar; assumes
    # res_all has at least 3 rows.
    plt_handle.axhline(res_all_mean[2], ls='--', color='b')
    plt_handle.axhline(res_all_mean[1], ls='--', color='r')
    ax = sns.boxplot(x="methods", y="accuracy", data=res_all_df_melt, palette=color, ax=plt_handle)
    ax.set_xticklabels(ax.get_xticklabels(), rotation=-60, ha='left', fontsize=15)
    ax.tick_params(labelsize=15)
    ax.yaxis.grid(False)  # Hide the horizontal gridlines
    ax.xaxis.grid(True)   # Show the vertical gridlines
    # FIX: the original set xlabel/ylabel twice; the first pair was dead code.
    ax.set_xlabel("")
    ax.set_ylabel("Accuracy (%)", fontsize=15)
# -
# perturb = 'whitepatch'
# Experiment configuration: perturbation type, number of environments M,
# subsample proportion, and regularization / optimization hyper-parameters.
perturb = 'rotation'
M = 5
subset_prop = 0.2
lamL2 = 0.
lamL1 = 0.
lr = 1e-4
epochs= 100
# +
# Display names for the methods, in the order they are concatenated for plotting below.
names_short = ['Original', "Tar", "Src[1]",
               'DIP[1]', 'DIPweigh', 'CIP', 'CIRMweigh',
               'DIP[1]-MMD', 'DIPweigh-MMD', 'CIP-MMD', 'CIRMweigh-MMD']
# Filename pattern of the saved result files (one .npy file per method family and seed).
prefix_template = 'results_MNIST/report_v8_%s_M%d_subsetprop%s_%s_lamMatch%s_lamCIP%s_lamMatchMMD%s_lamCIPMMD%s_epochs%d_seed%d'
# -
# Load the baseline results across seeds.
# results_src_ba axes: (source env, method, metric pair, seed);
# results_tar_ba axes: (method, metric pair, seed).
# The middle size-2 axis is presumably train/test accuracy (index 1 is used
# as "source test" below) -- TODO confirm against the script that wrote the files.
repeats = 10
nb_ba = 3 # Original, Tar, Src[1]
results_src_ba = np.zeros((M-1, nb_ba, 2, 10))
results_tar_ba = np.zeros((nb_ba, 2, 10))
for seed in range(repeats):
    savefilename_prefix = prefix_template % (perturb,
        M, str(subset_prop), 'baseline', 1., 0.1, 1., 0.1, epochs, seed)
    res = np.load("%s.npy" %savefilename_prefix, allow_pickle=True)
    results_src_ba[:, :, :, seed] =res.item()['src']
    results_tar_ba[:, :, seed] = res.item()['tar']
# Candidate matching-penalty weights lambda: 10^-5, 10^-4, ..., 10^4.
lamMatches = [10.**(k) for k in (np.arange(10)-5)]
# DAmean methods: DIP, DIPOracle, DIPweigh, CIRMweigh
# Load results for every (lambda, seed) pair.
nb_methods_damean = 4
repeats = 10
results_src_damean = np.zeros((len(lamMatches), M-1, nb_methods_damean, 2, 10))
results_tar_damean = np.zeros((len(lamMatches), nb_methods_damean, 2, 10))
for i, lam in enumerate(lamMatches):
    for seed in range(repeats):
        savefilename_prefix = prefix_template % (perturb,
            M, str(subset_prop), 'DAmean', lam, 10., lam, 10., epochs, seed)
        res = np.load("%s.npy" %savefilename_prefix, allow_pickle=True)
        results_src_damean[i, :, :, :, seed] =res.item()['src']
        results_tar_damean[i, :, :, seed] = res.item()['tar']
# choose lambda based on the source test performance
lam_index_damean = np.zeros(nb_methods_damean, dtype=int)
for i in range(nb_methods_damean):
    if i == 0 or i == 1:
        # DIP and DIPOracle: use the first source environment
        src_test_acc_all = results_src_damean[:, 0, i, 1, :].mean(axis=1)
    else:
        # M-2 for the source environment that is selected by weighting methods
        src_test_acc_all = results_src_damean[:, M-2, i, 1, :].mean(axis=1)
    # choose the largest lambda such that the source performance stays within
    # 99% of the best (i.e. at most a ~1% relative drop); lamMatches is sorted
    # ascending, so the last qualifying index is the largest lambda
    lam_index = 0
    for k, src_test_acc in enumerate(src_test_acc_all):
        if src_test_acc > np.max(src_test_acc_all) * 0.99:
            lam_index = k
    lam_index_damean[i] = lam_index
    print(lam_index, lamMatches[lam_index])
# DAMMD methods: DIP-MMD, DIPweigh-MMD, CIRMweigh-MMD
# Same loading scheme as DAmean, with the MMD variants of each method.
nb_methods_dammd = 3
repeats = 10
results_src_dammd = np.zeros((len(lamMatches), M-1, nb_methods_dammd, 2, 10))
results_tar_dammd = np.zeros((len(lamMatches), nb_methods_dammd, 2, 10))
for i, lam in enumerate(lamMatches):
    for seed in range(repeats):
        savefilename_prefix = prefix_template % (perturb,
            M, str(subset_prop), 'DAMMD', lam, 10., lam, 10., epochs, seed)
        res = np.load("%s.npy" %savefilename_prefix, allow_pickle=True)
        results_src_dammd[i, :, :, :, seed] =res.item()['src']
        results_tar_dammd[i, :, :, seed] = res.item()['tar']
# choose lambda based on the source test performance
lam_index_dammd = np.zeros(nb_methods_dammd, dtype=int)
for i in range(nb_methods_dammd):
    if i == 0:
        # DIP-MMD: use the first source environment
        src_test_acc_all = results_src_dammd[:, 0, i, 1, :].mean(axis=1)
    else:
        # M-2 for the source environment that is selected by weighting methods
        src_test_acc_all = results_src_dammd[:, M-2, i, 1, :].mean(axis=1)
    # choose the largest lambda such that the source performance stays within
    # 99% of the best (i.e. at most a ~1% relative drop)
    lam_index = 0
    for k, src_test_acc in enumerate(src_test_acc_all):
        if src_test_acc > np.max(src_test_acc_all) * 0.99:
            lam_index = k
    lam_index_dammd[i] = lam_index
    print(lam_index)
# DACIPmean methods (CIP with mean matching).
# NOTE: epochs is passed as the literal 100 here (equal to the `epochs`
# variable above), and lam fills the CIP slots of the filename template.
nb_methods_dacipmean = 1
repeats = 10
results_src_dacipmean = np.zeros((len(lamMatches), M-1, nb_methods_dacipmean, 2, 10))
results_tar_dacipmean = np.zeros((len(lamMatches), nb_methods_dacipmean, 2, 10))
for i, lam in enumerate(lamMatches):
    for seed in range(repeats):
        savefilename_prefix = prefix_template % (perturb,
            M, str(subset_prop), 'DACIPmean', 1., lam, 1., lam, 100, seed)
        res = np.load("%s.npy" %savefilename_prefix, allow_pickle=True)
        results_src_dacipmean[i, :, :, :, seed] = res.item()['src']
        results_tar_dacipmean[i, :, :, seed] = res.item()['tar']
# choose lambda based on the source test performance
lam_index_dacipmean = np.zeros(nb_methods_dacipmean, dtype=int)
for i in range(nb_methods_dacipmean):
    # average over all source environments except the last, then over seeds
    src_test_acc_all = results_src_dacipmean[:, :-1, i, 1, :].mean(axis=2).mean(axis=1)
    # choose the largest lambda such that the source performance stays within
    # 99% of the best (i.e. at most a ~1% relative drop)
    lam_index = 0
    for k, src_test_acc in enumerate(src_test_acc_all):
        if src_test_acc > np.max(src_test_acc_all) * 0.99:
            lam_index = k
    lam_index_dacipmean[i] = lam_index
    print(lam_index, lamMatches[lam_index])
# DACIPMMD methods (CIP with MMD matching); same scheme as DACIPmean.
nb_methods_dacipmmd = 1
repeats = 10
results_src_dacipmmd = np.zeros((len(lamMatches), M-1, nb_methods_dacipmmd, 2, 10))
results_tar_dacipmmd = np.zeros((len(lamMatches), nb_methods_dacipmmd, 2, 10))
for i, lam in enumerate(lamMatches):
    for seed in range(repeats):
        savefilename_prefix = prefix_template % (perturb,
            M, str(subset_prop), 'DACIPMMD', 1., lam, 1., lam, 100, seed)
        res = np.load("%s.npy" %savefilename_prefix, allow_pickle=True)
        results_src_dacipmmd[i, :, :, :, seed] = res.item()['src']
        results_tar_dacipmmd[i, :, :, seed] = res.item()['tar']
# choose lambda based on the source test performance
lam_index_dacipmmd = np.zeros(nb_methods_dacipmmd, dtype=int)
for i in range(nb_methods_dacipmmd):
    # average over all source environments except the last, then over seeds
    src_test_acc_all = results_src_dacipmmd[:, :-1, i, 1, :].mean(axis=2).mean(axis=1)
    # choose the largest lambda such that the source performance stays within
    # 99% of the best (i.e. at most a ~1% relative drop)
    lam_index = 0
    for k, src_test_acc in enumerate(src_test_acc_all):
        if src_test_acc > np.max(src_test_acc_all) * 0.99:
            lam_index = k
    lam_index_dacipmmd[i] = lam_index
    print(lam_index, lamMatches[lam_index])
# Inspect the selected lambda indices for DIP[1], DIPweigh, CIRMweigh (cell output).
lam_index_damean[[0, 2, 3]]
# +
# Assemble one row per method (matching names_short order): target-domain results
# of each method at its selected lambda. test_err_index selects entry 0 of the
# size-2 metric axis.
test_err_index = 0
results_tar_plot = np.concatenate((results_tar_ba[:, test_err_index, :],
                                   results_tar_damean[lam_index_damean[0], 0, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_damean[lam_index_damean[2], 2, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_dacipmean[lam_index_dacipmean, 0, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_damean[lam_index_damean[3], 3, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_dammd[lam_index_dammd[0], 0, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_dammd[lam_index_dammd[1], 1, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_dacipmmd[lam_index_dacipmmd, 0, test_err_index, :].reshape((-1, 10), order='F'),
                                   results_tar_dammd[lam_index_dammd[2], 2, test_err_index, :].reshape((-1, 10), order='F')), axis=0)
# -
# Color palettes: Set1 hues at decreasing saturation; the MMD variants reuse the
# same hues as their mean-matching counterparts, but desaturated.
COLOR_PALETTE1 = sns.color_palette("Set1", 9, desat=1.)
COLOR_PALETTE2 = sns.color_palette("Set1", 9, desat=.7)
COLOR_PALETTE3 = sns.color_palette("Set1", 9, desat=.5)
COLOR_PALETTE4 = sns.color_palette("Set1", 9, desat=.3)
# COLOR_PALETTE2 = sns.color_palette("Dark2", 30)
# COLOR_PALETTE = COLOR_PALETTE1[:8] + COLOR_PALETTE2[:30]
COLOR_PALETTE = [COLOR_PALETTE1[8], COLOR_PALETTE1[0], COLOR_PALETTE1[1],
                 COLOR_PALETTE1[3], COLOR_PALETTE1[4], COLOR_PALETTE1[7],
                 COLOR_PALETTE1[6],
                 COLOR_PALETTE4[3], COLOR_PALETTE4[4], COLOR_PALETTE4[7],
                 COLOR_PALETTE4[6]]
sns.palplot(COLOR_PALETTE)
# +
# hyperparameter choice plot: accuracies in percent, one box per method
fig, axs = plt.subplots(1, 1, figsize=(10,5))
boxplot_all_methods(axs, results_tar_plot*100,
                    title="", names=names_short,
                    color=np.array(COLOR_PALETTE)[:len(names_short)])
plt.savefig("paper_figures/MNIST_%s_M5_Yintervention.pdf" %perturb, bbox_inches="tight")
plt.show()
# -
| MNIST/MNIST_read_and_plot_rotation5M.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>CS4618: Artificial Intelligence I</h1>
# <h1>Neural Network Examples</h1>
# <h2>
# <NAME><br>
# School of Computer Science and Information Technology<br>
# University College Cork
# </h2>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>Initialization</h1>
# $\newcommand{\Set}[1]{\{#1\}}$
# $\newcommand{\Tuple}[1]{\langle#1\rangle}$
# $\newcommand{\v}[1]{\pmb{#1}}$
# $\newcommand{\cv}[1]{\begin{bmatrix}#1\end{bmatrix}}$
# $\newcommand{\rv}[1]{[#1]}$
# $\DeclareMathOperator{\argmax}{arg\,max}$
# $\DeclareMathOperator{\argmin}{arg\,min}$
# $\DeclareMathOperator{\dist}{dist}$
# $\DeclareMathOperator{\abs}{abs}$
# + slideshow={"slide_type": "-"}
# %load_ext autoreload
# %autoreload 2
# %matplotlib inline
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import accuracy_score
from tensorflow.keras import Model
from tensorflow.keras import Sequential
from tensorflow.keras import Input
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers import Dense
from tensorflow.keras.wrappers.scikit_learn import KerasRegressor
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
from tensorflow.keras.optimizers import SGD
from sklearn.datasets import load_iris
from tensorflow.keras.datasets.mnist import load_data
# +
# Cork Property Prices Dataset
# Use pandas to read the CSV file into a DataFrame
cork_df = pd.read_csv("../datasets/dataset_corkA.csv")
# Shuffle the dataset (frac=1 samples every row; fixed seed for reproducibility)
cork_df = cork_df.sample(frac=1, random_state=2)
cork_df.reset_index(drop=True, inplace=True)
# Split off the test set: 20% of the dataset.
dev_cork_df, test_cork_df = train_test_split(cork_df, train_size=0.8, random_state=2)
# The features
cork_features = ["flarea", "bdrms", "bthrms"]
# Create the preprocessor: standardize the three numeric features
cork_preprocessor = ColumnTransformer([
    ("scaler", StandardScaler(), cork_features)],
    remainder="passthrough")
# Extract the features but leave as a DataFrame
dev_cork_X = dev_cork_df[cork_features]
test_cork_X = test_cork_df[cork_features]
# Target values, converted to a 1D numpy array
dev_cork_y = dev_cork_df["price"].values
test_cork_y = test_cork_df["price"].values
# +
# CS1109 Dataset
# Use pandas to read the CSV file into a DataFrame
cs1109_df = pd.read_csv("../datasets/dataset_cs1109.csv")
# Shuffle the dataset (frac=1 samples every row; fixed seed for reproducibility)
cs1109_df = cs1109_df.sample(frac=1, random_state=2)
cs1109_df.reset_index(drop=True, inplace=True)
# Split off the test set: 20% of the dataset. Note the stratification:
# it preserves the class proportions of "outcome" in both splits.
dev_cs1109_df, test_cs1109_df = train_test_split(cs1109_df, train_size=0.8,
                                                 stratify=cs1109_df["outcome"], random_state=2)
# The features
cs1109_features = ["lect", "lab", "cao"]
# Create the preprocessor: standardize the three numeric features
cs1109_preprocessor = ColumnTransformer([
    ("scaler", StandardScaler(), cs1109_features)],
    remainder="passthrough")
# Extract the features but leave as a DataFrame
dev_cs1109_X = dev_cs1109_df[cs1109_features]
test_cs1109_X = test_cs1109_df[cs1109_features]
# Target values, encoded to integer class labels and converted to a 1D numpy array
label_encoder = LabelEncoder()
label_encoder.fit(cs1109_df["outcome"])
dev_cs1109_y = label_encoder.transform(dev_cs1109_df["outcome"])
test_cs1109_y = label_encoder.transform(test_cs1109_df["outcome"])
# +
# Iris dataset
# Load the dataset (a dictionary) and get the features DataFrame and target values from the dictionary
iris = load_iris(as_frame=True)
iris_df = iris.data
iris_y = iris.target
# Shuffle the features and the target values in the same way.
# FIX: DataFrame/Series.reindex returns a *new* object rather than reordering
# in place; the original discarded the results, so the shuffle never happened.
# The results must be assigned back.
idx = np.random.permutation(iris_df.index)
iris_df = iris_df.reindex(idx)
iris_y = iris_y.reindex(idx)
iris_df.reset_index(drop=True, inplace=True)
iris_y.reset_index(drop=True, inplace=True)
# Split off the test set: 20% of the dataset.
dev_iris_df, test_iris_df, dev_iris_y, test_iris_y = train_test_split(iris_df, iris_y, train_size=0.8,
                                                                      random_state=4)
# Create the preprocessor: standardize all four numeric features
iris_preprocessor = ColumnTransformer([
    ("scaler", StandardScaler(), iris_df.columns)],
    remainder="passthrough")
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>Introduction</h1>
# <ul>
# <li>We'll use layered, dense, feedforward neural networks for regression, binary classification
# and multi-class classification:
# <ul>
# <li>We'll use our three small datasets that contain 'structured' data (sometimes
# called 'tabular' data): not necessarily ideal for deep learning.</li>
# <li>We'll see one example that uses images.</li>
# </ul>
# </li>
# <li>This will illustrate some of the different activation functions we can use:
# <ul>
# <li>in the output layer: linear, sigmoid or softmax; and</li>
# <li>in the hidden layers: sigmoid or ReLU.</li>
# </ul>
# </li>
# <li>This will also introduce the Keras library.</li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>The Keras library</h1>
# <ul>
# <li>scikit-learn has very limited support for neural networks.</li>
# <li>There are now many libraries that do support tensor computation, neural networks and deep learning
# including in Python:
# <ul>
# <li>Tensorflow, PyTorch, Caffe, Theano, CNTK.</li>
# </ul>
# </li>
# <li>We will use Keras, which is a high-level API for Tensorflow, first released in 2015
# by Franç<NAME> of Google (<a href="https://keras.io">https://keras.io</a>), which has done
# a lot to make Deep Learning accessible to people:
# <ul>
# <li>It is very high-level, making it easy to construct networks, fit models and make predictions.</li>
# <li>The downside is it gives less fine-grained control than TensorFlow itself.</li>
# <li>This seems a suitable trade-off for us: our module is about AI, not the intricacies of
# TensorFlow.
# </li>
# </ul>
# </li>
# <li>For simple neural networks, Keras is even compatible with scikit-learn:
# <ul>
# <li>Using wrappers, we can have simple neural networks at the end of our pipelines.</li>
# </ul>
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>Keras concepts</h1>
# <ul>
# <li><b>Layers</b> are the building blocks.
# <ul>
# <li>To begin with, we will use <b>dense layers</b>.</li>
# <li>The activation functions of <em>hidden layers</em> are open for you to choose,
# e.g. sigmoid or ReLU.
# </li>
# <li>But the activation functions of <em>output layers</em> are determined by the task:
# <ul>
# <li>Regression: linear activation function (default);</li>
# <li>Binary classification: sigmoid activation function; and</li>
# <li>Multiclass classification: softmax activation function.</li>
# </ul>
# </li>
# </ul>
# </li>
# <li>Layers are combined into <b>networks</b>:
# <ul>
# <li>Consecutive layers must be compatible: the shape of the input to one layer is the shape of
# the output of the preceding layer.
# </li>
# <li>In early lectures, we only consider a stack of layers but Keras allows directed acyclic graphs
# and, later, we will briefly discuss some examples that are not just stacks of layers.
# </li>
# </ul>
# </li>
# <li>Once the network is built, we <b>compile</b> it, specifying:
# <ul>
# <li>A <b>loss function</b>:
# <ul>
# <li>Regression, e.g. mean-squared-error (<code>mse</code>);</li>
# <li>Binary classification, e.g. (binary) cross-entropy (<code>binary_crossentropy</code>);</li>
# <li>Multiclass classification, e.g. (categorical) cross-entropy
# (<code>sparse_categorical_crossentropy</code> if the labels are encoded as
# integer labels
# or <code>categorical_crossentropy</code> if the integer labels are then also one-hot
# encoded).</li>
# </ul>
# </li>
# <li>An <b>optimizer</b>, such as SGD — but see below.</li>
# <li>A list of metrics to monitor during training and testing:
# <ul>
# <li>Regression, e.g. mean-absolute-error (<code>mae</code>);</li>
# <li>Classification, e.g. accuracy (<code>acc</code>).</li>
# </ul>
# </li>
# </ul>
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>Keras optimizers</h1>
# <ul>
# <li>We know about Gradient Descent: Batch, Mini-Batch, Stochastic.</li>
# <li>Without going into details, many other variants of Gradient Descent have been devised:
# <ul>
# <li>some may have better convergence behaviour in the case of local minima;</li>
# <li>some may converge more quickly.</li>
# </ul>
# although a disadvantage is that they typically introduce further hyperparameters
# (e.g. momentum) in addition to learning rate.
# </li>
# <li>We'll use SGD below.
# <ul>
# <li>Be aware, the Keras default SGD learning rate is 0.01 — quite high,
# often resulting in divergence, so we must often change it.
# </li>
# <li>Be aware too that, although this optimizer is called SGD, behaviour depends on the
# <code>batch_size</code>. If <code>batch_size</code> is 1, then this is what we earlier
# called Stochastic Gradient Descent; if <code>batch_size</code> is equal to the size
# of the training set
# (excluding the validation set), then we get the equivalent of what we called Batch Gradient
# Descent; and if it is somewhere in between (which, in fact, is the most common way
# of training models in Keras), then it is Mini-Batch Gradient Descent.
# </li>
# </ul>
# </li>
# </ul>
# +
# tensorflow gives warnings that explain that tensorflow was originally compiled on a different computer
# architecture from the one you are using. This means its performance may not be optimal.
# It explains that, if you want to optimize tensorflow for your architecture, then you need to rebuild (recompile)
# tensorflow from scratch. We won't do this!
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>A Neural Network for Regression</h1>
# <ul>
# <li>For regression on structured/tabular data, we might use a network with the following architecture:
# <ul>
# <li>Input layer: one input per feature.</li>
# <li>Hidden layers: one or more hidden layers.
# <ul>
# <li>Activation function for neurons in hidden layers can be the sigmoid function or ReLU.
# </ul>
# </li>
# <li>Output layer: just one output neuron (assuming we're predicting a single number).
# <ul>
# <li>Activation function for the output neuron should be the <b>linear function</b>:
# $g(z) = z$
# </li>
# </ul>
# </li>
# </ul>
# </li>
# <li>(There are also biases in each layer except the output layer — Keras will give us these
# 'for free'.)
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h2>Example: Cork Property Prices</h2>
# <ul>
# <li>We don't want too many hidden layers, nor too many neurons in each hidden layer. Why?</li>
# <li>Let's start with this:
# <ul>
# <li>An input layer with three inputs ($\mathit{flarea}$, $\mathit{bdrms}$,
# $\mathit{bthrms}$);
# </li>
# <li>Two hidden layers, with 64 neurons in each, and ReLU activation function;</li>
# <li>An output layer with a single neuron and linear activation function.</li>
# </ul>
# </li>
# </ul>
# +
inputs = Input(shape=(3,))                  # one input per feature: flarea, bdrms, bthrms
x = Dense(64, activation="relu")(inputs)    # hidden layer 1
x = Dense(64, activation="relu")(x)         # hidden layer 2
outputs = Dense(1, activation="linear")(x)  # single linear output for regression
cork_model = Model(inputs, outputs)
# MSE loss for regression; low learning rate because Keras's SGD default (0.01) often diverges
cork_model.compile(optimizer=SGD(learning_rate=0.0001), loss="mse")
# -
# <ul>
# <li>Keras has improved its classes for preprocessing of data recently, and even does them as layers
# in the network. E.g. there are layers for scaling and one-hot encoding.
# </li>
# <li>However, when you have structured/tabular data, it remains easiest to preprocess it with scikit-learn.
# There is then a wrapper class that allows us to add a function that returns our neural network to the
# end of a scikit-learn
# pipeline and run our usual scikit-learn methods.
# </li>
# </ul>
# Create a pipeline: preprocess with scikit-learn, then predict with the Keras
# model via the KerasRegressor wrapper
cork_pipeline = Pipeline([
    ("preprocessor", cork_preprocessor),
    ("predictor", KerasRegressor(build_fn=lambda: cork_model, verbose=0, epochs=60, batch_size=32))
    ])
# +
# Error estimation
# We'll just train on the dev set and test on the test set
# If you want to use ShuffleSplit, KFold, cross_val_score and so on, then you can - in the usual way.
# But you may get a warning about an inefficiency in your code. We won't worry about this warning.
cork_pipeline.fit(dev_cork_X, dev_cork_y)
mean_absolute_error(test_cork_y, cork_pipeline.predict(test_cork_X))
# -
# <ul>
# <li>Feel free to edit the code, e.g. add or remove hidden layers, change the number of neurons in the
# hidden layers, change ReLU to sigmoid, change from SGD to another optimizer, change the learning rate,
# change the number of epochs,
# or change the batch size.
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>A Neural Network for Binary Classification</h1>
# <ul>
# <li>For binary classification, we might use a network with the following architecture:
# <ul>
# <li>Input layer: one input per feature.</li>
# <li>Hidden layers: one or more hidden layers.
# <ul>
# <li>Activation function for neurons in hidden layers can be sigmoid or ReLU.</li>
# </ul>
# </li>
# <li>Output layer: just one output neuron (for binary classification).
# <ul>
# <li>Activation function for the output neuron should be the sigmoid function also. Why?</li>
# </ul>
# </li>
# </ul>
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h2>Example: CS1109 Dataset</h2>
# <ul>
# <li>Let's start with this:
# <ul>
# <li>An input layer with 3 inputs ($\mathit{lect}$, $\mathit{lab}$, $\mathit{cao}$).</li>
# <li>Two hidden layers, with 64 neurons in each, and ReLU activation function.</li>
# <li>An output layer with a single neuron and sigmoid activation function.</li>
# </ul>
# </li>
# </ul>
# +
inputs = Input(shape=(3,))                   # one input per feature: lect, lab, cao
x = Dense(64, activation="relu")(inputs)     # hidden layer 1
x = Dense(64, activation="relu")(x)          # hidden layer 2
outputs = Dense(1, activation="sigmoid")(x)  # single sigmoid output for binary classification
cs1109_model = Sequential(Model(inputs, outputs)) # For classification, scikit-learn requires me to add Sequential(...)
# binary cross-entropy loss to match the sigmoid output
cs1109_model.compile(optimizer=SGD(learning_rate=0.001), loss="binary_crossentropy")
# -
# Create a pipeline: preprocess with scikit-learn, then classify with the Keras
# model via the KerasClassifier wrapper
cs1109_pipeline = Pipeline([
    ("preprocessor", cs1109_preprocessor),
    ("estimator", KerasClassifier(build_fn=lambda: cs1109_model, verbose=0, epochs=60, batch_size=32))
    ])
# +
# Accuracy estimation: fit on the dev set, score on the held-out test set
cs1109_pipeline.fit(dev_cs1109_X, dev_cs1109_y)
# You may get a warning because scikit-learn is using tensorflow in a deprecated way
accuracy_score(test_cs1109_y, cs1109_pipeline.predict(test_cs1109_X))
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>A Neural Network for Multi-Class Classification</h1>
# <ul>
# <li>For multi-class classification, we might use a network with the following architecture:
# <ul>
# <li>Input layer: one input per feature.</li>
# <li>Hidden layers: one or more hidden layers.
# <ul>
# <li>Activation function for neurons in hidden layers can be sigmoid or ReLU.</li>
# </ul>
# </li>
# <li>Output layer: one output neuron per class.
# <ul>
# <li>Activation function for the output neurons should be the softmax function.</li>
# </ul>
# </li>
# </ul>
# </li>
# </ul>
# + [markdown] slideshow={"slide_type": "slide"}
# <h2>Example: Iris Dataset</h2>
# <ul>
# <li>Let's start with this:
# <ul>
# <li>An input layer with 4 inputs (petal width and length, and sepal width and length).</li>
# <li>Two hidden layers, with 64 neurons in each, and ReLU activation function.</li>
# <li>An output layer with three neurons (one for Setosa, Versicolor and Virginica) and
# softmax activation function.
# </li>
# </ul>
# </li>
# </ul>
# +
inputs = Input(shape=(4,))                   # one input per feature (petal/sepal width and length)
x = Dense(64, activation="relu")(inputs)     # hidden layer 1
x = Dense(64, activation="relu")(x)          # hidden layer 2
outputs = Dense(3, activation="softmax")(x)  # one output per class, softmax for multi-class
iris_model = Sequential(Model(inputs, outputs))
# sparse_categorical_crossentropy because the classes are integer-encoded (0, 1, 2)
iris_model.compile(optimizer=SGD(learning_rate=0.01), loss="sparse_categorical_crossentropy")
# -
# <ul>
# <li>Note the loss function above:
# <ul>
# <li><code>sparse_categorical_crossentropy</code> for multi-class classification when the classes
# are integers, e.g. 0 = one kind of Iris, 1 = another kind, 2 = a third kind (which is what
# we have in the Iris dataset).
# </li>
# <li><code>categorical_crossentropy</code> for multi-class classification when the classes have
# been one-hot encoded.
# </li>
# <li>(And, as we've seen, <code>binary_crossentropy</code> for binary classification, where the classes
# will be 0 or 1.)
# </li>
# </ul>
# </li>
# </ul>
# Create a pipeline: preprocess with scikit-learn, then classify with the Keras
# model via the KerasClassifier wrapper
iris_pipeline = Pipeline([
    ("preprocessor", iris_preprocessor),
    ("predictor", KerasClassifier(build_fn=lambda: iris_model, verbose=0, epochs=60, batch_size=32))
    ])
# +
# Accuracy estimation: fit on the dev set, score on the held-out test set
iris_pipeline.fit(dev_iris_df, dev_iris_y)
# May get same warning
accuracy_score(test_iris_y, iris_pipeline.predict(test_iris_df))
# + [markdown] slideshow={"slide_type": "slide"}
# <h1>A Final Example: MNIST</h1>
# <ul>
# <li>MNIST is a classic dataset for multi-class classification.</li>
# <li>The task is classification of hand-written digits.
# <ul>
# <li>Features: 28 pixel by 28 pixel grayscale images of hand-written digits.
# <ul>
# <li>The values are integers in $[0, 255]$.</li>
# </ul>
# </li>
# <li>Classes: 0 to 9.</li>
# </ul>
# </li>
# <li>Dataset: 70,000 images, so we can safely use holdout, and it is already partitioned:
# <ul>
# <li>60,000 training images;</li>
# <li>10,000 test images.</li>
# </ul>
# </li>
# </ul>
# -
# Keras has a utility function for downloading it into four Numpy arrays
# To get this to work on macOS, I also had to run this in a terminal:
# $ /Applications/Python\ 3.8/Install\ Certificates.command
# You may need something similar
(mnist_x_train, mnist_y_train), (mnist_x_test, mnist_y_test) = load_data()
# Inspect the training data: shape, dtype, and the set of class labels
mnist_x_train.shape
mnist_x_train.dtype
np.unique(mnist_y_train)
# Inspect the test data likewise
mnist_x_test.shape
np.unique(mnist_y_test)
idx = 126 # Change this number to look at other images
some_example = mnist_x_train[idx]
# Look at the raw data for this image. Warning: large! (28 by 28)
some_example
# +
# Draw it
some_example = some_example.reshape(28, 28)
fig = plt.figure()
plt.imshow(some_example, cmap=plt.cm.binary, interpolation="nearest")
plt.axis("off")
plt.show()
# -
# Take a look at its class
mnist_y_train[idx]
# <ul>
# <li>We don't really need scikit-learn pipelines this time.</li>
# <li>But we do need to reshape:
# <ul>
# <li>Our training data is in a 3D array of shape (60000, 28, 28).</li>
# <li>We change it to a 2D array of shape (60000, 28 * 28).
# <ul>
# <li>This 'flattens' the images.</li>
# <li>When working with images, it is often better not to do this. In a future lecture, we'll
# build neural networks that do not require us to flatten.
# </li>
# </ul>
# </li>
# <li>Similarly, the test data.</li>
# </ul>
# </li>
# </ul>
# +
# Flatten each 28x28 image into a 784-element row vector
mnist_x_train = mnist_x_train.reshape((60000, 28 * 28))
mnist_x_test = mnist_x_test.reshape((10000, 28 * 28))
# -
# <ul>
# <li>We'll do a two-layer network:
# <ul>
# <li>One hidden layer with 512 neurons, using the ReLU activation function.</li>
# <li>The output layer will have 10 neurons, one per class, and
# will use the softmax activation function.</li>
# </ul>
# </li>
# <li>Prior to those two layers of neurons, we'll have another layer, which will scale:
# <ul>
# <li>The values in the original dataset are integers in $[0, 255]$.</li>
# <li>The Rescaling layer changes them to floats in $[0, 1]$.</li>
# </ul>
# By doing this in a layer, we don't need a scikit-learn pipeline.
# </li>
# </ul>
# +
inputs = Input(shape=(28 * 28,))             # one input per pixel of the flattened image
x = Rescaling(scale=1./255)(inputs)          # scale integer pixels in [0, 255] to floats in [0, 1]
x = Dense(512, activation="relu")(x)         # single hidden layer
outputs = Dense(10, activation="softmax")(x) # one output per digit class
mnist_model = Model(inputs, outputs)
# sparse_categorical_crossentropy: labels are integers 0-9; also track accuracy
mnist_model.compile(optimizer=SGD(learning_rate=0.01), loss="sparse_categorical_crossentropy", metrics=["accuracy"])
# -
# <ul>
# <li>We'll use Keras functions for training and testing.</li>
# </ul>
# Print the layer-by-layer output shapes and parameter counts
mnist_model.summary()
# <ul>
# <li>Make sure you understand all the numbers above!
# </li>
# </ul>
# Train with mini-batch gradient descent (batch_size=32) for 10 epochs
mnist_model.fit(mnist_x_train, mnist_y_train, epochs=10, batch_size=32)
# Evaluate on the held-out test set
test_loss, test_acc = mnist_model.evaluate(mnist_x_test, mnist_y_test)
test_acc
# <ul>
# <li>Compare training accuracy and test accuracy.</li>
# </ul>
# <h1>Concluding Remarks</h1>
# <ul>
# <li>A few decisions are constrained: number of inputs; number of output neurons; activation
# function of output neurons; and (to some extent) loss function.
# </li>
# <li>But there are numerous hyperparameters (and even more to come!)
# <ul>
# <li>Even making a good guess at them is more art than science, although this is changing.</li>
# <li>On the other hand, grid search or randomized search will make things even slower than they
# already are — and we still have to specify some sensible values for
# them to search through.
# </li>
# </ul>
# </li>
# <li>There is a considerable risk of overfitting.</li>
# </ul>
| ai1/lectures/AI1_18_examples_of_neural_networks.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# +
# %reload_ext autoreload
# %autoreload 2
# %matplotlib inline
import os
# Hide all GPUs so TensorFlow runs on the CPU (empty CUDA_VISIBLE_DEVICES).
# (FIX: removed a stray trailing semicolon and normalized spacing.)
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""  # Enforce CPU usage
from psutil import cpu_count # Do "pip install psutil" if not already installed
import tensorflow as tf
import numpy as np
# Constants from the performance optimization available in onnxruntime.
# They need to be set before importing onnxruntime.
os.environ["OMP_NUM_THREADS"] = str(cpu_count(logical=True))
os.environ["OMP_WAIT_POLICY"] = 'ACTIVE'
# -
# ## ONNX and TensorFlow Lite Support in `ktrain`
#
# As of v0.24.x, `predictors` in **ktrain** provide built-in support for exports to [ONNX](https://github.com/onnx/onnx) and [TensorFlow Lite](https://www.tensorflow.org/lite) formats. This allows you to more easily take a **ktrain**-trained model and use it to make predictions *outside* of **ktrain** (or even TensorFlow) in deployment scenarios. In this notebook, we will show a text classification example of this.
#
# Let us begin by loading a previously trained `Predictor` instance, which consists of both the **DistilBert** model and its associated `Preprocessor` instance.
import ktrain
# Load a previously-trained ktrain Predictor (model + preprocessor) from disk.
# NOTE(review): assumes /tmp/my_distilbert_predictor exists — created by the
# training snippet shown in the markdown cell below.
predictor = ktrain.load_predictor('/tmp/my_distilbert_predictor')
print(predictor.model)
print(predictor.preproc)
# The cell above assumes that the model was previously trained on the 20 Newsgroup corpus using a GPU (e.g., on Google Colab). The files in question can be easily created with **ktrain**:
#
# ```python
# # install ktrain
# # !pip install ktrain
#
# # load text data
# categories = ['alt.atheism', 'soc.religion.christian','comp.graphics', 'sci.med']
# from sklearn.datasets import fetch_20newsgroups
# train_b = fetch_20newsgroups(subset='train', categories=categories, shuffle=True)
# test_b = fetch_20newsgroups(subset='test',categories=categories, shuffle=True)
# (x_train, y_train) = (train_b.data, train_b.target)
# (x_test, y_test) = (test_b.data, test_b.target)
#
# # build, train, and validate model (Transformer is wrapper around transformers library)
# import ktrain
# from ktrain import text
# MODEL_NAME = 'distilbert-base-uncased'
# t = text.Transformer(MODEL_NAME, maxlen=500, class_names=train_b.target_names)
# trn = t.preprocess_train(x_train, y_train)
# val = t.preprocess_test(x_test, y_test)
# model = t.get_classifier()
# learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=6)
# learner.fit_onecycle(5e-5, 1)
#
# # save predictor
# predictor = ktrain.get_predictor(learner.model, t)
# predictor.save('/tmp/my_distilbert_predictor')
# ```
# ## TensorFlow Lite Inferences
#
# Here, we export our model to TensorFlow LITE and use it to make predictions *without* **ktrain**.
# +
# export TensorFlow Lite model
# (export_model_to_tflite writes the converted model and returns its final path)
tflite_model_path = '/tmp/model.tflite'
tflite_model_path = predictor.export_model_to_tflite(tflite_model_path)
# load interpreter
interpreter = tf.lite.Interpreter(model_path=tflite_model_path)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
# set maxlen, class_names, and tokenizer (use settings employed when training the model - see above)
maxlen = 500 # from above
class_names = ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian'] # from above
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('distilbert-base-uncased')
# preprocess and predict outside of ktrain
doc = 'I received a chest x-ray at the hospital.'
inputs = tokenizer(doc, max_length=maxlen, padding='max_length', truncation=True, return_tensors="tf")
# NOTE(review): assumes input_details[0] is attention_mask and input_details[1]
# is input_ids — this ordering is conversion-specific; confirm against the
# 'name' field of interpreter.get_input_details() before reuse.
interpreter.set_tensor(input_details[0]['index'], inputs['attention_mask'])
interpreter.set_tensor(input_details[1]['index'], inputs['input_ids'])
interpreter.invoke()
output_tflite = interpreter.get_tensor(output_details[0]['index'])
print()
print('text input: %s' % (doc))
print()
print('predicted logits: %s' % (output_tflite))
print()
print("predicted class: %s" % ( class_names[np.argmax(output_tflite[0])]) )
# -
# ## ONNX Inferences
#
# Here, we will export our trained model to ONNX and make predictions *outside* of both **ktrain** and **TensorFlow** using the ONNX runtime. Please ensure the ONNX libraries are installed before proceeding with:
# ```
# pip install -q --upgrade onnxruntime==1.5.1 onnxruntime-tools onnx keras2onnx
# ```
#
# It is possible to transform a TensorFlow model directly to ONNX using: `predictor.export_model_to_onnx(onnx_model_path)`, similar to what was done for TFLite above. However, for **transformers** models like the **DistilBERT** text classifier used in this example, it is recommended that the model first be converted to PyTorch and then to ONNX for better performance of the final ONNX model.
#
# In the cell below, we use `AutoModelForSequenceClassification.from_pretrained` to load our classifier as a PyTorch model before converting to ONNX. We, then, use our ONNX model to make predictions **without** the need for ktrain or TensorFlow or PyTorch. This is well-suited for deployments that require smaller footprints (e.g., Heroku).
# +
# set maxlen, class_names, and tokenizer (use settings employed when training the model - see above)
model_name = 'distilbert-base-uncased'
maxlen = 500 # from above
class_names = ['alt.atheism', 'comp.graphics', 'sci.med', 'soc.religion.christian'] # from above
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(model_name)
# imports
import numpy as np
from transformers.convert_graph_to_onnx import convert, optimize, quantize
from transformers import AutoModelForSequenceClassification
from pathlib import Path
# paths
predictor_path = '/tmp/my_distilbert_predictor'
pt_path = predictor_path+'_pt'
pt_onnx_path = pt_path +'_onnx/model.onnx'
# convert to ONNX: first re-load the TF checkpoint as a PyTorch model
# (recommended route for transformers models — see markdown above), then
# export the PyTorch model to ONNX with the sentiment-analysis pipeline head.
AutoModelForSequenceClassification.from_pretrained(predictor_path,
                                                   from_tf=True).save_pretrained(pt_path)
convert(framework='pt', model=pt_path,output=Path(pt_onnx_path), opset=11,
        tokenizer=model_name, pipeline_name='sentiment-analysis')
# graph-optimize then 8-bit-quantize the exported model; returns a Path to the final file
pt_onnx_quantized_path = quantize(optimize(Path(pt_onnx_path)))
# create ONNX session
def create_onnx_session(onnx_model_path, provider='CPUExecutionProvider'):
    """Build and return an onnxruntime InferenceSession for the given model file.

    Parameters
    ----------
    onnx_model_path : str
        Path to the .onnx model on disk.
    provider : str
        Execution provider to run on; must be one of onnxruntime's
        registered providers (defaults to plain CPU execution).
    """
    from onnxruntime import GraphOptimizationLevel, InferenceSession, SessionOptions, get_all_providers
    # Fail fast on an unknown provider rather than silently falling back.
    assert provider in get_all_providers(), f"provider {provider} not found, {get_all_providers()}"
    # Session tuning knobs suggested by Microsoft for CPU inference.
    sess_options = SessionOptions()
    sess_options.intra_op_num_threads = 0
    sess_options.graph_optimization_level = GraphOptimizationLevel.ORT_ENABLE_ALL
    # Materialize the graph on the requested backend and pin it there.
    sess = InferenceSession(onnx_model_path, sess_options, providers=[provider])
    sess.disable_fallback()
    return sess
# Run the quantized ONNX model — no ktrain/TensorFlow/PyTorch needed from here on.
sess = create_onnx_session(pt_onnx_quantized_path.as_posix())
# tokenize document and make prediction
tokens = tokenizer.encode_plus('I received a chest x-ray at the hospital.', max_length=maxlen, truncation=True)
# onnxruntime expects 2-D int arrays keyed by input name (input_ids, attention_mask)
tokens = {name: np.atleast_2d(value) for name, value in tokens.items()}
print()
print()
print("predicted class: %s" % (class_names[np.argmax(sess.run(None, tokens)[0])]))
# -
| examples/text/ktrain-ONNX-TFLite-examples.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # TSP's Parameters Sensitivity
# ### Information and Decision Systems Group<br>University of Chile
# Implementation of the TSP's parameters sensitivity analysis presented by [Gonzalez et al. (2021)](https://arxiv.org/pdf/2110.14122.pdf).
import sys
import numpy as np
import matplotlib.pyplot as plt
# TSP is a compiled extension built under ../src/build; distributions provides
# the synthetic data generators (e.g. gaussian_dist) used below.
sys.path.insert(1, '../src/build')
from TSP import TSP
sys.path.insert(1, './utils')
from distributions import *
# Number of samples per step
samples = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94,
95, 96, 97, 98, 99, 100, 101, 107, 114, 120, 127, 135, 143, 151, 160, 169, 179, 190, 201,
213, 225, 239, 253, 268, 283, 300, 318, 336, 356, 377, 400, 423, 448, 475, 503, 532, 564,
597, 632, 670, 709, 751, 796, 843, 893, 946, 1001, 1061, 1124, 1190, 1260, 1335, 1414, 1498,
1586, 1680, 1780, 1885, 1997, 2115, 2240, 2373, 2513, 2662, 2820, 2987, 3164, 3351, 3550,
3760, 3983, 4218, 4468, 4733, 5013, 5310, 5625, 5958, 6311, 6685, 7081, 7500, 7945, 8415,
8914, 9442, 10001, 10594, 11222, 11887, 12591, 13337, 14127, 14964, 15850, 16790, 17784,
18838, 19954, 21136, 22389, 23715, 25120, 26609, 28185, 29855, 31624, 33498, 35483, 37585,
39812, 42171, 44670, 47317, 50120, 53090, 56236, 59568, 63097, 66836, 70796, 74991, 79434,
84141, 89127, 94408, 100001])
# Largest sample count — every experiment draws this many points once and
# then feeds growing prefixes of it to the estimator.
n_samples = samples[-1]
plt.plot(samples)
plt.title('Experiments samples')
plt.xlabel('Step')
plt.ylabel('Samples')
plt.grid(axis='y')
plt.show()
# ## TSP $\alpha$-parameter sensitivity
# +
# TSP parameters
l_bn = 0.167   # l-parameter of the tree (see l-parameter section below)
w_bn = 0.05    # w-parameter of the tree (see w-parameter section below)
lambdas = [0, 1e-5, 5e-5, 0.0001, 0.00015]  # alpha regularization weights to sweep
# Experimental setting
dist = gaussian_dist
dim = 1
correlations = [0, 0.7]
# TSP - tree sizes
lambda_sizes = []
# TSP - full tree estimated mutual informations
lambda_emis = []
# TSP - regularized tree mutual informations
lambda_reg_emis = []
# Experiments
for corr in correlations:
    sizes = []
    emis = []
    reg_emis = []
    # Draw the full dataset once; prefixes of it are used at each step below.
    _, X, Y, mi = dist(dim, corr, n_samples)
    for l in lambdas:
        l_sizes = []
        l_emis = []
        l_reg_emis = []
        tsp = TSP(l_bn, w_bn, l)
        for i in samples:
            # grow() is fed the first i samples; Fortran-ordered copies are
            # required by the compiled TSP extension.
            # NOTE(review): the same tsp object is grown repeatedly with
            # overlapping prefixes — presumably grow() rebuilds/extends the
            # tree from the given data; confirm against src/build.
            tsp.grow(np.copy(X[:i], order='F'), np.copy(Y[:i], order='F'))
            if l != 0:
                tsp.regularize()
            l_emis.append(tsp.emi())
            l_sizes.append(tsp.size())
            l_reg_emis.append(tsp.reg_emi())
        emis.append(l_emis)
        reg_emis.append(l_reg_emis)
        sizes.append(l_sizes)
    lambda_sizes.append(sizes)
    lambda_emis.append(emis)
    lambda_reg_emis.append(reg_emis)
# Plots: 3 rows x 2 columns — row 1: raw EMI, row 2: regularized EMI,
# row 3: tree size; one column per correlation value.
cnt = 0
fig = plt.figure(figsize=(16,11))
for i in range(len(correlations)):
    cnt += 1
    plt.subplot(3, 2, cnt)
    for j in range(len(lambdas)):
        plt.plot(samples, lambda_emis[i][j], label=r'$\alpha={}$'.format(lambdas[j]))
    # Horizontal dashed line at the theoretical mutual information
    # (last return value of gaussian_dist); uses j left over from the loop.
    plt.plot(samples,
             gaussian_dist(dim, correlations[i], n_samples)[-1] * np.ones(len(lambda_emis[i][j])),
             '--', label='theoretical', color='r', linewidth=1)
    plt.xscale('log')
    plt.xlabel(r'$n$')
    plt.ylabel(r'$\hat{i}^\alpha_{\delta_n,b_n}(z^n_1)$')
    plt.title(r'Correlation $\sigma={}$'.format(correlations[i]))
    plt.legend()
for i in range(len(correlations)):
    cnt += 1
    plt.subplot(3, 2, cnt)
    for j in range(len(lambdas)):
        plt.plot(samples, lambda_reg_emis[i][j], label=r'$\alpha={}$'.format(lambdas[j]))
    plt.plot(samples,
             gaussian_dist(dim, correlations[i], n_samples)[-1] * np.ones(len(lambda_emis[i][j])),
             '--', label='theoretical', color='r', linewidth=1)
    plt.xscale('log')
    plt.xlabel(r'$n$')
    plt.ylabel(r'$\hat{i}^\alpha_{\delta_n,b_n}(z^n_1) - \alpha r_{b_n, \delta_n}(\left| T \right|)$')
    plt.title(r'Correlation $\sigma={}$'.format(correlations[i]))
    plt.legend()
for i in range(len(correlations)):
    cnt += 1
    plt.subplot(3, 2, cnt)
    for j in range(len(lambdas)):
        plt.plot(samples, lambda_sizes[i][j], label=r'$\alpha={}$'.format(lambdas[j]))
    plt.xscale('log')
    plt.xlabel(r'$n$')
    plt.ylabel(r'$|\hat{T}_{b_n,\delta_n} (\alpha)|$')
    plt.title(r'Correlation $\sigma={}$'.format(correlations[i]))
    plt.legend()
plt.tight_layout()
plt.show()
# -
# ## TSP $l$-parameter sensitivity
# +
# TSP parameters
l_bns = [0.067, 0.133, 0.2, 0.267, 0.3]  # l-parameter values to sweep
w_bn = 0.05
l = 0  # no regularization in this sweep
# Experimental setting
dist = gaussian_dist
dim = 1
correlations = [0, 0.7]
# TSP - tree sizes
lbn_sizes = []
# TSP - full tree estimated mutual informations
lbn_emis = []
# Experiments
for corr in correlations:
    sizes = []
    emis = []
    _, X, Y, _ = dist(dim, corr, n_samples)
    for bn in l_bns:
        b_sizes = []
        b_emis = []
        tsp = TSP(bn, w_bn, l)
        for i in samples:
            tsp.grow(np.copy(X[:i], order='F'), np.copy(Y[:i], order='F'))
            if l != 0:  # never taken here (l == 0); kept for parity with the alpha sweep
                tsp.regularize()
            b_emis.append(tsp.emi())
            b_sizes.append(tsp.size())
        emis.append(b_emis)
        sizes.append(b_sizes)
    lbn_sizes.append(sizes)
    lbn_emis.append(emis)
# Plots: one EMI-vs-n panel per correlation value
cnt = 0
fig = plt.figure(figsize=(16,4))
for i in range(len(correlations)):
    cnt += 1
    plt.subplot(1, 2, cnt)
    for j in range(len(l_bns)):
        plt.plot(samples, lbn_emis[i][j], label=r'$l={}$'.format(l_bns[j]))
    # theoretical mutual information as a dashed reference line
    plt.plot(samples,
             gaussian_dist(dim, correlations[i], n_samples)[-1] * np.ones(len(lbn_emis[i][j])),
             '--', label='theoretical', color='r', linewidth=1)
    plt.xscale('log')
    plt.xlabel(r'$n$')
    plt.ylabel(r'$\hat{i}^0_{\delta_n,b_n}(z^n_1)$')
    plt.title(r'Correlation $\sigma={}$'.format(correlations[i]))
    plt.legend()
plt.tight_layout()
plt.show()
# -
# ## TSP $w$-parameter sensitivity
# +
# TSP parameters
l_bn = 0.167
w_bns = [1, 0.5, 0.1, 0.05, 0.01, 0.005, 0.001, 0.0005]  # w-parameter values to sweep
l = 0  # no regularization in this sweep
# Experimental setting
dist = gaussian_dist
dim = 1
correlations = [0, 0.7]
# TSP - tree sizes
wbn_sizes = []
# TSP - full tree estimated mutual informations
wbn_emis = []
# Experiments
for corr in correlations:
    sizes = []
    emis = []
    _, X, Y, _ = dist(dim, corr, n_samples)
    for wn in w_bns:
        w_sizes = []
        w_emis = []
        tsp = TSP(l_bn, wn, l)
        for i in samples:
            tsp.grow(np.copy(X[:i], order='F'), np.copy(Y[:i], order='F'))
            if l != 0:  # never taken here (l == 0); kept for parity with the alpha sweep
                tsp.regularize()
            w_emis.append(tsp.emi())
            w_sizes.append(tsp.size())
        emis.append(w_emis)
        sizes.append(w_sizes)
    wbn_sizes.append(sizes)
    wbn_emis.append(emis)
# Plots: one EMI-vs-n panel per correlation value
cnt = 0
fig = plt.figure(figsize=(16,4))
for i in range(len(correlations)):
    cnt += 1
    plt.subplot(1, 2, cnt)
    for j in range(len(w_bns)):
        plt.plot(samples, wbn_emis[i][j], label=r'$w={}$'.format(w_bns[j]))
    # theoretical mutual information as a dashed reference line
    plt.plot(samples,
             gaussian_dist(dim, correlations[i], n_samples)[-1] * np.ones(len(wbn_emis[i][j])),
             '--', label='theoretical', color='r', linewidth=1)
    plt.xscale('log')
    plt.xlabel(r'$n$')
    plt.ylabel(r'$\hat{i}^0_{\delta_n,b_n}(z^n_1)$')
    plt.title(r'Correlation $\sigma={}$'.format(correlations[i]))
    plt.legend()
plt.tight_layout()
plt.show()
# -
| experiments/TSP_params_sensitivity.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import pandas as pd
import numpy as np
import os
os.chdir('C:\\Users\\<NAME>\\Python Files\\DataSources')
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
df = pd.read_excel('Online_Retail.xlsx')
df
len(df)
# data manipulation
df['Description'] = df['Description'].str.strip()
df.dropna(axis=0, subset=['InvoiceNo'], inplace=True)
df['InvoiceNo'] = df['InvoiceNo'].astype('str')
df = df[~df['InvoiceNo'].str.contains('C')]
df
basket = (df[df['Country']=='France'] # working only on france data
.groupby(['InvoiceNo','Description'])['Quantity']
.sum().unstack().reset_index().fillna(0) # transposing the data
.set_index('InvoiceNo'))
type(basket)
basket.to_csv('output.csv')
basket
def encode_units(x):
    """Convert a summed quantity into a 0/1 basket-membership indicator.

    Returns 1 when at least one unit was bought, else 0.  The original
    version returned None (implicitly) for fractional quantities in
    (0, 1), which would corrupt the boolean basket matrix fed to apriori;
    any quantity below 1 now counts as "not bought".
    """
    return 1 if x >= 1 else 0
# One-hot encode quantities so every cell is 1 (item in basket) or 0 (not).
# NOTE(review): DataFrame.applymap is deprecated since pandas 2.1 in favour of
# DataFrame.map — confirm the pandas version in use before changing.
basket_sets = basket.applymap(encode_units)
basket_sets.drop('POSTAGE',inplace=True, axis=1)  # postage is a shipping charge, not a product
basket_sets.to_csv('output.csv')
frequent_itemsets = apriori(basket_sets,min_support=0.07,use_colnames=True) # support will be decided by the business
rules = association_rules(frequent_itemsets,metric='lift', min_threshold=1)
rules.head()
len(rules)
rules
# keep only strong rules: lift >= 6 and confidence >= 0.8
rules[(rules['lift']>=6) & (rules['confidence']>=0.8)]
| Association Rule MLEXtend/MLXTEND - Association Rule through apriori algorithm Market basket analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .jl
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Julia 1.5.0
# language: julia
# name: julia-1.5
# ---
# # Bootstrap Resampling demo
#
# This Jupyter notebook runs the <a href="https://github.com/brenhinkeller/StatGeochem.jl" target="_blank">StatGeochem</a> package, which includes (among other things) a version of the weighted bootstrap resampling code described in <a href="https://doi.org/10.1038/nature11024" target="_blank">Keller & Schoene 2012</a> and <a href="https://doi.org/10.1016/j.epsl.2017.10.031" target="_blank">2018</a> implemented in the <a href="https://julialang.org" target="_blank">Julia language</a>.
#
# <a href="https://mybinder.org/v2/gh/brenhinkeller/StatGeochem.jl/main?filepath=examples%2FBootstrapResamplingDemo.ipynb" target="_blank"><img src="https://mybinder.org/badge_logo.svg" alt="Launch Binder notebook" align="left"></a>
# <p style="text-indent: 5px">If running this notebook as an online Binder notebook and the webpage times out, click the badge at left to relaunch (refreshing will not work). Note that any changes will be lost!</p>
#
# Hint: `shift`-`enter` to run a single cell, or from the `Cell` menu select `Run All` to run the whole file. Any code from this notebook can be copied and pasted into the Julia REPL or a `.jl` script.
# ***
# ### Load required Julia packages
# +
## --- Load (and install if neccesary) the StatGeochem package which has the resampling functions we'll want
# If StatGeochem is not yet installed, `using` throws; install from GitHub and retry.
try
    using StatGeochem
catch
    using Pkg
    Pkg.add(PackageSpec(url="https://github.com/brenhinkeller/StatGeochem.jl"))
    using StatGeochem
end
using Statistics, StatsBase, DelimitedFiles
using Plots; gr();   # GR backend for plotting
# -
# ## A super-quick example (Try pasting in your own data here!)
#
# #### Input dataset
# +
## --- Input dataset
# We'll store the data in a data structure called a "dictionary"
test = Dict()
# Make fields called "Latitude" and "Longitude" with locations
test["Latitude"] = [-26.18, -21.55, 36.6, 54.4, 36.59, 32.5, 49.85, 49.58, 50.2, 42.4725, 48.47, 25.7725, 12.1851, 17.9, -18.1378, 16.1, 67.11, 48.25, 24.42, 23.2, 40.78, -15.38, 21.94, 61.2296, 49.0, 57.03, 63.6, 62.0, 69.47, 54.8189, 22.39, -17.9317, 48.12, 46.47, 49.0, 13.5, 13.5, 33.0, 20.536, 41.5783, 46.5045, 44.5633, 37.7, 43.7506, 37.2202, 58.07, 34.75, 38.25, 40.42, 43.96, 45.38, 37.78, 35.62, 65.25, 46.23, 53.68, 46.38, 45.11, 42.25, 60.25,46.2, 43.31, 39.81, 35.36, 34.88, 34.75, 62.76, 46.45, 54.75, 44.12, 33.61, 38.0, 36.62, 41.11, 47.95, 37.55, 36.5, 36.75, 48.1732, 22.045, 38.45, 61.4597, 36.0, 66.0, 37.5, 23.544, 69.45, 70.0, 6.25, 52.4, 4.58, -32.0, 64.45, 63.89, 63.95, 64.45, 57.03, 8.11, 7.35, 5.14]
test["Longitude"] = [27.84, 119.9, -118.3, -67.0, 27.17, 103.0, 7.88, 7.15, -60.6, 11.9731, -81.4, 100.32, -84.1862, -65.75, -69.1483, 76.6, 28.85, -78.25, 33.43, 35.1, 14.05, 167.83, 100.92, -131.514, -85.5, 120.72, 36.3, -94.5, 30.6, -100.804, -3.76, -177.567, -77.77, 89.58, -78.0, 78.0, 78.0, 102.0, -104.7, -121.658, -114.802, -114.282, -119.3, -114.641, -119.35, -135.63, -118.75, -91.41, -75.45, -123.66, -109.77, -107.66, -115.16, -147.0, -122.25,-166.4, -122.06, -109.9, -72.12, -152.25, -122.19, -117.31, -120.0, -112.7, -118.88, -118.86, -150.93, -122.03, -131.99, -121.75, -113.09, -72.87, -116.5, -106.25, -91.95, -90.7, -106.12, -106.25, 99.8333, -160.223, 27.3, -139.599, -78.9, -30.0, 140.5, 141.495, 86.22, -52.0, 10.5, -92.75, 9.66, 147.0, 29.05, 28.95, 29.08, 29.05, 120.72, 38.37, 38.42, 123.67]
# Fill in some other fields with age [Ma], major element [wt. %], and trace elements [ppm] data
# Notice there are some NaNs, but this is OK. This is a small sampling of real data from EarthChem
test["Age"] = [3210.0, 3300.0, 85.0, 2100.0, 0.0, 218.0, 275.0, 275.0, 1500.0, 0.254, 2650.0, 44.25, 19.5, 105.8, 1.305, 2600.0, 2415.0, 2100.0, 90.0, 975.0, 32.7685, 0.1, 249.0, 401.5, 2750.0, 3250.0, 2750.0, 2700.0, 2050.0, 1900.0, 188.8, 19.6, 1400.0, 407.0, 2720.0, 2520.0, 2520.0, 748.0, 2.0, 0.149, 46.0, 49.0, 123.0, 46.0, 114.0, 385.5, 33.4, 1400.0, 175.0, 33.4, 2700.0, 33.4, 2100.0, 395.5, 0.005, 33.4, 19.5, 2700.0, 395.5, 175.0, 0.005, 14.55, 33.4, 12.8, 14.55, 156.5, 44.4, 19.5, 429.5, 0.905, 33.4, 397.0, 14.55, NaN, 2750.0, 1400.0, 1400.0, 2100.0, 12.8, 0.5, 218.3, 226.3, 173.55, 44.25, 22.0, 12.8, 158.25, 34.05, 5.0e-6, 2750.0, 44.85, 47.5, 2750.0,2750.0, 2750.0, 2750.0, 3250.0, 1.31, 1.31, 44.85]
test["SiO2"] = [73.94, 68.07, 67.53, 58.4, 58.8485, 72.53, 72.31, 76.65, 64.37, 50.82, 59.7, 50.87, 55.55, 51.71, 66.01, 50.8, 50.51, 72.11, 58.1, 52.3, 56.69, 50.78, 55.05, 53.2, 58.9, 50.3, 77.9, 57.2, 61.03, 50.68, 54.48, 56.83, 50.34, 50.33, 69.84, 69.57, 73.2, 68.2, 50.26, 52.1, 53.06, 54.01, 69.3, 72.42, 61.47, 50.6, 51.1, 51.6, 52.2, 54.6, 55.3, 57.1, 57.2, 59.9, 60.7, 60.9, 61.25, 61.7, 62.1, 62.5, 63.1, 63.8, 64.9, 66.9, 67.5, 67.8, 68.0, 68.6, 68.7, 68.9, 69.7, 71.3, 73.1, 73.2, 74.3, 75.8, 76.0, 76.9, 47.53, 45.81, 46.21, 48.99, 46.9, 43.86, 48.73, 48.56, 48.3, 45.6, 49.25, 45.74, 47.29, 44.93, 41.7, 44.9, 47.8, 48.2, 49.6, 48.1, 46.35, 49.47]
test["TiO2"] = [0.28, 0.32, 0.517, 0.72, 1.10718, 0.29, 0.18, 0.07, 0.59, 0.79, 1.31, 0.53, 0.58, 0.7, 0.657, 0.98, 0.93, 0.66, 0.46, 1.3, 0.39, 1.19, 1.19, 0.29, 0.29, 0.83, 0.235, 0.78, 1.29, 0.9, 1.4, 1.2, 0.46, 2.17, 0.34, 0.42,0.2, 0.3, 1.6, 0.93, 1.88, 0.83, 0.48, 0.219, 0.89, 1.65, 1.8, 0.79, 1.16, 2.2, 0.7, 0.84, 1.63, 0.53, 0.7, 0.42, 1.07, 5.00543e-5, 1.6, 0.57, 0.67, 0.25, 0.14, 0.31, 0.34, 0.54, 0.54, 0.54, 0.41, 0.36, 0.37, 0.26, 0.2, 0.25, 0.04, 0.15, 0.18, 0.08, 2.23, 1.31, 3.13, 0.64, 0.55, 3.72, 0.6, 0.77, 1.33, 1.35, 2.93, 0.6, 2.71, 2.2, 0.59, 0.24, 0.48, 0.58, 0.94, 2.37, 2.55, 1.22]
test["Al2O3"] = [14.19, 16.55, 15.61, 18.5, 17.3459, 14.15, 14.23, 12.6, 14.64, 19.29, 10.42, 9.89, 15.32, 17.14, 16.33, 12.37, 15.94, 14.83, 16.99, 16.41, 18.83, 15.57, 16.85, 13.1, 18.7, 8.48, 12.4, 14.7, 9.2, 15.41, 14.02, 14.16, 18.04, 16.84, 15.11, 15.49, 13.75, 16.83, 13.26, 18.8, 15.82, 10.08, 14.9, 13.27, 16.29, 14.2, 17.1, 13.0, 14.1, 12.3, 14.2, 16.0, 14.2, 15.1, 17.8, 17.4, 15.64, 17.9, 15.4, 15.7, 17.8, 11.8, 18.2, 15.0, 14.7, 15.7, 16.2, 13.8, 13.8, 14.7, 15.2, 15.1, 13.1, 13.3, 13.5, 11.1, 12.1, 13.1, 14.8, 15.13, 14.54, 15.48, 17.8, 9.32, 14.67, 16.29, 16.42, 13.5, 17.88, 11.58, 15.57, 13.53, 4.0, 6.82, 11.3, 14.0, 15.4, 15.17, 14.88, 18.13]
test["FeOT"] = [2.12355, 2.69043, 3.36529, 6.67659, 6.40665, 1.64665, 1.43683, 0.82, 4.58, 6.87, 6.25366, 7.08, 6.45455, 8.22, 3.63, 11.19, 10.71, 0.57, 8.77, 8.37855, 3.42, NaN, 11.2868, 8.1, 7.56, 12.5, 2.37, 7.74, 15.394, 11.14, 11.31, 9.77, 10.66, 10.55, 2.7, 3.03, 1.61, 2.36, 7.16506, 7.66, 8.82, 8.7, 3.18979, 1.55, NaN, 11.0677, 4.02967, 12.6873, 10.2578, 13.2096, 7.90033, 6.80257, 10.1679, 4.32809, 4.82298, 4.79599, 6.10971, 5.37187, 4.55973, 6.10971, 4.56204, 2.13255, 1.06178, 2.3755, 3.32968, 3.17894, 3.62087, 4.41807, 3.00537, 2.3575, 1.74563,2.59693, 1.1498, 2.77977, 0.584877, 2.45648, 1.51168, 1.36091, 9.84955, 12.28, 12.07, 7.5, 9.21966, 14.1458,NaN, 8.98, 12.02, 10.89, 10.03, 10.84, 11.29, 11.75, 11.4, 9.71, 10.5, 9.28, 13.0, 11.27, 11.56, 6.46]
test["MgO"] = [0.66, 0.62, 1.44, 3.48, 2.71457, 0.58, 0.71, 0.55, 1.99, 4.32, 1.15, 16.22, 5.75, 5.53, 1.35, 9.97, 6.64, 0.22, 0.44, 4.62, 0.39, 6.35, 3.68, 11.01, 3.4, 13.3, 0.34, 6.1, 1.83, 7.99, 5.36, 2.09, 10.91, 5.62, 1.74, 0.88, 0.16, 1.42, 9.79, 7.16, 7.3, 10.63, 1.1, 0.31, 3.29, 0.98, 2.4, 10.3, 7.51, 2.4, 5.55, 3.11, 3.25, 1.11, 2.32, 2.1, 3.3, 3.12, 1.7, 2.77, 2.2, 0.18, NaN, 1.2, 0.5, 1.6, 1.3, 0.59, 1.11, 0.53, 0.32, 0.55, 0.16, 0.6,0.19, NaN, 0.05, 0.19, 8.05, 11.09, 7.13, 8.82, 9.9, 8.86, 12.71, 8.76, 6.83, 9.57, 3.66, 4.36, 7.13, 11.44,21.5, 22.0, 15.3, 5.45, 7.3, 8.26, 7.57, 7.42]
test["CaO"] = [0.58, 2.64, 3.65, 3.41, 6.51955, 2.1, 0.34, 0.35, 4.78, 8.6, 6.84, 6.11, 8.68, 7.33, 3.57, 9.88, 9.63, 1.97, 2.97, 7.45, 1.94, 10.56, 0.92, 8.12, 4.3, 13.4, 0.9, 10.36, 2.49, 10.52, 9.74, 8.68, 6.46, 6.56, 3.28, 3.45, 1.21, 2.66, 7.57, 9.23, 7.37, 9.78, 2.8, 3.38, 6.07, 6.95, 10.3, 0.43, 11.0, 5.9, 7.51, 4.2, 6.05, 3.29, 5.57, 4.79, 5.64, 1.73, 4.4, 6.25, 5.45, 1.21, 0.49, 2.9, 2.1, 3.3, 3.7, 2.3, 1.71, 1.83, 1.54, 2.2, 2.0, 2.3, 0.83, 0.03, 0.6, 0.95, 6.59, 10.66, 5.61, 7.69, 11.0, 12.29, 10.2, 11.46, 11.02, 11.1, 9.21, 10.47, 9.23, 10.16, 8.63, 7.57, 9.3, 9.7, 11.5, 9.78, 10.94, 12.45]
test["Na2O"] = [4.15, 4.89, 3.77, 3.8, 4.15415, 3.22, 2.94, 1.59, 4.31, 1.96, 3.81, 1.46, 2.63, 4.24, 4.17, 1.98, 3.44, 5.67, 6.34, 4.31, 5.69, 3.03, 4.92, 3.3, 2.8, 0.6, 3.36, 1.5, 1.52, 2.95, 2.4, 2.87, 0.05, 4.25, 5.24, 4.48, 3.42, 5.07, 2.98, 3.42, 3.49, 2.57, 3.8, 4.5, 3.23, 3.38, 3.7, NaN, 2.21, 3.4, 3.24, 3.83, 2.49, 2.71, 4.32, 3.85, 4.32, 4.35, 3.0, 2.75, 4.5, 3.92, 3.75, 3.5, 3.7, 3.2, 3.9, 4.2, 3.51, 4.45, 4.9, 3.6, 3.4, 4.0, 3.48, 1.34, 3.1, 3.8, 4.65, 2.52, 3.64, 2.88, 2.02, 1.15, 1.88, 2.14, 2.17, 1.58, 3.79, 2.35, 3.28, 3.46, 0.02, 0.12, 1.42, 2.55, 1.81, 2.98, 2.6, 3.16]
test["K2O"] = [4.02, 3.1, 3.63, 4.19, 1.81769, 4.48, 5.36, 5.02, 1.1, 6.06, 0.47, 4.95, 1.5, 0.4, 3.82, 0.26, 0.69, 1.55, 3.48, 1.02, 7.33, 1.23, 0.81, 0.76, 3.1, 0.22, 2.04, 0.44, 2.76, 0.13, 0.91, 0.71, 1.82, 1.57, 1.0, 1.25, 5.23, 1.92, 4.66, 0.561, 1.67, 2.91, 3.3, 4.2, 2.6, 2.23, 1.9, 3.21, 0.61, 0.65, 1.39, 3.85, 2.45, 10.3, 1.23, 2.0, 1.79, 4.98, 3.3, 1.59, 1.27, 2.75, 8.98, 2.59, 3.8, 3.2, 2.7, 2.81, 4.58, 2.77, 4.9, 3.1, 4.5, 1.3, 5.27,7.54, 5.1, 2.7, 4.26, 0.38, 0.7, 1.63, 0.24, 0.7, 0.22, 1.09, 0.23, 0.07, 1.86, 0.45, 1.06, 1.48, 0.53, 0.02, 0.05, 0.19, 0.13, 0.97, 0.49, 0.16]
test["La"] = [15.78, 46.0, 28.8, 35.6, 27.2, 30.18, 50.0, 49.0, 31.1, 91.5, 9.87, 21.4, 9.17, 8.4, 28.6113, 4.34, 8.0, 45.52, 80.7, NaN, 108.0, 15.2, 7.67, 0.8, 36.3, NaN, 46.7, 21.0, 34.2, 2.1, 14.22, 8.81, 1.34, 25.8, 9.0, NaN, 85.1, 7.0, 48.1, 8.76, 36.0, 28.6, 30.0, 44.0, 24.1, 44.7, 50.0, 12.0, 10.7, 50.0, NaN, 43.0, 75.0, 91.0, 13.0, 17.5, 27.0, NaN, 122.0, 8.45, 9.0, 73.0, 53.5, 27.5, 50.0, 50.0, 93.0, 32.0, 28.9, 23.0, 83.0, 13.0, 100.0, 22.6, NaN, 20.2, 200.0, NaN, 67.3, 8.16, 0.0, 2.16, 4.8, 17.707, 3.06, 31.5, 5.54, 2.95, 61.05, 2.18, 31.35, 50.0, 10.0, 20.0, 0.0, 20.0, 3.54, 25.7, 18.39, 2.85]
test["Yb"] = [NaN, 2.4, NaN, 0.72, 3.1, 0.69, NaN, NaN, 3.28, 2.61, 6.19, 1.46, 1.48, 1.64, 1.47864, 2.2, NaN, 2.71, 5.72, NaN, 4.7, 2.84, 2.81, 1.3, 1.76, NaN, 5.66, 1.71, 5.65, 2.03, 2.48, 5.5, 1.78, 4.1, NaN, NaN, 0.94, 0.36, 1.61, 1.3, 1.65, 1.86, 2.5, 2.49, 2.6, 4.1, 5.0, 2.7, 2.2, 7.0, NaN, 2.6, 5.0, 3.16, 2.0, 1.48, 2.0, 1.5, 5.8,2.74, 1.0, 9.36, 3.0, 1.12, 1.0, 1.5, 2.2, 4.0, 1.64, 3.0, 3.83, 0.77, 3.0, 4.81, NaN, 8.34, 15.0, NaN, 1.36, 1.59, NaN, 1.37, 2.3, 1.435, 1.76, 2.17, 2.65, 1.96, 2.53, 1.31, 1.88, 1.98, NaN, NaN, NaN, NaN, 2.1, 2.18,2.15, 2.26]
# We're also going to want a list called "elements" with names of all the fields we want to resample
elements = ["Latitude","Longitude","Age","SiO2","TiO2","Al2O3","FeOT","MgO","CaO","Na2O","K2O","La","Yb"]
# Now let's add uncertainties, starting with age uncertainty
test["Age_sigma"] = [10.0, 10.0, 10.0, 400.0, 10.0, 10.0, 24.0, 24.0, 10.0, 0.027, 150.0, 21.25, 3.5, 6.2, 1.295, 10.0, 35.0, 400.0, 10.0, 125.0, 32.7315, 10.0, 10.0, 42.5, 250.0, 250.0, 250.0, 10.0, 450.0, 10.0, 12.8, 14.3, 300.0, 9.0, 10.0, 10.0, 10.0, 7.0, 1.0, 0.26, 4.0, 1.0, 23.0, 3.7, 2.0, 31.5, 31.6, 300.0, 31.0, 31.6, 150.0, 31.6, 400.0, 147.5, 0.005, 31.6, 14.2, 150.0, 147.5, 31.0, 0.005, 9.25, 31.6, 11.0, 9.25, 91.5, 20.6, 14.2, 13.5, 0.895,31.6, 146.0, 9.25, 10.0, 250.0, 300.0, 300.0, 400.0, 10.2, 0.5, 16.7, 24.7, 28.05, 21.25, 10.0, 10.2, 92.75,31.45, 10.0, 250.0, 10.95, 2.5, 250.0, 250.0, 250.0, 250.0, 250.0, 1.295, 1.295, 10.95]
# For this dataset, lat and lon are good to 0.01 degrees (absolute)
test["Latitude_sigma"] = 0.01 * ones(size(test["Latitude"]))
test["Longitude_sigma"] = 0.01 * ones(size(test["Longitude"]))
# We'll use a 1% relative (1-sigma) default analytical uncertainty for the rest of the elements
# (elements[1:3] are Latitude/Longitude/Age, whose sigmas were set explicitly above)
for i=4:length(elements)
    test[elements[i]*"_sigma"] = test[elements[i]] * 0.01
end
# -
# #### Now that we have a dataset, resample it
# +
## --- Resample
# Compute proximity coefficients (inverse weights)
# Samples close to many others in space and time get a large k (low weight).
k = invweight(test["Latitude"], test["Longitude"], test["Age"])
# # Alternatively, we could weight only by location or only by age (though won't make much difference with this dataset)
# k = invweight_location(test["Latitude"], test["Longitude"])
# k = invweight_age(test["Age"])
# Probability of keeping a given data point when sampling
p = 1.0./((k.*median(5.0./k)) .+ 1.0) # Keep roughly one-fifth of the data in each resampling
# Resample a few hundred times (all elements!)
nresamplings = 200
mctest = bsresample(test, nresamplings*length(test["SiO2"]), elements, p)
# -
# #### Plot some of the results <br/> (though this is a pretty small dataset)
# +
## --- Approach 1: use the bulk-resampled dataset we just created
# Calculate mean MgO for 8 bins between 40% SiO2 and 80% SiO2 from resampled dataset
# (c = bin centers, m = means, e = 1-sigma S.E.M)
(c,m,e) = binmeans(mctest["SiO2"],mctest["MgO"],40,80,8; resamplingratio=nresamplings)
# Plot results (2*e gives ~95% error bars)
plot(c,m,yerror=2*e,label="",xlabel="SiO2", ylabel="MgO",xlims=(40,80),framestyle=:box)
# +
## --- Approach 2: resample the binned means for one element at a time (Can resample many times)
# Calculate binned means and uncertainties
# (c = bincenters, m = mean, el = lower 95% CI, eu = upper 95% CI)
(c,m,el,eu) = bin_bsr_means(test["SiO2"],test["MgO"],40,80,8, p=p, x_sigma=test["SiO2_sigma"], nresamplings=10000)
# Plot results
plot(c,m,yerror=(el,eu),label="",xlabel="SiO2", ylabel="MgO",xlims=(40,80), framestyle=:box)
# -
# ## Now let's try with a bigger dataset:
# ### Reproducing some of the plots from Keller & Schoene 2012
# +
## --- Download and unzip Keller and Schoene (2012) dataset
if ~isfile("ign.h5") # Unless it already exists
    download("https://storage.googleapis.com/statgeochem/ign.h5.gz","./ign.h5.gz")
    run(`gunzip -f ign.h5.gz`) # Unzip file (requires gunzip on PATH)
end
# Read HDF5 file into a Dict of variables
using HDF5
ign = h5read("ign.h5","vars")
# +
## --- Compute proximity coefficients (inverse weights)
# Since this is pretty computationally intensive, let's load a precomputed version instead
# k = invweight(ign["Latitude"] .|> Float32, ign["Longitude"] .|> Float32, ign["Age"] .|> Float32)
k = ign["k"]
# Probability of keeping a given data point when sampling
p = 1.0./((k.*median(5.0./k)) .+ 1.0); # Keep roughly one-fifth of the data in each resampling
p[vec(ign["Elevation"].<-100)] .= 0 # Consider only continental crust
# Age uncertainty: half-width of the reported age bracket
ign["Age_sigma"] = (ign["Age_Max"]-ign["Age_Min"])/2;
t = (ign["Age_sigma"] .< 50) .| isnan.(ign["Age_sigma"]) # Find points with < 50 Ma (or missing) absolute uncertainty
ign["Age_sigma"][t] .= 50 # Set 50 Ma minimum age uncertainty (1-sigma)
# Location uncertainty comes straight from the reported location precision
ign["Latitude_sigma"] = ign["Loc_Prec"]
ign["Longitude_sigma"] = ign["Loc_Prec"]
# +
## --- Try resampling a single variable to reproduce the MgO trend from K&S 2012
xmin = 0
xmax = 3900
nbins = 39   # 100 Ma age bins
# Look only at samples in the basaltic silica range
# (note that if uncertainty in SiO2 were more significant, we should be resampling this too)
t = 43 .< ign["SiO2"] .< 51 # Mafic
# Calculate binned means and uncertainties
# (c = bincenters, m = mean, el = lower 95% CI, eu = upper 95% CI)
(c,m,el,eu) = bin_bsr_means(ign["Age"][t],ign["MgO"][t],xmin,xmax,nbins, p=p[t], x_sigma=ign["Age_sigma"][t])
# Plot results
plot(c,m,yerror=(el,eu),seriestype=:scatter,color=:darkred,mscolor=:darkred,label="")
plot!(xlabel="Age (Ma)", ylabel="MgO (wt. %)",xlims=(0,4000),framestyle=:box,grid=:off,xflip=true) # Format plot
# +
## --- Same as above, but for Na2O
xmin = 0
xmax = 3900
nbins = 39   # 100 Ma age bins
# Look only at samples in the basaltic silica range
# (note that if uncertainty in SiO2 were more significant, we should be resampling this too)
t = 43 .< ign["SiO2"] .< 51 # Mafic
# Calculate binned means and uncertainties
# (c = bincenter, m = mean, el = lower 95% CI, eu = upper 95% CI)
# Fix: removed the stray comma after `x_sigma=` that made this call a syntax error.
(c,m,el,eu) = bin_bsr_means(ign["Age"][t],ign["Na2O"][t],xmin,xmax,nbins, p=p[t], x_sigma=ign["Age_sigma"][t])
# Plot results
plot(c,m,yerror=(el,eu),seriestype=:scatter,markerstrokecolor=:auto,label="")
plot!(xlabel="Age (Ma)", ylabel="Na2O (wt. %)",xlims=(0,4000),framestyle=:box,grid=:off,xflip=true) # Format plot
# -
| examples/BootstrapResamplingDemo.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
import numpy as np
# Demonstrates NumPy broadcasting rules and the errors raised when shapes
# are incompatible.
# Fix: `np.int` was a deprecated alias of the builtin `int` and was removed
# in NumPy 1.24 — use plain `int` (or an explicit np.int64) instead.
a = np.zeros((4, 3), dtype=int)
print(a)
print(a.shape)
b = np.arange(6).reshape(2, 3)
print(b)
print(b.shape)
# +
# (4,3) and (2,3) cannot broadcast: leading dims 4 and 2 conflict.
# print(a + b)
# ValueError: operands could not be broadcast together with shapes (4,3) (2,3)
# -
a = np.zeros((2, 3, 4), dtype=int)
print(a)
print(a.shape)
b = np.arange(3)
print(b)
print(b.shape)
# +
# (2,3,4) and (3,) cannot broadcast: trailing dims 4 and 3 conflict.
# print(a + b)
# ValueError: operands could not be broadcast together with shapes (2,3,4) (3,)
# -
# Reshaping b to (3,1) aligns it with axes 1 and 2 of a, so it broadcasts.
b_3_1 = b.reshape(3, 1)
print(b_3_1)
print(b_3_1.shape)
print(a + b_3_1)
| notebook/numpy_broadcasting_error.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Part A: House Hunting
# You have graduated from MIT and now have a great job! You move to the San Francisco Bay Area and
# decide that you want to start saving to buy a house. As housing prices are very high in the Bay Area,
# you realize you are going to have to save for several years before you can afford to make the down
# payment on a house. In Part A, we are going to determine how long it will take you to save enough
# money to make the down payment given the following assumptions:
#
# 1. Call the cost of your dream home total_cost.
# 2. Call the portion of the cost needed for a down payment portion_down_payment. For
# simplicity, assume that portion_down_payment = 0.25 (25%).
# 3. Call the amount that you have saved thus far current_savings. You start with a current
# savings of $0.
# 4. Assume that you invest your current savings wisely, with an annual return of r (in other words,
# at the end of each month, you receive an additional current_savings*r/12 funds to put into
# your savings – the 12 is because r is an annual rate). Assume that your investments earn a
# return of r = 0.04 (4%).
# 5. Assume your annual salary is annual_salary.
# 6. Assume you are going to dedicate a certain amount of your salary each month to saving for
# the down payment. Call that portion_saved. This variable should be in decimal form (i.e. 0.1
# for 10%).
# 7. At the end of each month, your savings will be increased by the return on your investment,
# plus a percentage of your monthly salary (annual salary / 12).
#
# Write a program to calculate how many months it will take you to save up enough money for a down
# payment. You will want your main variables to be floats, so you should cast user inputs to floats.
# +
# ---- Part A: months needed to save the down payment ----
# All user inputs are cast to float so arithmetic is exact for fractional entries.
total_cost = float(input("Enter cost of the home:"))
annual_salary = float(input("Enter your annual salary:"))
portion_saved = float(input("Enter portion saved:"))  # decimal fraction of salary saved each month (e.g. 0.1 for 10%)
portion_down_payment = 0.25 * total_cost  # down payment is 25% of the home cost
current_savings = 0.0
r = 0.04  # annual investment return rate
# MONTHLY PARAMETERS
monthly_salary = annual_salary / 12
monthly_saving = portion_saved * monthly_salary  # fixed amount saved each month
month = 0
# Each month the savings grow by one month of investment return plus the fixed saving.
# Strict '<' so the loop stops as soon as savings reach the target (the
# original '<=' ran one extra month when savings exactly equalled the target).
while current_savings < portion_down_payment:
    current_savings += (current_savings * r / 12) + monthly_saving
    month += 1
print(f"the number of months required for downpayment:{month} ")
# -
# Part B: Saving, with a raise
#
# Background
# In Part A, we unrealistically assumed that your salary didn’t change. But you are an MIT graduate, and
# clearly you are going to be worth more to your company over time! So we are going to build on your
# solution to Part A by factoring in a raise every six months.
#
# In ps1b.py, copy your solution to Part A (as we are going to reuse much of that machinery). Modify
# your program to include the following
# 1. Have the user input a semi-annual salary raise semi_annual_raise (as a decimal percentage)
# 2. After the 6th month, increase your salary by that percentage. Do the same after the
#    12th month, the 18th month, and so on.
#
# Write a program to calculate how many months it will take you to save up enough money for a down
# payment. Like before, assume that your investments earn a return of r = 0.04 (or 4%) and the
# required down payment percentage is 0.25 (or 25%). Have the user enter the following variables:
# 1. The starting annual salary (annual_salary)
# 2. The percentage of salary to be saved (portion_saved)
# 3. The cost of your dream home (total_cost)
# 4. The semiannual salary raise (semi_annual_raise)
# +
# ---- Part B: months needed to save the down payment, with a semi-annual raise ----
total_cost = float(input("Enter cost of the home:"))
annual_salary = float(input("Enter your annual salary:"))
portion_saved_perc = float(input("Enter portion saved:"))  # decimal fraction of salary saved each month
semi_inc = float(input("enter inc: "))  # semi-annual raise as a decimal (e.g. 0.07 for 7%)
portion_down_payment = 0.25 * total_cost  # down payment is 25% of the home cost
current_savings = 0.0
r = 0.04  # annual investment return rate
# MONTHLY PARAMETERS
monthly_salary = annual_salary / 12
portion_saved = portion_saved_perc * monthly_salary  # amount saved each month
month = 0
# Strict '<' so the loop stops once savings reach the target (the original
# '<=' ran one extra month when savings exactly equalled the target).
while current_savings < portion_down_payment:
    current_savings += (current_savings * r / 12) + portion_saved
    month += 1
    # Apply the raise after every 6th month, then recompute the monthly saving.
    if month % 6 == 0:
        monthly_salary = (monthly_salary * semi_inc) + monthly_salary
        portion_saved = portion_saved_perc * monthly_salary
print(f"the number of months required for downpayment:{month} ")
# -
# +
# Approach 1: sanity-check applying the semi-annual raise a couple of times.
annual_salary = float(input("Enter your annual salary:"))
# Prompt fixed: this input is the raise, not the salary (the original prompt
# asked for the annual salary twice).
semi_inc = float(input("Enter the semi-annual raise:"))  # decimal percentage
# range(1, 10, 6) yields i = 1, 7 -- i.e. the raise is applied twice.
for i in range(1, 10, 6):
    annual_salary += semi_inc * annual_salary
    print(annual_salary)
# +
# Approach 2: apply the raise at every 6th month over 100 months.
annual_salary = float(input("Enter your annual salary:"))
# Prompt fixed: this input is the raise, not the salary.
semi_inc = float(input("Enter the semi-annual raise:"))  # decimal percentage
i = 1  # month counter -- was never initialized, so the loop raised NameError
while i <= 100:
    if i % 6 == 0:
        annual_salary += semi_inc * annual_salary
        print(annual_salary)
    i += 1
# -
| PSET1/PSET1_SOL.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# Loading model to compare the results
import pickle
# Load the previously trained QDA model; the context manager closes the file
# handle deterministically (the original left it open).
with open('QDA_model.pkl', 'rb') as f:
    model = pickle.load(f)
# Predict on a single 13-feature sample and print the predicted class.
print(model.predict([[63, 1, 3, 145, 233, 1, 0, 150, 0, 2.3, 0, 0, 1]]))
| templated-transitive/Untitled1.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="view-in-github" colab_type="text"
# <a href="https://colab.research.google.com/github/yhvh-chen/codeNet/blob/main/jinja2conf.ipynb" target="_parent"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a>
# + id="mFmGcZv_Sscl"
# Render a network-interface configuration file from a Jinja2 template.
import jinja2
# + id="FhZCI5EjZ6Xh"
# Look for templates in the current working directory; "conf.j2" must exist there.
templateLoader = jinja2.FileSystemLoader(searchpath="./")
templateEnv = jinja2.Environment(loader=templateLoader)
TEMPLATE_FILE = "conf.j2"
template = templateEnv.get_template(TEMPLATE_FILE)
# + id="INeR3-foRF-8"
# Template variables. NOTE(review): the dotted keys ('conf.descri', ...) can
# only be read in the template via subscript syntax, e.g. just['conf.descri']
# -- confirm these keys match what conf.j2 actually references.
conf = {
    'interf_name': 'lo0',
    'conf.descri': 'test',
    'conf.interf_addr': '1.1.1.1',
    'conf.addr_mask': '255.255.255.255'
}
# + colab={"base_uri": "https://localhost:8080/"} id="YdqkE_HlgOTw" outputId="20258b25-5ba6-4e10-84b0-bc78504f1458"
# The whole dict is exposed to the template under the single name "just".
output = template.render(just=conf)
print(output)
# + id="DBS_2EgZskx3"
# Write the rendered text to a file named "conf" in the working directory.
with open("conf", "w+") as f:
    f.write(output)
| jinja2conf.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# + _cell_guid="b1076dfc-b9ad-4769-8c92-a6c4dae69d19" _uuid="8f2839f25d086af736a60e9eeb907d3b93b6e0e5"
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# %matplotlib inline
pd.options.display.float_format = '{:,}'.format
import warnings
warnings.filterwarnings('ignore')
# + _cell_guid="79c7e3d0-c299-4dcb-8224-4455121ee9b0" _uuid="d629ff2d2480ee46fbb7e2d37f6b5fab8052498a"
# Load the data twice: once indexed by the first column for the world-level
# view, once with the default integer index for the melt/reshape below.
df = pd.read_csv('Military Expenditure.csv', index_col =[0])
df_with_index = pd.read_csv('Military Expenditure.csv')
# -
# Quick data-quality checks: null counts, dtypes, and overall shape.
df.isnull().sum()
df.info()
df.shape
# Top 10 spenders in 2018, restricted to rows of Type 'Country'.
graph_data = df[df['Type'] == 'Country'].sort_values(by='2018',ascending=False)[['2018']][:10]
graph_data
plt.plot(graph_data["2018"],c ='Y')
plt.xticks(rotation =45)
# India only: drop metadata columns, then reshape the wide year columns into
# long (Name, Date, Expend) format for a time-series plot.
df_india = df_with_index[df_with_index["Name"] == "India"]
df_ind =df_india.drop(["Indicator Name","Code", "Type"], axis =1)
df_ind_graph =df_ind.melt(id_vars=["Name"],
        var_name=["Date"],
        value_name="Expend").sort_values(by = 'Expend',ascending = False)
# Year column names become datetimes so the x-axis is ordered chronologically.
df_ind_graph["Date"] = pd.to_datetime(df_ind_graph["Date"])
plt.plot(df_ind_graph['Date'],df_ind_graph['Expend'],c='r')
plt.xlabel("Year")
plt.ylabel("Increase Expend")
plt.title("1963 to 2018 Military Expend increse \n Year wise")
'''
conclusion
Military expediture Budget increase little bit at 1963 due to india-china after that it was increase in 90s and after 2003
it increses so fast.
'''
| Military Expend 1947 to 2018( India) compare to world/India 1963 to 2018 Review.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# # A2 - Bias in Data
#
# The goal of this assignment is to explore the concept of bias through data on Wikipedia articles - specifically, articles on political figures from a variety of countries. For this assignment, we will combine a dataset of Wikipedia articles with a dataset of country populations, and use a machine learning service called ORES to estimate the quality of each article.
#
# We will then perform an analysis of how the coverage of politicians on Wikipedia and the quality of articles about politicians varies between countries. The analysis will consist of a series of tables that show:
#
# 1. The countries with the greatest and least coverage of politicians on Wikipedia compared to their population.
# 2. The countries with the highest and lowest proportion of high quality articles about politicians.
# 3. A ranking of geographic regions by articles-per-person and proportion of high quality articles.
#
# ### Step 1: Getting the Article and Population Data
#
# The first step is getting the data. The Wikipedia [politicians by country dataset](https://figshare.com/articles/Untitled_Item/5513449) can be found on Figshare. Here it is called page_data.csv.
# The population data is in a file called WPDS_2020_data.csv. Here it is called WPDS_df. This dataset is drawn from the [world population data sheet](https://www.prb.org/international/indicator/population/table/) published by the Population Reference Bureau.
#
# Our analysis will also use score estimates generated from ORES. You must `pip install ores` prior to running this notebook, or follow the [installation instructions](https://github.com/wikimedia/ores).
import pandas as pd
import numpy as np
from ores import api
from tqdm import tqdm
# Paths to the raw input datasets, relative to this notebook's directory.
page_data_path = '../data/page_data.csv'
WPDS_path = '../data/WPDS_2020_data.csv'
# ### Step 2: Cleaning the Data
#
# Both page_df and WPDS_df contain some rows we will need to filter out or ignore. We will clean the datasets here.
# Filter out any rows that begin with 'Template:'. These are not Wikipedia articles.
page_df = pd.read_csv(page_data_path)
# str.startswith matches the stated intent exactly; str.contains would also
# drop a genuine article that merely mentions "Template:" mid-title.
page_df = page_df.loc[~page_df['page'].str.startswith('Template:')]
page_df
# Here we add a column to the WPDS_df for 'Region' as well as 'region_population' so we have a way to associate each country with its region. Then we separate the regions and countries into separate dfs.
WPDS_df = pd.read_csv(WPDS_path)
WPDS_df
# +
# Adding the sub-region and region_population to WPDS_df.
# The first three tuples are seeded by hand to match the sheet's first three
# rows (WORLD / AFRICA / NORTHERN AFRICA -- TODO confirm against the 2020 file).
# From row 3 on, each row inherits the most recent 'Sub-Region' header seen
# while scanning downward, since the sheet lists a sub-region before its countries.
region = ('NORTHERN AFRICA', 244344000)
regions = [('WORLD', 7772850000) , ('AFRICA', 1337918000), ('NORTHERN AFRICA', 244344000)]
for i in range(3, len(WPDS_df)):
    if WPDS_df.iloc[i]['Type'] == 'Sub-Region':
        region = (WPDS_df.iloc[i]['Name'], WPDS_df.iloc[i]['Population'])
    regions.append(region)
# One (Region, region_population) tuple per row; concatenated column-wise.
regions_tuples_df = pd.DataFrame(regions, columns=['Region', 'region_population'])
WPDS_df = pd.concat([WPDS_df, regions_tuples_df], axis=1)
WPDS_df
# -
# Separate all UPPERCASE entries from lowercase ones. UPPERCASE names are regions and lowercase are countries.
# The explicit "== True" / "== False" comparisons also exclude any row whose
# isupper() result is NaN from both frames (NaN compares unequal to both).
regions_df = WPDS_df.loc[WPDS_df.Name.str.isupper() == True]
countries_df = WPDS_df.loc[WPDS_df.Name.str.isupper() == False]
regions_df.head(10)
countries_df
# ### Step 3: Getting Article Quality Predictions
# Now we need to get the predicted quality scores for each article in the Wikipedia dataset. We're using a machine learning system called ORES. This was originally an acronym for "Objective Revision Evaluation Service" but was simply renamed “ORES”. ORES is a machine learning tool that can provide estimates of Wikipedia article quality. The article quality estimates are, from best to worst:
#
# 1. FA - Featured article
# 2. GA - Good article
# 3. B - B-class article
# 4. C - C-class article
# 5. Start - Start-class article
# 6. Stub - Stub-class article
#
# These were learned based on articles in Wikipedia that were peer-reviewed using the [Wikipedia content assessment procedures](https://en.wikipedia.org/wiki/Wikipedia:Content_assessment).These quality classes are a sub-set of quality assessment categories developed by Wikipedia editors. ORES will assign one of these 6 categories to any rev_id we send it.
#
# To get the score estimates, we will build a list of all the rev_ids in page_df and feed them one at a time to ORES. Each query will return a generator object which we will collect in a list called 'results'.
# +
# Open an ORES session (the second argument identifies this client to the
# Wikimedia API) and request an articlequality score for every revision id,
# one at a time. Each call returns a lazy generator; collect them in `results`.
ores_session = api.Session("https://ores.wikimedia.org", "DATA 512 Class project <<EMAIL>>")
revids = list(page_df.rev_id)
results = []
for revid in revids:
    results.append(ores_session.score("enwiki", ["articlequality"], [revid]))
# -
# Empty list to populate with (rev_id, score) tuples.
scores = []
# Here is an example of one of the generator objects that is stored in 'results'. **We will only be concerned with the 'prediction' field.**
for score in results[0]:
    print(score)
# This cell populates the list `scores=[(rev_id, score)]` which stores a tuple for each rev_id, score pair. We will then convert the list `scores` to a dataframe and finally merge it with `page_df` using rev_id as the key so that each article has a score.
for i in tqdm(range(len(results))):
    for score in results[i]:
        # Revisions ORES could not score are recorded as NaN so they can be
        # separated out below.
        if 'error' in list(score['articlequality'].keys()):
            scores.append((revids[i], np.nan))
        else:
            scores.append((revids[i], score['articlequality']['score']['prediction']))
# Convert scores which is a list of tuples to a dataframe
scores_df = pd.DataFrame(scores, columns=['rev_id', 'score'])
scores_df
# scores_df.to_csv('../data/scores_df.csv')
# NOTE(review): the line below re-reads a previously cached CSV, replacing
# the scores just computed above -- confirm the cache is current before relying on it.
scores_df = pd.read_csv('../data/scores_df.csv')
# Here we merge the scores_df with the page_df on rev_id. We use a left merge to retain all the rows of page_df. Then we will separate all the articles that ORES was unable to determine a score (score='NaN') from the articles with valid scores.
page_df = page_df.merge(scores_df, how='left', left_on='rev_id', right_on='rev_id')
page_df
# Split into articles without a score and articles with a valid score.
nan_scores_df = page_df[page_df['score'].isna()]
articles_df = page_df[~page_df['score'].isna()]
# There are 277 articles for which ORES was unable to determine a score. We will export the dataframe containing those rows to a file called `nan_scores_df.csv`.
nan_scores_df.shape
nan_scores_df.to_csv('../data/nan_scores_df.csv')
# ### Step 4: Combining the Datasets
#
# We need to merge the Wikipedia data and population data together. Both have fields containing country names which we will use for the merge. After merging the data, we will find that some entries could not be merged. Either the population dataset does not have an entry for the equivalent Wikipedia country, or vise versa.
#
# We will use an outer merge to retain all rows from both dataframes. Then we will remove any rows that are missing article, country, or score. We will save them to a CSV file called: `wp_wpds_countries-no_match.csv`
#
# The remaining data will be consolidated into a single CSV file called: `wp_wpds_politicians_by_country.csv`.
#
# The schema for that file looks like this:
#
# | Column |
# |---------------------|
# | country |
# | article_name |
# | revision_id |
# | article_quality_est |
# | population |
# | region |
# | region_population |
# +
# Outer merge keeps rows from both sides so unmatched entries (countries
# present in only one dataset) can be inspected and exported below.
merged_df = articles_df.merge(countries_df, how='outer', left_on='country', right_on='Name')
merged_df
# +
# Every row that is missing a score, Name, or page will be consolidated into no_matches_df.
no_matches_df = merged_df.loc[(merged_df.score.isna()) | (merged_df.Name.isna()) | (merged_df.page.isna())]
# After dropping the no_matches, the remaining rows are valid to use for our analysis.
merged_df = merged_df.drop(index=no_matches_df.index)
# Drop bookkeeping columns and rename to the published schema:
# country, article_name, revision_id, article_quality_est, population, region, region_population.
merged_df = merged_df.drop(columns=['Name', 'Type', 'FIPS', 'TimeFrame', 'Data (M)', 'Unnamed: 0']).rename(columns={'page': 'article_name', 'rev_id': 'revision_id', 'score': 'article_quality_est', 'Population': 'population', 'Region': 'region'})
merged_df
# -
no_matches_df.to_csv('../data/wp_wpds_countries-no_match.csv')
merged_df.to_csv('../data/wp_wpds_politicians_by_country.csv')
# ### Step 5: Analysis
#
# The analysis will consist of calculating the proportion (as a percentage) of articles-per-population and high-quality articles for each country AND for each geographic region. By "high quality" articles, in this case we mean the number of articles about politicians in a given country that ORES predicted would be in either the "FA" (featured article) or "GA" (good article) classes.
#
# **Examples:**
# - If a country has a population of 10,000 people, and you found 10 articles about politicians from that country, then the percentage of articles-per-population would be .1%.
# - If a country has 10 articles about politicians, and 2 of them are FA or GA class articles, then the percentage of high-quality articles would be 20%.
#
# For the country-level analysis, we will begin by using groupby on country and article_quality_est so that we have a count of the number of articles of each level for each country. We also want to retain the country's population.
# +
# Count articles per (country, quality-class) pair and carry each country's
# population along ('first' works because population repeats within a country).
groupby_country_df = merged_df.groupby(['country', 'article_quality_est']).agg({'revision_id': 'count', 'population': 'first'})
groupby_country_df
# -
# We have 183 countries represented in our final dataset that have at least one Wikipedia article with an estimated score.
len(merged_df.country.unique())
# Here we calculate the percentage of FA or GA articles per population and the percentage of high quality articles per total number of articles for that country. We will collect the results in results_by_country_df.
# +
# Per-country metrics:
#   articles_per_pop -- politician articles per person, as a percentage
#   high_quality     -- share of the country's articles rated FA or GA, as a percentage
data_by_country = []
countries = merged_df.country.unique()
for country in countries:
    # Sum FA/GA counts via a boolean index mask: rows that do not exist simply
    # contribute nothing, so no existence check is needed. (The original
    # .loc[(country, ['FA', 'GA']), :] raises KeyError in modern pandas when
    # only one of the two labels is present.)
    high_mask = groupby_country_df.index.isin([(country, 'FA'), (country, 'GA')])
    high_articles_sum = groupby_country_df.loc[high_mask, 'revision_id'].sum()
    country_rows = groupby_country_df.loc[(country, slice(None)), :]
    articles_sum = country_rows.revision_id.sum()
    # .iloc[0] for positional access: plain [0] on a Series is label-based
    # and its positional fallback is deprecated/removed in modern pandas.
    country_population = country_rows.population.iloc[0]
    articles_per_pop = (articles_sum / country_population) * 100
    high_quality = (high_articles_sum / articles_sum) * 100
    data_by_country.append([country, articles_per_pop, high_quality])
results_by_country_df = pd.DataFrame(data_by_country, columns=['country', 'articles_per_pop', 'high_quality'])
results_by_country_df
# -
# Now we perform a similar aggregation and calculation for the regions. The region-level statistics will be stored in results_by_region_df.
# +
# Count articles per (region, quality-class) pair and carry the region's
# population along ('first' works because it repeats within a region).
groupby_region_df = merged_df.groupby(['region', 'article_quality_est']).agg({'revision_id': 'count', 'region_population': 'first'})
groupby_region_df
# -
# We have 19 unique regions.
len(merged_df.region.unique())
# +
# Per-region metrics, mirroring the per-country computation above.
data_by_region = []
regions = merged_df.region.unique()
for region in regions:
    # Boolean mask over the (region, quality) MultiIndex: missing FA/GA rows
    # contribute nothing, avoiding the KeyError the original .loc with a
    # partially-missing label list raises in modern pandas.
    high_mask = groupby_region_df.index.isin([(region, 'FA'), (region, 'GA')])
    high_articles_sum = groupby_region_df.loc[high_mask, 'revision_id'].sum()
    region_rows = groupby_region_df.loc[(region, slice(None)), :]
    articles_sum = region_rows.revision_id.sum()
    # .iloc[0] for positional access (plain [0] is label-based in pandas).
    region_population = region_rows.region_population.iloc[0]
    articles_per_pop = (articles_sum / region_population) * 100
    high_quality = (high_articles_sum / articles_sum) * 100
    data_by_region.append([region, articles_per_pop, high_quality])
results_by_region_df = pd.DataFrame(data_by_region, columns=['region', 'articles_per_pop', 'high_quality'])
results_by_region_df
# -
# ### Step 6: Results
# Below is a summary of the results:
#
# #### 1. Top 10 countries by coverage: 10 highest-ranked countries in terms of number of politician articles as a proportion of country population
results_by_country_df.nlargest(10, 'articles_per_pop', keep='all')[['country', 'articles_per_pop']]
# #### 2. Bottom 10 countries by coverage: 10 lowest-ranked countries in terms of number of politician articles as a proportion of country population
results_by_country_df.nsmallest(10, 'articles_per_pop', keep='all')[['country', 'articles_per_pop']].head(10)
# #### 3. Top 10 countries by relative quality: 10 highest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
results_by_country_df.nlargest(10, 'high_quality', keep='all')[['country', 'high_quality']]
# #### 4. Bottom 10 countries by relative quality: 10 lowest-ranked countries in terms of the relative proportion of politician articles that are of GA and FA-quality
results_by_country_df.nsmallest(10, 'high_quality', keep='all')[['country', 'high_quality']].head(10)
# #### 5. Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the total count of politician articles from countries in each region as a proportion of total regional population
results_by_region_df.sort_values('articles_per_pop', ascending=False)[['region', 'articles_per_pop']]
# #### 6. Geographic regions by coverage: Ranking of geographic regions (in descending order) in terms of the relative proportion of politician articles from countries in each region that are of GA and FA-quality
results_by_region_df.sort_values('high_quality', ascending=False)[['region', 'high_quality']]
| src/hcds-a2-bias.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 2
# language: python
# name: python2
# ---
# # Test PDPbox with binary classification problem
# ## Kaggle Titanic with Logistic Regression
# This demo is based on **Titanic: Machine Learning from Disaster**
# https://www.kaggle.com/c/titanic/details/tutorials
# +
import pandas as pd
import numpy as np
import os
# %matplotlib inline
# -
# ### read dataset
titanic = pd.read_csv('titanic_train.csv')
# ### simple feature engineering
# #### impute null values
# Missing ages get the median age; missing embarkation ports default to "S".
titanic["Age"] = titanic["Age"].fillna(titanic["Age"].dropna().median())
titanic["Embarked"] = titanic["Embarked"].fillna("S")
# #### handle categorical features
# Binary-encode Sex (male=1, female=0) and one-hot encode Embarked.
titanic['Sex'] = titanic['Sex'].apply(lambda x : 1 if x == 'male' else 0)
titanic = pd.get_dummies(titanic, columns=['Embarked'])
# #### features to use
features = ['Pclass', 'Sex', 'Age', 'SibSp', 'Parch', 'Fare', 'Embarked_C', 'Embarked_Q', 'Embarked_S']
# Make the local PDPbox checkout (two directories up) importable.
import sys
sys.path.insert(0, '../../')
# ## Test logistic regression with PDPbox
from sklearn.linear_model import LogisticRegression
from pdpbox import pdp
lr = LogisticRegression(C=1.0, random_state=123, n_jobs=4)
# %time lr.fit(titanic[features], titanic['Survived'])
# ## binary feature: Sex
# NOTE(review): these calls use an old PDPbox API (positional model/df
# arguments, plot_org_pts/frac_to_plot keywords) -- confirm against the
# PDPbox version pinned for this Python 2 notebook.
pdp_sex_lr = pdp.pdp_isolate(lr, titanic[features], 'Sex')
pdp.pdp_plot(pdp_sex_lr, 'sex', plot_org_pts=True, plot_lines=True, frac_to_plot=1)
# ## numeric feature: Fare
pdp_fare_lr = pdp.pdp_isolate(lr, titanic[features], 'Fare', num_grid_points=20)
pdp.pdp_plot(pdp_fare_lr, 'Fare', plot_org_pts=True, frac_to_plot=0.5, plot_lines=True)
pdp.pdp_plot(pdp_fare_lr, 'Fare', plot_org_pts=True, frac_to_plot=0.5, plot_lines=True, cluster=True, n_cluster_centers=10)
pdp.pdp_plot(pdp_fare_lr, 'Fare', plot_org_pts=True, frac_to_plot=0.5, plot_lines=True, x_quantile=True)
# ## one-hot encoding feature: Embarked
pdp_embark_lr = pdp.pdp_isolate(lr, titanic[features], ['Embarked_C', 'Embarked_S', 'Embarked_Q'])
pdp.pdp_plot(pdp_embark_lr, 'Embark', plot_org_pts=True, plot_lines=True, frac_to_plot=100)
# ### test interaction between age and fare
inter_lr = pdp.pdp_interact(lr, titanic[features], ['Age', 'Fare'], num_grid_points=[10, 10], percentile_ranges=[None, None])
pdp.pdp_interact_plot(inter_lr, ['age', 'fare'], center=True, plot_org_pts=True, plot_lines=True, frac_to_plot=0.5, x_quantile=True)
| test/Binary classification/titanic_lr.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Courte introduction au machine learning
#
# Le jeu de données [Wine Quality Data Set](https://archive.ics.uci.edu/ml/datasets/Wine+Quality) recense les composants chimiques de vins ainsi que la note d'experts. Peut-on prédire cette note à partir des composants chimiques ? Peut-être que si on arrive à construire une fonction qui permet de prédire cette note, on pourra comprendre comment l'expert note les vins.
# Build the notebook's clickable table of contents.
from jyquickhelper import add_notebook_menu
add_notebook_menu()
# %matplotlib inline
# ## Données et première régression linéaire
#
# On peut utiliser la fonction implémentée dans ce module.
# Load the wine dataset and recode "color" in place as an integer flag:
# 1 for white wines, 0 for red. All other columns are left untouched.
from papierstat.datasets import load_wines_dataset
df = load_wines_dataset()
df["color"] = (df["color"] == "white").astype(int)
df.head()
# Ou on peut aussi récupérer les données depuis le site et former les mêmes données.
# +
# import pandas
# df_red = pandas.read_csv('winequality-red.csv', sep=';')
# df_red['color'] = 0
# df_white = pandas.read_csv('winequality-white.csv', sep=';')
# df_white['color'] = 1
# df = pandas.concat([df_red, df_white])
# df.shape, df_red.shape, df_white.shape
# -
# Summary statistics for every column.
df.describe().T
# I tend to reuse ``df`` everywhere, even when the first one gets overwritten.
# Keep the original in a separate variable.
df_data = df
# What does the distribution of quality scores look like?
df['quality'].hist();
# Score distributions for red and white wines.
import matplotlib.pyplot as plt
fig, ax = plt.subplots(1, 2, figsize=(12, 3))
df[df['color'] == 0]['quality'].hist(ax=ax[0])
df[df['color'] == 1]['quality'].hist(ax=ax[1])
ax[0].set_title('rouge')
ax[1].set_title('blanc');
# Build the learning dataset: on one side what we know (the features X),
# on the other what we want to predict (y).
df.columns
X = df.drop("quality", axis=1)
y = df['quality']
# Split into train / test, as is customary: fit on one part, check
# predictions on the other.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
    X, y, random_state=42)
# Fit a first model: a linear regression.
from sklearn.linear_model import LinearRegression
clr = LinearRegression()
clr.fit(X_train, y_train)
# Inspect the fitted coefficients.
clr.coef_
clr.intercept_
# Compute the R^2 coefficient on the test set.
from sklearn.metrics import r2_score
pred = clr.predict(X_test)
r2_score(y_test, pred)
# And the mean absolute error.
from sklearn.metrics import mean_absolute_error
mean_absolute_error(y_test, clr.predict(X_test))
# The model is off by about half a point of quality score on average.
# ## Arbre de régression
#
# Voyons ce qu'un arbre de régression peut faire.
# Fit a regression tree for comparison.
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor(min_samples_leaf=10)
dt.fit(X_train, y_train)
r2_score(y_test, dt.predict(X_test))
# The tree shows why the train/test split matters: it can reproduce its
# training data almost exactly, while test-set performance stays rather poor.
r2_score(y_train, dt.predict(X_train))
# To avoid that, tune *min_samples_leaf*: each tree prediction is then an
# average of at least that many training scores, which makes overfitting
# much less likely.
import pandas
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
res = []
for i in tqdm(range(1, 50)):
    dt = DecisionTreeRegressor(min_samples_leaf=i)
    reg = LinearRegression()
    dt.fit(X_train, y_train)
    reg.fit(X_train, y_train)
    r = {
        'minl': i,
        'r2_train_dt': r2_score(y_train, dt.predict(X_train)),
        'r2_test_dt': r2_score(y_test, dt.predict(X_test)),
        'r2_train_reg': r2_score(y_train, reg.predict(X_train)),
        'r2_test_reg': r2_score(y_test, reg.predict(X_test)),
    }
    res.append(r)
df = pandas.DataFrame(res)
df.head(2)
df.plot(x="minl", y=["r2_train_dt", "r2_test_dt",
                     "r2_train_reg", "r2_test_reg"]);
# On voit que la performance sur la base de test augmente rapidement puis stagne sans jamais rattraper celle de la base d'apprentissage. Elle ne dépasse pas celle d'un modèle linéaire ce qui est décevant. Essayons avec une forêt aléatoire.
# ## Forêt aléatoire
# Same experiment again, now also with a random forest (an average of trees,
# each fitted on a subsample of the data).
import pandas
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
res = []
for i in tqdm(range(1, 50, 2)):
    dt = DecisionTreeRegressor(min_samples_leaf=i)
    reg = LinearRegression()
    rf = RandomForestRegressor(n_estimators=25, min_samples_leaf=i)
    dt.fit(X_train, y_train)
    reg.fit(X_train, y_train)
    rf.fit(X_train, y_train)
    r = {
        'minl': i,
        'r2_train_dt': r2_score(y_train, dt.predict(X_train)),
        'r2_test_dt': r2_score(y_test, dt.predict(X_test)),
        'r2_train_reg': r2_score(y_train, reg.predict(X_train)),
        'r2_test_reg': r2_score(y_test, reg.predict(X_test)),
        'r2_train_rf': r2_score(y_train, rf.predict(X_train)),
        'r2_test_rf': r2_score(y_test, rf.predict(X_test)),
    }
    res.append(r)
df = pandas.DataFrame(res)
df.head(2)
df.plot(x="minl", y=["r2_train_dt", "r2_test_dt",
                     "r2_train_reg", "r2_test_reg",
                     "r2_train_rf", "r2_test_rf"]);
# A l'inverse de l'arbre de régression, la forêt aléatoire est meilleure lorsque ce paramètre est petit. Une forêt est une moyenne de modèle, chacun appris sur un sous-échantillon du jeu de données initiale. Même si un arbre apprend par coeur, il est peu probable que son voisin ait appris le même sous-échantillon. En faisant la moyenne, on fait un compromis.
# ## Validation croisée
#
# Il reste à vérifier que le modèle est robuste. C'est l'objet de la validation croisée qui découpe le jeu de données en 5 parties, apprend sur 4, teste une 1 puis recommence 5 fois en faisant varier la partie qui sert à tester.
from sklearn.model_selection import cross_val_score
cross_val_score(
    RandomForestRegressor(n_estimators=25), X, y, cv=5,
    verbose=1)
# This result should give pause: the fold scores are far from stable. Either
# the model is not robust, or the methodology is wrong somewhere. Here the
# data is sorted -- red wines first, then white -- so a plain 5-fold split can
# train on reds and test on whites, which clearly fails. It also means the
# two colours are very different and the colour column is probably redundant
# with the other features. Shuffle the rows first.
from sklearn.utils import shuffle
X2, y2 = shuffle(X, y)
cross_val_score(
    RandomForestRegressor(n_estimators=25), X2, y2, cv=5,
    verbose=1)
# Much better. The same effect can be obtained with a shuffling splitter.
from sklearn.model_selection import ShuffleSplit
cross_val_score(
    RandomForestRegressor(n_estimators=25), X, y, cv=ShuffleSplit(5),
    verbose=1)
# ## Pipeline
#
# On peut caler un modèle après une ACP mais il faut bien se souvenir de toutes les étapes intermédiaires avant de prédire avec le modèle final.
# Fit a model after a PCA; note every intermediate step must be remembered
# and re-applied before predicting with the final model.
from sklearn.decomposition import PCA
pca = PCA(6)
pca.fit(X_train, y_train)
rf = RandomForestRegressor(n_estimators=100)
X_train_pca = pca.transform(X_train)
rf.fit(X_train_pca, y_train)
X_test_pca = pca.transform(X_test)
pred = rf.predict(X_test_pca)
r2_score(y_test, pred)
# Or use a *pipeline*, which chains the preprocessing and the predictor into
# a single model object.
from sklearn.pipeline import Pipeline
pipe = Pipeline([
    ('acp', PCA(n_components=6)),
    ('rf', RandomForestRegressor(n_estimators=100))
])
pipe.fit(X_train, y_train);
# ## Grille de recherche
#
# De cette façon, on peut chercher simplement les meilleurs hyperparamètres du modèle.
# Grid-search the pipeline's hyper-parameters (PCA components, forest size).
from sklearn.model_selection import GridSearchCV
param_grid = {'acp__n_components': list(range(1, 11, 3)),
              'rf__n_estimators': [10, 20, 50]}
grid = GridSearchCV(pipe, param_grid=param_grid, verbose=1,
                    cv=ShuffleSplit(3))
grid.fit(X, y)
grid.best_params_
grid.predict(X_test)
r2_score(y_test, grid.predict(X_test))
# That number looks much too good to be true: the test rows were part of the
# data used for the search, so this estimate is optimistic.
grid.best_score_
# Nettement plus plausible.
# ## Enregistrer, restaurer
#
# Le moyen le plus simple de conserver les modèles en python est de les sérialiser : on copie la mémoire sur disque puis on la restaure plus tard.
# +
# Persist the fitted model by pickling its memory image to disk.
import pickle
with open('piperf.pickle', 'wb') as f:
    pickle.dump(grid, f)
# -
import glob
glob.glob('*.pickle')
# Restore the model from disk and check it still predicts.
with open("piperf.pickle", 'rb') as f:
    grid2 = pickle.load(f)
grid2.predict(X_test)
# ## Prédiction de la couleur
#
# Le fait que la première validation croisée échoue était un signe que la couleur était facilement prévisible. Vérifions.
# The failed first cross-validation hinted that the colour is easy to predict
# from the other features. Check it with a classifier (quality and colour
# itself are removed from the inputs).
Xc = df_data.drop(['quality', 'color'], axis=1)
yc = df_data["color"]
Xc_train, Xc_test, yc_train, yc_test = train_test_split(Xc, yc)
from sklearn.linear_model import LogisticRegression
log = LogisticRegression(solver='lbfgs', max_iter=1500)
log.fit(Xc_train, yc_train);
from sklearn.metrics import log_loss
log_loss(yc_test, log.predict_proba(Xc_test))
from sklearn.metrics import confusion_matrix
confusion_matrix(yc_test, log.predict(Xc_test))
# La matrice de confusion est plutôt explicite.
| _doc/notebooks/encours/2020-01-20_intro.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3 (enron)
# language: python
# name: enron
# ---
import json
import numpy as np
import scipy
from scipy import stats
import matplotlib.pyplot as plt
from collections import defaultdict
Names = {"10": "Article", "12": "Aux. verb", "18": "Conjunction", "16": "Adverb", \
"3": "Pers. pron.", "9": "Indef. pron.", "17": "Preposition", "20": "Quantifier"}
list(Names.values())
markers = []
for a in range(3):
markers.append("Aggregated %d"%(a+1))
for x in ['Quantifier','Conjunction','Indef. pron.',\
'Adverb','Aux. verb','Article','Pers. pron.',\
'Preposition']:
markers.append(x)
def bootstrap_resample(X, n=None):
    """Bootstrap resample an array_like.

    Parameters
    ----------
    X : array_like
        Data to resample.
    n : int, optional
        Length of the resampled array; defaults to len(X) when None.

    Returns
    -------
    numpy.ndarray
        n elements drawn from X uniformly at random, with replacement.
    """
    if n is None:  # fixed: compare to None with `is`, not `==`
        n = len(X)
    # Draw n indices uniformly with replacement (replaces the manual
    # floor(rand()*len) construction with the direct integer sampler).
    resample_i = np.random.randint(0, len(X), size=n)
    return np.array(X)[resample_i]
def get_data(where):
    # NOTE(review): this definition is shadowed by the get_data(where, what)
    # redefinition later in the file; kept for reference only.
    """Load per-marker coordination scores from JSON and aggregate per user.

    Reads data/coordination_on_<where>.json, which maps a LIWC marker id to a
    list with one coordination value (or None) per user.  Returns a dict
    mapping the human-readable marker name to its non-missing values, plus
    three 'Aggregated <k>' entries built per user:
      1. mean over markers, only for users with a value for every marker;
      2. mean over markers with missing values imputed by the marker average;
      3. mean over the available markers only.
    Prints per-marker and per-aggregate means as a side effect.
    """
    with open("data/coordination_on_%s.json"%(where), "r") as f:
        c_o = json.load(f)
    markers = c_o.keys()  # shadows the module-level `markers` list
    all_users = len(c_o['18'])
    average = {}
    ret = {}
    for m in markers:
        # NOTE(review): `if c` drops exact-zero scores as well as None.
        average[m] = np.mean([c for c in c_o[m] if c])
        print(m, np.mean([c for c in c_o[m] if c]), len(c_o[m]))
        ret[Names[m]] = [c for c in c_o[m] if c]
    aggregated = [[], [], []]
    for x in range(all_users):
        cur = []    # values actually present for this user
        cur_2 = []  # values with missing entries imputed by the marker mean
        cur_3 = []  # same content as cur (kept separately for aggregate 3)
        valid = 1
        for m in markers:
            if c_o[m][x] == None:
                valid = 0
                cur_2.append(average[m])
            else:
                cur.append(c_o[m][x])
                cur_2.append(c_o[m][x])
                cur_3.append(c_o[m][x])
        if valid:
            aggregated[0].append(np.mean(cur))
        if len(cur) > 0:
            aggregated[1].append(np.mean(cur_2))
            aggregated[2].append(np.mean(cur_3))
    for a in range(3):
        ret['Aggregated %d'%(a+1)] = aggregated[a]
        print(a, np.mean(aggregated[a]), len(aggregated[a]))
    return ret
def get_data(where, what):
    """Compute per-user linguistic coordination scores from raw counts.

    Reads data/<where>_coord.json line by line; each line is a JSON record of
    one utterance carrying, for every marker id m, a pair (t, s): the marker's
    presence in the target's message and in the speaker's reply.  Only records
    whose page_title starts with `what` are used.

    Per user and marker, coordination is estimated as
        target_and_speaker / target  -  speaker / utterances,
    and set to None when the speaker used the marker fewer than 3 times (the
    assert inside try/except enforces this; a zero `target` count also lands
    in the except branch via ZeroDivisionError).

    Returns (ret, strict_users, lenient_users): ret maps marker names and
    'Aggregated 1..3' to value lists; the user lists record which user indices
    entered aggregate 1 and aggregates 2/3 respectively.
    """
    coordination = defaultdict(list)
    utterances = defaultdict(int)
    target_and_speaker = {m: defaultdict(int) for m in Names.keys()}
    speaker = {m: defaultdict(int) for m in Names.keys()}
    target = {m: defaultdict(int) for m in Names.keys()}
    with open("data/%s_coord.json"%(where), "r") as f:
        for line in f:
            data = json.loads(line)
            if not(data['page_title'].startswith(what)):
                continue
            user = data['user_text']
            for m in Names.keys():
                t,s = data[m]
                ts = int(t and s)
                target_and_speaker[m][user] += ts
                target[m][user] += t
                speaker[m][user] += s
            utterances[user] += 1
    for user in utterances.keys():
        for m in Names.keys():
            try:
                # Require at least 3 speaker occurrences; failures fall
                # through to the except branch and yield None.
                assert(speaker[m][user] >= 3)
                tmp = target_and_speaker[m][user] / target[m][user] - speaker[m][user] / utterances[user]
            except:
                tmp = None
            coordination[m].append(tmp)
    markers = Names.keys()  # shadows the module-level `markers` list
    all_users = len(utterances.keys())
    average = {}
    ret = {}
    for m in markers:
        # NOTE(review): `if c` drops exact-zero scores as well as None.
        average[m] = np.mean([c for c in coordination[m] if c])
        print(m, np.mean([c for c in coordination[m] if c]), len(coordination[m]))
        ret[Names[m]] = [c for c in coordination[m] if c]
    aggregated = [[], [], []]
    user_list = [[], []]
    for x in range(all_users):
        cur = []    # values present for this user
        cur_2 = []  # values with missing entries imputed by the marker mean
        cur_3 = []  # same content as cur (kept for aggregate 3)
        valid = 1
        for m in markers:
            if coordination[m][x] == None:
                valid = 0
                cur_2.append(average[m])
            else:
                cur.append(coordination[m][x])
                cur_2.append(coordination[m][x])
                cur_3.append(coordination[m][x])
        if valid:
            aggregated[0].append(np.mean(cur))
            user_list[0].append(x)
        if len(cur) > 0:
            aggregated[1].append(np.mean(cur_2))
            aggregated[2].append(np.mean(cur_3))
            user_list[1].append(x)
    for a in range(3):
        ret['Aggregated %d'%(a+1)] = aggregated[a]
        print(a, np.mean(aggregated[a]), len(aggregated[a]))
    return ret, user_list[0], user_list[1]
# Load coordination data for the three settings and intersect the user sets so
# that all settings can later be compared on the same editors.
data = {}
users = {"own":[[], []], "others_user":[[], []], "others_article":[[], []]}
data["own"], users["own"][0], users["own"][1] = get_data("own_page", "User talk")
data["others_user"], users["others_user"][0], users["others_user"][1] = get_data("reply_on_others", "User talk")
data["others_article"], users['others_article'][0], users["others_article"][1] = get_data("reply_on_others", "Talk")
all_users = [[], []]
for r in [0, 1]:
    # r == 0: strict user set (all markers present); r == 1: lenient set.
    all_users[r] = list(set(users["own"][r])&set(users["others_user"][r])&set(users["others_article"][r]))
def get_data(where, what, who):
    """Same computation as get_data(where, what), restricted to a user set.

    `who` is a pair of user-index lists: who[0] gates aggregate 1 (strict),
    who[1] gates aggregates 2 and 3 (lenient).  Unlike the two-argument
    variant, only the ret dict is returned; the locally built user_list is
    discarded.
    """
    coordination = defaultdict(list)
    utterances = defaultdict(int)
    target_and_speaker = {m: defaultdict(int) for m in Names.keys()}
    speaker = {m: defaultdict(int) for m in Names.keys()}
    target = {m: defaultdict(int) for m in Names.keys()}
    with open("data/%s_coord.json"%(where), "r") as f:
        for line in f:
            data = json.loads(line)
            if not(data['page_title'].startswith(what)):
                continue
            user = data['user_text']
            for m in Names.keys():
                t,s = data[m]
                ts = int(t and s)
                target_and_speaker[m][user] += ts
                target[m][user] += t
                speaker[m][user] += s
            utterances[user] += 1
    for user in utterances.keys():
        for m in Names.keys():
            try:
                # Require at least 3 speaker occurrences; failures fall
                # through to the except branch and yield None.
                assert(speaker[m][user] >= 3)
                tmp = target_and_speaker[m][user] / target[m][user] - speaker[m][user] / utterances[user]
            except:
                tmp = None
            coordination[m].append(tmp)
    markers = Names.keys()  # shadows the module-level `markers` list
    all_users = len(utterances.keys())
    average = {}
    ret = {}
    for m in markers:
        # NOTE(review): `if c` drops exact-zero scores as well as None.
        average[m] = np.mean([c for c in coordination[m] if c])
        print(m, np.mean([c for c in coordination[m] if c]), len(coordination[m]))
        ret[Names[m]] = [c for c in coordination[m] if c]
    aggregated = [[], [], []]
    user_list = [[], []]
    for x in range(all_users):
        cur = []    # values present for this user
        cur_2 = []  # values with missing entries imputed by the marker mean
        cur_3 = []  # same content as cur (kept for aggregate 3)
        valid = 1
        for m in markers:
            if coordination[m][x] == None:
                valid = 0
                cur_2.append(average[m])
            else:
                cur.append(coordination[m][x])
                cur_2.append(coordination[m][x])
                cur_3.append(coordination[m][x])
        if valid and (x in who[0]):
            aggregated[0].append(np.mean(cur))
            user_list[0].append(x)
        if len(cur) > 0 and (x in who[1]):
            aggregated[1].append(np.mean(cur_2))
            aggregated[2].append(np.mean(cur_3))
            user_list[1].append(x)
    for a in range(3):
        ret['Aggregated %d'%(a+1)] = aggregated[a]
        print(a, np.mean(aggregated[a]), len(aggregated[a]))
    return ret
# Recompute the three settings restricted to the shared user sets computed above.
data = {}
data["own"] = get_data("own_page", "User talk", all_users)
data["others_user"] = get_data("reply_on_others", "User talk", all_users)
data["others_article"] = get_data("reply_on_others", "Talk", all_users)
display_name = []
# One-way ANOVA across the three settings for every marker; the significance
# stars on the display names are currently disabled.
for ind, m in enumerate(markers):
    s, p = scipy.stats.f_oneway(data["own"][m], data["others_user"][m], data["others_article"][m])
    print(m, s, p)
    n = m
    #if p < 0.05:
    #    n += "*"
    #if p < 0.01:
    #    n += "*"
    #if p < 0.001:
    #    n += "*"
    display_name.append(n)
# +
# Grouped bar chart of the three aggregate coordination measures, one bar per
# setting, with bootstrapped 95% confidence intervals.
f = plt.figure(figsize=(2,15))
N = len(markers[:3])
all_data = {}
err = {}
for x in ["own", "others_article", "others_user"]:
    all_data[x] = [np.mean(data[x][m])*100 for m in markers][:3]
    err[x] = [[], []]
resample = 20
# Bootstrap the mean of each aggregate `resample` times, then take a t-interval.
for ind,m in enumerate(markers[:3]):
    lst = {}
    for x in ["own", "others_article", "others_user"]:
        lst[x] = []
        for r in range(resample):
            tmp = bootstrap_resample(data[x][m])
            tmp = np.mean(tmp)*100
            lst[x].append(tmp)
        l, u = scipy.stats.t.interval(0.95, len(lst[x])-1, loc=np.mean(lst[x]), scale=scipy.stats.sem(lst[x]))
        err[x][0].append(all_data[x][ind]-l)
        err[x][1].append(u - all_data[x][ind])
ind = [np.arange(N)] # the x locations for the groups
width = 0.3 # the width of the bars
ind.append(ind[0] + width)
ind.append(ind[1] + width)
color = ["r", "g", "b"]
fig, ax = plt.subplots(figsize=(6, 2))
rect = []
for i, x in enumerate(["own", "others_article", "others_user"]):
    rect.append(ax.bar(ind[i], all_data[x], width, color=color[i], yerr=err[x], alpha=0.3))
# add some text for labels, title and axes ticks
ax.set_ylabel('Coordination Value\n(%s 100)'%(r"$\times$"))
ax.set_xticks(ind[1])
ax.set_xticklabels(display_name)
ax.legend((rect[0][0], rect[1][0], rect[2][0]), ('Self user talk page', 'Article talk page', \
          'Other user talk page'), bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
          mode="expand", borderaxespad=0., ncol=2)
plt.show()
fig.savefig("figs/coordination.pdf", bbox_inches='tight')
# -
import pandas as pd
import matplotlib.ticker as mtick
# Stacked horizontal bars: percentage of toxic / normal / severely toxic
# messages deleted within each delay bucket.
df = pd.DataFrame({"Toxic": [28, 4, 6, 9, 4], "Normal": [3, 3, 5, 8, 3], "Severe Toxic": [86, 1, 1, 1, 1]})
ax = df.T.plot(kind='barh', stacked=True, figsize=(6, 2), alpha=0.5, edgecolor='none', linewidth=0)
plt.legend(["In a day", "In a week", "In a month", "In a year", "After one year"], bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
           ncol=3, mode="expand", borderaxespad=0.)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
ax.xaxis.set_major_formatter(xticks)
plt.grid(True)
plt.xlim((0, 100))
plt.savefig("toxic-deletion.pdf", bbox_inches='tight')
# +
# Combined "use case" figure: coordination bars (left panel) and the
# deletion-delay bars (right panel), saved as figs/usecase.pdf.
N = len(markers[:3])
all_data = {}
err = {}
for x in ["own", "others_article", "others_user"]:
    all_data[x] = [np.mean(data[x][m])*100 for m in markers][:3]
    err[x] = [[], []]
resample = 20
# Bootstrapped 95% confidence intervals, as in the standalone plot above.
for ind,m in enumerate(markers[:3]):
    lst = {}
    for x in ["own", "others_article", "others_user"]:
        lst[x] = []
        for r in range(resample):
            tmp = bootstrap_resample(data[x][m])
            tmp = np.mean(tmp)*100
            lst[x].append(tmp)
        l, u = scipy.stats.t.interval(0.95, len(lst[x])-1, loc=np.mean(lst[x]), scale=scipy.stats.sem(lst[x]))
        err[x][0].append(all_data[x][ind]-l)
        err[x][1].append(u - all_data[x][ind])
ind = [np.arange(N)] # the x locations for the groups
width = 0.3 # the width of the bars
ind.append(ind[0] + width)
ind.append(ind[1] + width)
color = ["r", "g", "b"]
fig, axes = plt.subplots(nrows=1, ncols=2)
#fig, ax = plt.subplots(figsize=(6, 2))
ax = axes[0]
rect = []
for i, x in enumerate(["own", "others_article", "others_user"]):
    rect.append(ax.bar(ind[i], all_data[x], width, color=color[i], yerr=err[x], alpha=0.3))
# add some text for labels, title and axes ticks
ax.set_ylabel('Coordination Value\n(%s 100)'%(r"$\times$"))
ax.set_xticks(ind[1])
ax.set_xticklabels(["Aggregated 1\n on 2,425 editors", "Aggregated 2\n on 3,130 editors", "Aggregated 3\non 3,130 editors"])
ax.legend((rect[0][0], rect[1][0], rect[2][0]), ('Self user talk page', 'Article talk page', \
          'Other user talk page'), bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
          mode="expand", borderaxespad=0., ncol=2)
df = pd.DataFrame({"Toxic": [28, 4, 6, 9, 4], "Normal": [3, 3, 5, 8, 3], "Severe\nToxic": [86, 1, 1, 1, 1]})
ax = df.T.plot(kind='barh', stacked=True, figsize=(12, 2), alpha=0.5, edgecolor='none', linewidth=0, ax=axes[1])
ax.legend(["In a day", "In a week", "In a month", "In a year", "After one year"], bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
          ncol=3, mode="expand", borderaxespad=0.)
fmt = '%.0f%%' # Format you want the ticks, e.g. '40%'
xticks = mtick.FormatStrFormatter(fmt)
ax.xaxis.set_major_formatter(xticks)
ax.grid(True)
ax.set_xlim((0, 100))
plt.savefig("figs/usecase.pdf", bbox_inches='tight')
# -
| wikiconv/analysis/Analysis.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + id="i9M9g3h1Xa4P" colab_type="code" outputId="f5da6653-5f4a-445e-e44b-d97ae85617ee" executionInfo={"status": "ok", "timestamp": 1548868547091, "user_tz": -60, "elapsed": 14071, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 956}
# !pip install tsfresh
# !pip install catboost
# + id="MDK76bDtdIkW" colab_type="code" outputId="fc8d414c-9cf8-41cd-a286-719af180ee8e" executionInfo={"status": "ok", "timestamp": 1548868547954, "user_tz": -60, "elapsed": 13627, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 74}
# The essentials
import pandas as pd
import numpy as np
# Plotting
# %matplotlib inline
import matplotlib.pyplot as plt
# Progress bars
from tqdm import tqdm
# Access our Google Drive
from google.colab import drive
# Gradient Boosting
from catboost import CatBoostRegressor
from collections import defaultdict
from tsfresh.feature_selection.relevance import calculate_relevance_table
# + id="UmrIvOT0aWMS" colab_type="code" outputId="94c8ba53-8dc8-4e44-c9f7-73b63033c5e3" executionInfo={"status": "ok", "timestamp": 1548868571405, "user_tz": -60, "elapsed": 36578, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 4867}
# Mount Google Drive and list the competition data directory.
drive.mount('/content/drive', force_remount=True)
# !ls "/content/drive/My Drive/Rinse Over Run"
# + id="YsOM7c9caXTB" colab_type="code" outputId="ea2c59be-db8f-4638-cb83-1dbe8f50b447" executionInfo={"status": "ok", "timestamp": 1548868636021, "user_tz": -60, "elapsed": 100580, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 54}
# Raw sensor time series for train/test plus the training labels
# (final rinse turbidity), all indexed by process.
train_df = pd.read_csv('/content/drive/My Drive/Rinse Over Run/train_values.csv', index_col=0, parse_dates=['timestamp'])
test_df = pd.read_csv('/content/drive/My Drive/Rinse Over Run/test_values.csv', index_col=0, parse_dates=['timestamp'])
label_df = pd.read_csv('/content/drive/My Drive/Rinse Over Run/train_labels.csv', index_col='process_id')
# + id="l1gIss6TadrC" colab_type="code" colab={}
# Pre-computed train/val/test process-id splits for the chosen phase combination.
process_comb = 3
train_features_index = pd.read_csv('/content/drive/My Drive/Rinse Over Run/train_features_adv_{}.csv'.format(process_comb), index_col=['process_id']).index
val_features_index = pd.read_csv('/content/drive/My Drive/Rinse Over Run/val_features_adv_{}.csv'.format(process_comb), index_col=['process_id']).index
test_features_index = pd.read_csv('/content/drive/My Drive/Rinse Over Run/test_features_{}.csv'.format(process_comb), index_col=['process_id']).index
# + id="akRxKty0aa2C" colab_type="code" colab={}
# Real-valued (continuous) sensor channels summarized into features.
ts_real = [
    'supply_flow',
    'supply_pressure',
    'return_temperature',
    'return_conductivity',
    'return_turbidity',
    'return_flow',
    'tank_level_pre_rinse',
    'tank_level_caustic',
    'tank_level_acid',
    'tank_level_clean_water',
    'tank_temperature_pre_rinse',
    'tank_temperature_caustic',
    'tank_temperature_acid',
    'tank_concentration_caustic',
    'tank_concentration_acid',
    'target_value'
]
# variables we'll use to create our time series features.
# This list was a byte-for-byte duplicate of ts_real; derive it instead so
# the two definitions can never drift apart.
ts_cols = list(ts_real)
# variables for binary (on/off) time series features
bin_cols = [
    'supply_pump',
    'supply_pre_rinse',
    'supply_caustic',
    'return_caustic',
    'supply_acid',
    'return_acid',
    'supply_clean_water',
    'return_recovery_water',
    'return_drain',
    'object_low_level'
]
# Bitmask of phases present in a process -> ordered list of those phases
# (bit 0 = pre_rinse, bit 1 = caustic, bit 2 = intermediate_rinse, bit 3 = acid).
process_comb_to_phases = {
    15: ['pre_rinse', 'caustic', 'intermediate_rinse', 'acid'],
    3: ['pre_rinse', 'caustic'],
    7: ['pre_rinse', 'caustic', 'intermediate_rinse'],
    1: ['pre_rinse'],
    8: ['acid'],
    2: ['caustic'],
    6: ['caustic', 'intermediate_rinse'],
    14: ['caustic', 'intermediate_rinse', 'acid'],
}
# phases, ordered from earliest to latest
phases = ['pre_rinse', 'caustic', 'intermediate_rinse', 'acid']
def encode_categorical(df):
    """One-hot encode each process's pipeline and count its phases.

    Adapted from http://drivendata.co/blog/rinse-over-run-benchmark/
    Returns a per-process frame indexed by process_id.
    """
    unique_meta = (df[['process_id', 'pipeline']]
                   .drop_duplicates()
                   .set_index('process_id'))
    encoded = pd.get_dummies(unique_meta)
    # Pipeline L12 never occurs in the test data, so the column carries no signal.
    if 'pipeline_L12' in encoded:
        encoded = encoded.drop('pipeline_L12', axis=1)
    # Number of distinct phases observed for each process.
    encoded['num_phases'] = df.groupby('process_id')['phase'].apply(lambda p: p.nunique())
    return encoded
def percentile_25(x):
    """Return the 25th percentile of x.

    Bug fix: np.percentile expects q on a 0-100 scale, so the original
    `np.percentile(x, 0.25)` computed the 0.25th percentile, not the 25th.
    """
    return np.percentile(x, 25)
def percentile_75(x):
    """Return the 75th percentile of x.

    Bug fix: np.percentile expects q on a 0-100 scale, so the original
    `np.percentile(x, 0.75)` computed the 0.75th percentile, not the 75th.
    """
    return np.percentile(x, 75)
def encode_real_timeseries(df):
    """Aggregate the real-valued sensor channels into per-process features.

    Produces global summary statistics per channel plus the mean/std of the
    last K=5 measurements of every phase; all columns are prefixed 'real_'.
    """
    ts_df = df[['process_id'] + ts_cols].set_index('process_id')
    # create features: count, min, max, mean, standard deviation
    # NOTE(review): Series/GroupBy 'mad' was removed in pandas 2.0, so this
    # presumably requires pandas < 2.0 — confirm against the environment.
    ts_features = ts_df.groupby('process_id').agg(['min', 'max', 'mean', 'std',
                                                   'count', 'median', 'sum',
                                                   'mad'])
    # Now we will get the mean + variance value of the last K measurements for each phase
    # TODO: This can be moved to extracting features per phase
    all_vals_per_phase = []
    K = 5
    col_names = ['process_id']
    for phase in phases:
        for col in ts_cols:
            col_names.extend(['mean_{}_{}_{}'.format(col, K, phase),
                              'std_{}_{}_{}'.format(col, K, phase)])
    for process in tqdm(ts_features.index, total=len(ts_features)):
        vals_per_phase = [process]
        process_filtered_df = df[df['process_id'] == process]
        for phase in phases:
            # Last K rows of this phase; mean/std of an empty frame yield NaN.
            filtered_df = process_filtered_df[process_filtered_df['phase'] == phase].tail(K)
            for col in ts_cols:
                vals_per_phase.extend([filtered_df[col].mean(), filtered_df[col].std()])
        all_vals_per_phase.append(vals_per_phase)
    values_df = pd.DataFrame(all_vals_per_phase, columns=col_names)
    values_df = values_df.set_index('process_id')
    ts_features = ts_features.merge(values_df, left_index=True, right_index=True)
    col_map = {}
    for col in ts_features.columns:
        col_map[col] = 'real_{}'.format(col)
    ts_features = ts_features.rename(columns=col_map)
    return ts_features
def encode_binary_timeseries(df):
    """Aggregate the on/off sensor channels into per-process features.

    Produces global summary statistics per binary channel plus the fraction
    of True samples in each phase; all columns are prefixed 'bin_'.
    """
    ts_df = df[['process_id'] + bin_cols].set_index('process_id')
    # create features: count, min, max, mean, standard deviation
    # NOTE(review): 'mad' was removed in pandas 2.0 — presumably needs pandas < 2.0.
    ts_features = ts_df.groupby('process_id').agg(['mean', 'std', 'count', 'sum', 'mad'])
    # TODO: Count fraction of True in each phase
    feature_vectors = []
    col_names = ['process_id']
    for phase in phases:
        for col in bin_cols:
            col_names.append('fraction_{}_{}'.format(col, phase))
    # Get fraction of True values for each binary timeseries
    # TODO: This can be moved to extracting features per phase
    for process in tqdm(set(df['process_id']), total=len(set(df['process_id']))):
        vector = [process]
        process_filtered_df = df[df['process_id'] == process]
        for phase in phases:
            filtered_df = process_filtered_df[process_filtered_df['phase'] == phase]
            for col in bin_cols:
                if len(filtered_df):
                    vector.append(sum(filtered_df[col]) / len(filtered_df))
                else:
                    # Phase absent for this process.
                    vector.append(np.NaN)
        feature_vectors.append(vector)
    feature_df = pd.DataFrame(feature_vectors, columns=col_names)
    feature_df = feature_df.set_index('process_id')
    feature_df = feature_df.merge(ts_features, left_index=True, right_index=True)
    col_map = {}
    for col in feature_df.columns:
        col_map[col] = 'bin_{}'.format(col)
    feature_df = feature_df.rename(columns=col_map)
    return feature_df
def get_descript(data, functions, cols):
    """Aggregate `cols` of `data` per process with `functions`, in time order."""
    indexed = data.set_index('process_id').sort_values(by='timestamp')
    grouped = indexed.groupby('process_id')
    return grouped[cols].agg(functions)
def get_descript_prev_process(data):
    """Describe, for each process, the previous process on the same machine.

    Processes of each object_id are ordered by timestamp; every process gets
    mean/std/min/max/count over ts_real of its predecessor plus the idle time
    (hours) between the two.  A machine's first process gets NaNs.  All
    columns are prefixed 'prev_'.
    """
    machines = set(data['object_id'])
    all_features = []
    for machine in tqdm(machines):
        machine_data = data[data['object_id'] == machine]
        machine_data = machine_data.sort_values(by='timestamp')
        machine_processes = machine_data['process_id'].unique()
        for process_ix, process in enumerate(machine_processes):
            if process_ix > 0:
                prev_process = machine_data[machine_data['process_id'] == machine_processes[process_ix - 1]]
                this_process = machine_data[machine_data['process_id'] == machine_processes[process_ix]]
                features = get_descript(prev_process, ['mean', 'std', 'min', 'max', 'count'], ts_real)
                _columns = list(features.columns)
                assert len(features) == 1
                features = features.iloc[0, :].values
                # Gap between the end of the previous process and the start of
                # this one, in hours.
                time_delta = (this_process['timestamp'].values[0] - prev_process['timestamp'].values[-1]) / np.timedelta64(1, 'h')
                assert time_delta > 0
                all_features.append([machine, process, time_delta] + list(features))
            else:
                # First process on this machine: pad one NaN per stat column.
                # Bug fix: the original padded a hard-coded 60 NaNs (5 stats x
                # an older 12-channel list); ts_real now has 16 channels, so
                # the row length no longer matched the column list.
                all_features.append([machine, process, np.NaN] + ([np.NaN] * (len(ts_real) * 5)))
    all_features = pd.DataFrame(all_features, columns=['object_id', 'process_id', 'time_delta'] + _columns)
    all_features = all_features.set_index('process_id', drop=True)
    col_map = {}
    for col in all_features.columns:
        col_map[col] = 'prev_{}'.format(col)
    all_features = all_features.rename(columns=col_map)
    return all_features
def get_descript_per_phase(data, phases):
    """Mean/std/count of every channel, computed separately for each phase.

    Note: the `phases` parameter shadows the module-level list of the same
    name.  All columns are prefixed 'phase_'.
    """
    all_features = pd.DataFrame(index=list(set(data['process_id'])))
    for phase in phases:
        phase_data = data[data['phase'] == phase].sort_values(by='timestamp')[['process_id'] + ts_real + bin_cols]
        col_map = {}
        for col in phase_data.columns:
            if col != 'process_id':
                col_map[col] = '{}_{}'.format(phase, col)
        phase_data = phase_data.rename(columns=col_map)
        features = phase_data.groupby('process_id').agg(['mean', 'std', 'count'])
        # Left join keeps processes that lack this phase (their features stay NaN).
        all_features = all_features.merge(features, left_index=True, right_index=True, how='left')
    col_map = {}
    for col in all_features.columns:
        col_map[col] = 'phase_{}'.format(col)
    all_features = all_features.rename(columns=col_map)
    return all_features
def create_feature_matrix(df):
    """Build the full per-process feature table from the raw time-series frame.

    NOTE: mutates `df` in place by adding the 'return_flow_relu' and
    'target_value' columns before any encoder runs.
    """
    df['return_flow_relu'] = df['return_flow'].apply(lambda v: max(0, v))
    df['target_value'] = df['return_flow_relu'] * df['return_turbidity']
    #phase_features = get_descript_per_phase(df, ['pre_rinse', 'caustic'])
    prev_features = get_descript_prev_process(df)
    # Start from the categorical metadata and inner-join every feature group
    # onto the shared process_id index.
    feature_matrix = encode_categorical(df)
    for block in (encode_real_timeseries(df),
                  encode_binary_timeseries(df),
                  prev_features):
        feature_matrix = feature_matrix.merge(block, left_index=True, right_index=True)
    #feature_matrix = feature_matrix.merge(phase_features, left_index=True, right_index=True)
    return feature_matrix
# + id="HmfgNA3Tc2Z9" colab_type="code" outputId="3d8fb274-57c3-482f-f75a-f4c48b902390" executionInfo={"status": "ok", "timestamp": 1548872464818, "user_tz": -60, "elapsed": 482142, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 146}
# Build features over train+val+test restricted to the phases available for
# this process combination, then re-split 90/10 into train/val.
all_train_index = list(train_features_index) + list(val_features_index)
train_df_filtered = train_df[(train_df['phase'].isin(process_comb_to_phases[process_comb])) &
                             (train_df['process_id'].isin(all_train_index))]
test_df_filtered = test_df[(test_df['phase'].isin(process_comb_to_phases[process_comb])) &
                           (test_df['process_id'].isin(test_features_index))]
features = create_feature_matrix(pd.concat([train_df_filtered, test_df_filtered]))
X_train = features.loc[train_features_index]
X_val = features.loc[val_features_index]
X_test = features.loc[test_features_index]
np.random.seed(1337)
# Pool train+val, then hold out a fresh random 10% as the new validation fold.
X_train = pd.concat([X_train, X_val])
val_idx = np.random.choice(list(X_train.index), replace=False, size=int(0.1*len(X_train)))
train_idx = list(set(X_train.index) - set(val_idx))
X_val = X_train.loc[val_idx]
X_train = X_train.loc[train_idx]
# + id="ZbXrXmGYfAxB" colab_type="code" colab={}
# Log-transform the target and realign the labels with the new train/val split.
y_train = np.log(label_df.loc[X_train.index])
y_val = np.log(label_df.loc[X_val.index])
y_train = pd.concat([y_train, y_val], axis=0)
y_val = y_train.loc[val_idx]
y_train = y_train.loc[train_idx]
# + id="UcJvBnwrfMe5" colab_type="code" outputId="a8f8702e-af92-4ee5-b951-fa4327b00243" executionInfo={"status": "ok", "timestamp": 1548872772163, "user_tz": -60, "elapsed": 50747, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 1505}
# Let's get the highly correlated features first
def get_corr_features(X):
    """Return index pairs (i, j), i != j, of columns of X whose Pearson
    correlation is exactly 1.0; both (i, j) and (j, i) are included."""
    row_idx, col_idx = np.where(X.corr() == 1)
    # Exclude the diagonal (every column correlates perfectly with itself).
    # Bug fix: the original built this set from the global X_train's column
    # count instead of the parameter X's.
    self_corr = set([(i, i) for i in range(X.shape[1])])
    return set(list(zip(row_idx, col_idx))) - self_corr
# Greedily drop one column of each perfectly-correlated pair until none remain.
# NOTE(review): the full correlation matrix is recomputed after every single
# drop, which is expensive; batching the removals would be much faster.
X_train_uncorr = X_train.copy()
correlated_features = get_corr_features(X_train_uncorr)
while correlated_features:
    print('{} correlated feature pairs left...'.format(len(correlated_features)))
    corr_row, corr_col = correlated_features.pop()
    print('{} is correlated with {}... Removing {}.'.format(X_train_uncorr.columns[corr_row], X_train_uncorr.columns[corr_col], X_train_uncorr.columns[corr_row]))
    col_mask = [True]*X_train_uncorr.shape[1]
    col_mask[corr_row] = False
    X_train_uncorr = X_train_uncorr.loc[:, col_mask]
    correlated_features = get_corr_features(X_train_uncorr)
print(X_train.shape, X_train_uncorr.shape)
# + id="WzcNh0A5j8C4" colab_type="code" outputId="0da0ebcf-34ef-47e7-e034-26ea663bb343" executionInfo={"status": "ok", "timestamp": 1548872949089, "user_tz": -60, "elapsed": 3665, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# Now let's remove columns with too many NA's and only 1 single value
#na_cols = X_train_uncorr.columns[X_train_uncorr.isnull().sum() / len(X_train_uncorr) > 0.5]
single_cols = X_train_uncorr.columns[X_train_uncorr.nunique() == 1]  # constant columns
uncorr_cols = X_train_uncorr.columns
uncorr_cols = list(set(uncorr_cols) - set(single_cols)) #.union(set(na_cols))
#print(uncorr_cols)
# Keep only the de-correlated, non-constant columns in train and val.
X_train_no_corr = X_train[uncorr_cols]
X_val_no_corr = X_val[uncorr_cols]
print(X_train.shape, X_train_no_corr.shape)
#X_train.to_csv('/content/drive/My Drive/Rinse Over Run/train_features_sel_{}.csv'.format(process_comb))
#X_val.to_csv('/content/drive/My Drive/Rinse Over Run/val_features_sel_{}.csv'.format(process_comb))
# + id="hMyHHsCep--W" colab_type="code" colab={}
# tsfresh univariate relevance test on the NA-free columns.
rel_table = calculate_relevance_table(X_train.dropna(axis=1), y_train['final_rinse_total_turbidity_liter'], ml_task='regression')
# + id="_xXB-hXh8bOZ" colab_type="code" colab={}
# Repeat the relevance test for the columns that do contain NAs, restricted to
# their complete rows.
X_train_na_cols = X_train.columns[X_train.isnull().sum() > 1]
X_train_no_na = X_train[X_train_na_cols].dropna(axis=0)
rel_table_na = calculate_relevance_table(X_train_no_na.astype(float), y_train.loc[X_train_no_na.index]['final_rinse_total_turbidity_liter'], ml_task='regression')
# + id="ivG6FO6PMLzp" colab_type="code" outputId="697712f5-adc2-4b94-f860-c7efb91865b6" executionInfo={"status": "ok", "timestamp": 1548871725572, "user_tz": -60, "elapsed": 1566, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 35}
# Union of the features significant (p < 0.05) in either relevance table.
X_train_selected_cols = list(set(rel_table_na[rel_table_na['p_value'] < 0.05]['feature']).union(set(rel_table[rel_table['p_value'] < 0.05]['feature'])))
len(X_train_selected_cols), len(X_train.columns)
# + id="6VjtJu_GJxaN" colab_type="code" outputId="cb94b6e2-e4b6-4616-a6a0-330c7e14130e" executionInfo={"status": "ok", "timestamp": 1548871735108, "user_tz": -60, "elapsed": 701, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 55}
# Restrict all three splits to the selected feature columns.
print(X_train_selected_cols)
X_train_sub = X_train[X_train_selected_cols]
X_val_sub = X_val[X_train_selected_cols]
X_test_sub = X_test[X_train_selected_cols]
#X_train.to_csv('/content/drive/My Drive/Rinse Over Run/train_features_sel_{}.csv'.format(process_comb))
#X_val.to_csv('/content/drive/My Drive/Rinse Over Run/val_features_sel_{}.csv'.format(process_comb))
#X_test.to_csv('/content/drive/My Drive/Rinse Over Run/test_features_sel_{}.csv'.format(process_comb))
# + id="S9j8tuvWKX2f" colab_type="code" colab={}
def custom_mape(approxes, targets):
    """MAPE with the denominator floored at 290000 to tame near-zero targets
    (the competition's official metric)."""
    denom = np.maximum(np.abs(targets), 290000)
    return np.mean(np.abs(np.subtract(approxes, targets)) / denom)
class MAPEMetric(object):
    """Custom CatBoost eval metric: competition MAPE on de-logged predictions."""

    def get_final_error(self, error, weight):
        # The per-batch error is already an average; weight is unused.
        return error

    def is_max_optimal(self):
        # Lower MAPE is better.
        return False

    def evaluate(self, approxes, targets, weight):
        # Predictions and targets are in log space; exponentiate before scoring.
        error = custom_mape(np.exp(approxes), np.exp(targets))
        return error, len(targets)
# + id="ly2NYeyiKI3I" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 678} outputId="2d97eee9-efed-43a3-cccf-563e08f8b887" executionInfo={"status": "ok", "timestamp": 1548873044237, "user_tz": -60, "elapsed": 89214, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}}
# Fit CatBoost on GPU with iteration-based early stopping against the
# validation fold.
cat = CatBoostRegressor(iterations=100000, od_type='Iter', od_wait=250, learning_rate=0.33,
                        loss_function='MAPE', eval_metric='MAPE', border_count=254, task_type='GPU')#, l2_leaf_reg=10) #MAPEMetric()
cat.fit(X_train_no_corr, y_train, eval_set=(X_val_no_corr, y_val), verbose=50)
# baseline: 0.3098982664741879
# removing useless features: 0.31096976977748236
# more features: 0.3098482619660084
# + id="lHxXxLh2KYTW" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 35} outputId="1606478c-14f0-434b-900f-8b8aa80348a9" executionInfo={"status": "ok", "timestamp": 1548873050781, "user_tz": -60, "elapsed": 3778, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}}
# Validation MAPE in the original (de-logged) target space.
print(custom_mape(np.exp(cat.predict(X_val)), np.exp(y_val.values.flatten())))
# + id="Ms9XMc4WWAcm" colab_type="code" colab={"base_uri": "https://localhost:8080/", "height": 531} outputId="a42ef35d-449e-4bf0-d459-ce0a38efd8ec" executionInfo={"status": "ok", "timestamp": 1548871241123, "user_tz": -60, "elapsed": 708, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}}
single_cols
# + id="MlNZt5clWUpA" colab_type="code" outputId="6d4bf2b5-c910-492b-fcc7-e0fb3c11962b" executionInfo={"status": "error", "timestamp": 1548872469122, "user_tz": -60, "elapsed": 363303, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 708}
# cat = CatBoostRegressor(iterations=100000, od_type='Iter', od_wait=250, learning_rate=0.33,
# loss_function='MAPE', eval_metric='MAPE')#
# cat.fit(X_train, y_train, eval_set=(X_val, y_val), verbose=50)
# + id="jfjEOb-XWW1U" colab_type="code" outputId="b64b42eb-fdc5-47e5-b593-42524cead0fa" executionInfo={"status": "ok", "timestamp": 1548843172728, "user_tz": -60, "elapsed": 575, "user": {"displayName": "<NAME>", "photoUrl": "", "userId": "04070140729205161481"}} colab={"base_uri": "https://localhost:8080/", "height": 2024}
rel_table_na
# + id="BoERPQqFP0BM" colab_type="code" colab={}
| notebooks/Select top K and remove correlated features.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # AFSK Demodulator
# ## Step 6: HDLC Decoder
#
# -----
#
# This notebook will outline the steps necessary to move the HDLC Decoder to FPGA. This will be the final step in our project. Like with our last one, we will be borrowing code from the Mobilinkd TNC3 firmware. However, we do need to write a CRC calculator. In the TNC3, we use the hardware IP to calculate it. And in the Python implementation, we use a lookup table, which would work but would be inefficient on an FPGA.
#
# This code is part of the [AFSK Demodulator on Pynq](afsk-demodulator-fpga.ipynb) project.
#
# The purpose of this code is to continue our migration of the Python demodulator code to FPGA. We will be streaming audio data into the FPGA and streaming processed data out from the FPGA.
#
# This is the third step of moving a demodulator processing step into the FPGA. At this point demodulation is being done in FPGA. We are left with clock recovery and HDLC framing. Here we address clock recovery.
#
# At this point we must diverge from the design pattern we have been following. No longer are we simply streaming data in and out. The PLL had to indicate *lock* status, it has to output a *sample* indicator. And it may need to output information about *jitter* for diagnostic purposes.
#
# The Digital PLL in Python provides all of these interfaces. However, we can change the interface. We only need to provide two outputs from the PLL: a stream of sampled bits, and a lock indicator. Audio data will be clocked in via a stream interface. This will be demodulated to a bitstream and processed by the digital PLL. The demodulator will clock out 3 bits for each audio sample: the demodulated bit, a lock flag, and a sample indicator. The sample indicator will never go high if the lock flag is low.
#
# Recall from the Python implementation of the PLL that we need an IIR filter and a hysteresis module. We will build and test these independently. The PLL also made use of floating point math in the PLL, IIR filter and hysteresis code. We will change that to fixed point.
#
# ## Prerequisites
#
# At this point you are expected to have:
#
# * A configured PYNQ environment.
# * Vivado installed on your computer and configured for your board.
# * Experience working through the tutorials at https://pynq.readthedocs.io/.
# * Familiarized yourself with the AFSK demodulator implementation in Python.
# * Completed the first four steps of the tutorial to familiarize yourself with the process of creating a streaming interface.
#
# ## Outline
#
# We are going to modify the FPGA IP we created in the third tutorial to add the low-pass filter for the correlator output we are now generating, and turn that back into a bitstream.
#
# We will perform the following steps in this section:
#
# 1. Create a C++ file that accepts a block of 16-bit data, performs the FIR, correlator and low-pass filter operations, and sends the resulting bitstream back.
# 1. Create a C++ test case for the above file.
# 1. Generate an IP package from the code that can be used in Vivado.
# 1. Create a Zynq project in Vivado that uses the IP.
# 1. Export the bitstream for our project from Vivado.
# 1. Use Python running on the PS to load the bitstream to the PL, and verify that it works.
# 1. Integrate the FPGA module with the existing demodulator code, replacing the existing Python code.
#
# First we are going to generate the FIR filter coefficients. Then we are going to generate some sample data for our test bench.
#
# ## Filter Coefficients
#
# We continue to generate the filter coefficients, because we still need to test against the Python implementation. But we no longer need to print them out. Our work with filters is complete. We now focus on the digital PLL.
# +
# Generate the demodulator filter coefficients.
#
# The band-pass and low-pass taps are scaled to Q15 (x32768) integers for the
# fixed-point FPGA implementation. The two PLL loop filters stay in floating
# point and are printed so they can be pasted into the C++ digital PLL.
import numpy as np
from scipy.signal import lfiltic, lfilter, firwin
from scipy.io.wavfile import read

sample_rate, audio_data = read('../base/TNC_Test_Ver-1.102-26400-1sec.wav')

# 1100-2300 Hz band-pass for the AFSK mark/space tones (Q15 fixed point).
bpf_coeffs = np.array(
    firwin(141, [1100.0 / (sample_rate / 2), 2300.0 / (sample_rate / 2)],
           width=None, pass_zero=False, scale=True, window='hann') * 32768,
    dtype=int)

# 760 Hz low-pass for the correlator output (Q15 fixed point).
lpf_coeffs = np.array(
    firwin(101, [760.0 / (sample_rate / 2)],
           width=None, pass_zero=True, scale=True, window='hann') * 32768,
    dtype=int)

# PLL loop filters (floating point; printed for the C++ implementation).
pll_lpf40_coeffs = np.array(
    firwin(5, [40.0 / 600.0], width=None, pass_zero=True, scale=True,
           window='hann'), dtype=float)
print("lpf40 =", ",".join(str(c) for c in pll_lpf40_coeffs))

pll_lpf64_coeffs = np.array(
    firwin(5, [64.0 / 600.0], width=None, pass_zero=True, scale=True,
           window='hann'), dtype=float)
print("lpf64 =", ",".join(str(c) for c in pll_lpf64_coeffs))
# -
# ## Test Bench Data
#
# We will now generate the input and output data for our test bench. We will again use our working Python model to generate data as a baseline. We need to generate PLL output data. This is going to be a bit different than the data currently provided because we are changing the interface slightly. We need to generate an array containing three numbers (bits) from the PLL: input, locked, sample.
# +
# Pull in the reference Python implementations from the base project so the
# fixed-point FPGA results can be checked against them.
import sys
sys.path.append('../base')

from DigitalPLL import DigitalPLL
from HDLC import HDLC

# Digital PLL that recovers the 1200 baud bit clock from the sampled stream.
pll = DigitalPLL(sample_rate, 1200.0)
class fir_filter(object):
    """Streaming FIR filter with Q15 (x32768) scaled coefficients.

    Filter state is carried between calls, so successive blocks of samples
    are filtered as one continuous stream.
    """

    def __init__(self, coeffs):
        self.coeffs = coeffs
        # Initial delay-line state: all zeros (no prior input or output).
        self.zl = lfiltic(self.coeffs, 32768, [], [])

    def __call__(self, data):
        # Apply the filter along the last axis, updating the saved state.
        filtered, self.zl = lfilter(self.coeffs, 32768, data,
                                    axis=-1, zi=self.zl)
        return filtered
# Run the demodulator front end over the first second of audio (26400
# samples) and collect the PLL sample/lock flags for every input bit.
bpf = fir_filter(bpf_coeffs)
lpf = fir_filter(lpf_coeffs)

delay = 12  # correlator delay, in samples

# Band-pass filter the audio and digitize it to a 0/1 bitstream.
f = bpf(audio_data[:26400])
c = (f >= 0).astype(int)

# Delayed copy of the bitstream (zero-padded at the front).
d = np.concatenate((np.zeros(delay, dtype=int), c[:-delay]))

# Correlate: XOR the digitized data with the delayed version.
x = c != d

# Low-pass filter the +/-1 correlator output and digitize it again.
l = lpf(x * 2 - 1)
comp = (l >= 0).astype(int)

# Clock the comparator output through the digital PLL, recording the
# sample strobe and lock indicator for every bit.
locked = np.zeros(len(comp), dtype=int)
sample = np.zeros(len(comp), dtype=int)
for i, bit in enumerate(comp):
    sample[i] = pll(bit)
    locked[i] = pll.locked()
from HDLC import HDLC
class NRZI:
    """NRZI decoder: a logical one is signalled by *no* transition.

    Each call returns True when the new input matches the previous input
    (no transition) and False otherwise, then remembers the new input.
    """

    def __init__(self):
        self.state = False

    def __call__(self, x):
        unchanged = self.state == x
        self.state = x
        return unchanged
# Decode HDLC frames from the PLL output and, along the way, print the raw
# audio samples and the decoded frame as C array initializers for the HLS
# test bench.
nrzi = NRZI()
hdlc = HDLC()
decoded = None

# Emit the first second of audio as a C int16_t array initializer,
# wrapping the line every 16 values.
print("int16_t audio[] = {\n ", end='')
count = 0
for x in audio_data[:26400]:
    print(hex(x), end=', ')
    count += 1
    if count == 16:
        print('\n ', end='')
        count = 0
print("\n};")

# Feed each PLL-sampled bit through NRZI decoding into the HDLC framer.
# b = demodulated bit, s = sample strobe, l = lock flag; bits are only
# consumed on the sample strobe.
for b,s,l in zip(comp, sample, locked):
    if s:
        packet = hdlc(nrzi(b), l)
        if packet is not None:
            # Dump the decoded frame body as a C uint8_t array initializer.
            print("uint8_t packet[] = {\n ", end='')
            count = 0
            for x in packet[1]:
                print('0x%02x, ' % ord(x), end='')
                count += 1
                if count == 16:
                    print('\n ', end='')
                    count = 0
            print("\n};")
            print(len(packet[1]))
            decoded = packet[1]
            # packet[0] is presumably the frame check sequence (CRC) —
            # confirm against the HDLC implementation.
            print("0x%04x" % packet[0])
# -
# The data above represents the PLL output from the same 10ms of data we have been testing with during this development process. The values represent the input, sample, lock.
#
# ## Vivado HLS
#
# We are going to make the biggest additions to the code since we started. We will continue to use core pieces we created earlier, but we now add the digital PLL. This requires two additional components: an IIR filter and hysteresis. For these components, which in Python are implemented using floating point types, we are going to switch to 18-bit fixed point. Why 18 bits? Because that is the limit to the DSP48 blocks on the Zynq. And initial results show that it worked.
#
# If you would like to learn more about the capabilities of the DSP blocks in Zynq, the DSP48 User Guide from Xilinx is very detailed: https://www.xilinx.com/support/documentation/user_guides/ug479_7Series_DSP48E1.pdf
#
# 1. Start Vivado HLS.
# ```bash
# vivado_hls
# ```
# 1. Create a new project under the project_05 directory called HLS.
# 1. Create a top-level function called demodulate4.
# 1. Create 5 new files:
# * [demodulate.hpp](HLS/demodulate.hpp)
# * [demodulate.cpp](HLS/demodulate.cpp)
# * [hysteresis.hpp](HLS/hysteresis.hpp)
# * [iir_filter.hpp](HLS/iir_filter.hpp)
# * [digital_pll.hpp](HLS/digital_pll.hpp)
# 1. Create a new test bench:
# * [demodulate_test.cpp](HLS/demodulate_test.cpp)
#
# The important part of this module is the addition of the three new header files which implement the digital PLL. These work exactly the same as the digital PLL from the Python implementation. The bulk of the code was copied from the [Mobilinkd TNC3 firmware](https://github.com/mobilinkd/tnc3-firmware) and modified slightly for fixed-point math.
#
# -----
#
# This is the header:
#
# ```c++
# #include <ap_axi_sdata.h>
# #include <hls_stream.h>
# #include <stdint.h>
#
# #define BPF_COEFF_LEN 141
#
# typedef ap_axis<16,1,1,1> idata_type;
# typedef ap_axis<1,1,1,1> odata_type;
#
# void demodulate5(idata_type input, odata_type& output);
#
# ```
#
# The only change we needed to make here is to change the top-level function name.
#
# And this is the source:
#
# ```c++
# #include "demodulate.hpp"
# #include "digital_pll.hpp"
#
# #include "ap_shift_reg.h"
#
# const ap_int<13> bpf_coeffs[] =
# { 0, 0, 0, 0, 0, 0, 1, 3, 5, 8, 8, 5,
# -2, -13, -27, -40, -46, -44, -32, -12, 11, 32, 44, 44,
# 32, 14, 0, -2, 13, 49, 97, 143, 170, 160, 104, 6,
# -118, -244, -340, -381, -352, -258, -120, 24, 138, 192, 173, 97,
# 0, -67, -56, 62, 287, 575, 850, 1021, 1001, 737, 228, -462,
# -1216, -1879, -2293, -2336, -1956, -1182, -133, 1008, 2030, 2736, 2988, 2736,
# 2030, 1008, -133, -1182, -1956, -2336, -2293, -1879, -1216, -462, 228, 737,
# 1001, 1021, 850, 575, 287, 62, -56, -67, 0, 97, 173, 192,
# 138, 24, -120, -258, -352, -381, -340, -244, -118, 6, 104, 160,
# 170, 143, 97, 49, 13, -2, 0, 14, 32, 44, 44, 32,
# 11, -12, -32, -44, -46, -40, -27, -13, -2, 5, 8, 8,
# 5, 3, 1, 0, 0, 0, 0, 0, 0,
# };
#
# const ap_int<12> lpf_coeffs[] =
# {
# 0, 0, 0, 1, 3, 5, 8, 11, 14, 17, 20, 21, 20, 17,
# 11, 2, -9, -25, -44, -66, -91, -116, -142, -167, -188, -205, -215, -217,
# -209, -190, -156, -109, -47, 30, 123, 230, 350, 481, 622, 769, 919, 1070,
# 1217, 1358, 1488, 1605, 1704, 1785, 1844, 1880, 1893, 1880, 1844, 1785, 1704, 1605,
# 1488, 1358, 1217, 1070, 919, 769, 622, 481, 350, 230, 123, 30, -47, -109,
# -156, -190, -209, -217, -215, -205, -188, -167, -142, -116, -91, -66, -44, -25,
# -9, 2, 11, 17, 20, 21, 20, 17, 14, 11, 8, 5, 3, 1,
# 0, 0, 0,
# };
#
# template <typename InOut, typename Filter, size_t N>
# InOut fir_filter(InOut x, Filter (&coeff)[N])
# {
# static InOut shift_reg[N];
#
# int32_t accum = 0;
# filter_loop: for (size_t i = N-1 ; i != 0; i--)
# {
# #pragma HLS unroll factor=20
# shift_reg[i] = shift_reg[i-1];
# accum += shift_reg[i] * coeff[i];
# }
#
# shift_reg[0] = x;
# accum += shift_reg[0] * coeff[0];
#
# return static_cast<InOut>(accum >> 15);
# }
#
# ap_shift_reg<bool, 12> delay_line;
# DigitalPLL<> dpll(26400, 1200);
#
# void demodulate5(idata_type& input, odata_type& output)
# {
# #pragma HLS INTERFACE axis port=input
# #pragma HLS INTERFACE axis port=output
# #pragma HLS interface ap_ctrl_none port=return
#
# ap_int<16> bpfiltered, lpfiltered;
# ap_int<1> comp, delayed, comp2;
# ap_int<2> corr;
#
# bpfiltered = fir_filter(input.data, bpf_coeffs);
# comp = bpfiltered >= 0 ? 1 : 0;
# delayed = delay_line.shift(comp);
# corr = comp ^ delayed;
# corr <<= 1;
# corr -= 1;
# lpfiltered = fir_filter(corr, lpf_coeffs);
# comp2 = lpfiltered >= 0 ? 1 : 0;
# typename DigitalPLL<>::result_type result = dpll(comp2 != 0);
#
# ap_int<3> tmp = (std::get<0>(result) << 2) |
# (std::get<1>(result) << 1) | std::get<2>(result);
# output.data = tmp;
# output.dest = input.dest;
# output.id = input.id;
# output.keep = input.keep;
# output.last = input.last;
# output.strb = input.strb;
# output.user = input.user;
# }
# ```
#
#
# ### C++11
#
# Like before, we needed to add a configuration setting to control the timing constraints. In Vivado HLS, right click on the "solution1" window and select "Solution Settings...". In the *Solution Settings* window, in the *General* tab, click the *Add* button. Add a "config_core" setting for core "DSP48" with a latency of 3. This is required to meet timing constraints with the new code.
#
# We also use some new C++11 features -- specifically tuples. For this we need to add compilation flags for use during simulation and synthesis. Right click on the "HLS" project name in the Explorer window on the right side of the Vivado HLS UI and select "Project Settings...". In the *Project Settings* window, select the *Simulation* tab. Then select the "demodulate_test.cpp" file. Click the *Edit CFLAGS* button and add "-std=c++11" to the flags. Go to the *Synthesis* tab, highlight the "demodulate.cpp" file and make the same change.
#
# -----
#
# Once the code and test bench are written, we need to run the C simulation, C synthesis, C/RTL co-simulation, then package the IP. The two simulation steps run our test bench. This verifies that the code will synthesize properly and that it functions properly. For a software engineer, this is the same as compiling and running unit tests.
#
# Once the IP is packaged, we are done in HLS.
# ## Vivado
#
# We will now switch over to Vivado and create a block design. These steps should start to feel very familiar to you by now.
#
# 1. Start Vivado and create a new project.
# 1. Give it a path -- in our case `afsk-demodulator-pynq/project_05` and the name `Vivado`.
# 1. Select the `RTL Project` project type.
# 1. In the "Default Part" screen, switch to the "Boards" tab. Select the your board from the list.
# 1. Click "Finish".
#
# With the new project open in Vivado, we need to create a block design. We are going to follow the exact same procedure we did in the first three.
#
# 1. On the right side, in the Flow Navigator, select *Create Block Diagram*.
# 1. Use the default name, design_1.
# 1. Go into Tools|Settings.
# 1. In the settings dialog, choose IP|Repository.
# 1. Select "+" to add a repository.
# 1. Add Project_05/HLS as a repository. You should see that it has 1 IP called `demodulate5` in there.
# 1. When done, click "OK".
# 1. In the Diagram view (main window) select "+" to add IP.
# 1. Add the Zynq processing system and run block automation.
# 1. When done, double-click the Zynq block and find the *High-performance AXI Slave Ports*.
# 1. Click on the High-performance AXI Slave Ports.
# 1. Enable the *S AXI HP0 interface*, then click OK.
# 1. Add an AXI Stream Interconnect, AXI Direct Memory Access and the demodulator IP.
# 1. Open the AXI Direct Memory Access, disable scatter/gather, and set the stream widths to 16 bits.
# 1. Wire up the demodulator to the AXI Direct Memory Access and run connection automation.
# * A few additional modules are added: AXI SmartConnect, AXI Interconnect, and Processor System Reset
# 
# 1. Rename the demodulator block to "demodulate" and the DMA block to "dma".
# 1. Combine the demodulate and dma blocks into a hierarchy called "demodulator".
# 1. Generate the HDL wrapper by clicking on the design in the Sources box, right clicking, and selecting "Generate HDL Wrapper".
# 1. Generate the bitstream. Again, this will take some time.
# 1. Export the block design (File|Export|Export Block Design...)
# 1. Collect the following files:
# - Vivado.srcs/sources_1/bd/design_1/hw_handoff/design_1.hwh
# - Vivado.runs/impl_1/design_1_wrapper.bit
# - design_1.tcl
# * rename these files to "project_05.{ext}" so that you have project_05.bit, project_05.tcl and project_05.hwh
# 1. On the mounted Pynq filesystem, copy these files to `pynq/overlays/afsk_demodulator/`.
# ```bash
# # cp project_05.{tcl,bit,hwh} /var/run/media/${USER}/PYNQ/pynq/overlays/afsk_demodulator/
# ```
# 1. You can now jump to the Jupyter notebook on the Pynq device.
| project_08/afsk-demodulator-fpga.ipynb |
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# orphan: true
# ---
# + tags=["remove-input", "active-ipynb", "remove-output"]
# %pylab inline
# from ipyparallel import Client, error
# cluster=Client(profile="mpi")
# view=cluster[:]
# view.block=True
#
# try:
# from openmdao.utils.notebook_utils import notebook_mode
# except ImportError:
# !python -m pip install openmdao[notebooks]
# -
# ```{note}
# This feature requires MPI, and may not be able to be run on Colab.
# ```
# # Distributed Variables
#
# At times when you need to perform a computation using large input arrays, you may want to perform that computation in multiple processes, where each process operates on some subset of the input values. This may be done purely for performance reasons, or it may be necessary because the entire input will not fit in the memory of a single machine. In any case, this can be accomplished in OpenMDAO by declaring those inputs and outputs as distributed. By definition, a `distributed variable` is an input or output where each process contains only a part of the whole variable. Distributed variables are declared by setting the optional "distributed" argument to True when adding the variable to a component. A component that has at least one distributed variable can also be called a distributed component.
#
# Any variable that is not distributed is called a `non-distributed variable`. When the model is run under MPI, every process contains a copy of the entire variable.
#
# We’ve already seen that by using [src_indices](connect-with-src-indices), we can connect an input to only a subset of an output variable. By giving different values for src_indices in each MPI process, we can distribute computations on a distributed output across the processes. All of the scenarios that involve connecting distributed and non-distributed variables are detailed in [Connections involving distributed variables](../working_with_groups/dist_serial.ipynb).
#
# ## Example: Simple Component with Distributed Input and Output
#
# The following example shows how to create a simple component, *SimpleDistrib*, that takes a distributed variable as an input and computes a distributed output. The calculation is divided across the available processes, but the details of that division are not contained in the component. In fact, the input is sized based on its connected source using the "shape_by_conn" argument.
# +
# %%px
import numpy as np
import openmdao.api as om
class SimpleDistrib(om.ExplicitComponent):
    """Component with one distributed input and one distributed output.

    Sizing is delegated entirely to the framework: the input takes its
    shape from whatever source is connected to it, and the output mirrors
    the input's local shape.
    """

    def setup(self):
        # Distributed input, sized from its connected source.
        self.add_input('in_dist', shape_by_conn=True, distributed=True)

        # Distributed output, same local shape as the input.
        self.add_output('out_dist', copy_shape='in_dist', distributed=True)

    def compute(self, inputs, outputs):
        local = inputs['in_dist']

        # "Computationally intensive" elementwise operation that we wish
        # to parallelize.
        outputs['out_dist'] = local ** 2 - 2.0 * local + 4.0
# -
# In the next part of the example, we take the `SimpleDistrib` component, place it into a model, and run it. Suppose the vector of data we want to process has 7 elements. We have 4 processors available for computation, so if we distribute them as evenly as we can, 3 procs can handle 2 elements each, and the 4th processor can pick up the last one. OpenMDAO's utilities includes the `evenly_distrib_idxs` function which computes the sizes and offsets for all ranks. The sizes are used to determine how much of the array to allocate on any specific rank. The offsets are used to figure out where the local portion of the array starts, and in this example, is used to set the initial value properly. In this case, the initial value for the full distributed input "in_dist" is a vector of 7 values between 3.0 and 9.0, and each processor has a 1 or 2 element piece of it.
# +
# %%px
# Build and run a model around SimpleDistrib, splitting a 7-element vector
# across the available MPI ranks.
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI

size = 7  # total (global) length of the distributed vector

if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # Split `size` entries as evenly as possible across the ranks.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}

prob = om.Problem()
model = prob.model

# Create a distributed source for the distributed input (an Auto-IVC is not
# created automatically for distributed variables).
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)

model.add_subsystem("indep", ivc)
model.add_subsystem("D1", SimpleDistrib())

model.connect('indep.x_dist', 'D1.in_dist')

prob.setup()

# Set initial values of distributed variable: this rank's slice of the
# global vector [3, 4, ..., 9].
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)

prob.run_model()

# Values on each rank.
for var in ['indep.x_dist', 'D1.out_dist']:
    print(var, prob.get_val(var))

# Full gathered values.
for var in ['indep.x_dist', 'D1.out_dist']:
    print(var, prob.get_val(var, get_remote=True))
print('')
# + tags=["remove-input", "remove-output"]
# %%px
# Regression check: the gathered output is x**2 - 2x + 4 for x in 3..9.
from openmdao.utils.assert_utils import assert_near_equal
assert_near_equal(prob.get_val(var, get_remote=True), np.array([7., 12., 19., 28., 39., 52., 67.]))
# -
# Note that we created a connection source 'x_dist' that passes its value to 'D1.in_dist'. OpenMDAO requires a source for non-constant inputs, and usually creates one automatically as an output of a component referred to as an 'Auto-IVC'. However, the automatic creation is not supported for distributed variables. We must manually create an `IndepVarComp` and connect it to our input.
#
# When using distributed variables, OpenMDAO can't always size the component inputs based on the shape of the connected source. In this example, the component determines its own split using `evenly_distrib_idxs`. This requires that the component know the full vector size, which is passed in via the option 'vec_size'.
# +
# %%px
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class SimpleDistrib(om.ExplicitComponent):
    """Distributed component that computes its own share of the work.

    The full vector length is supplied through the 'vec_size' option, and
    each rank allocates only its evenly-distributed slice of that vector.
    """

    def initialize(self):
        self.options.declare('vec_size', types=int, default=1,
                             desc="Total size of vector.")

    def setup(self):
        # Work out how many of the `vec_size` entries live on this rank.
        sizes, _ = evenly_distrib_idxs(self.comm.size,
                                       self.options['vec_size'])
        mysize = sizes[self.comm.rank]

        # Distributed input, locally sized.
        self.add_input('in_dist', np.ones(mysize, float), distributed=True)

        # Distributed output, locally sized.
        self.add_output('out_dist', np.ones(mysize, float), distributed=True)

    def compute(self, inputs, outputs):
        local = inputs['in_dist']

        # "Computationally intensive" elementwise operation that we wish
        # to parallelize.
        outputs['out_dist'] = local ** 2 - 2.0 * local + 4.0
# Build and run the model again; the component now sizes itself from the
# 'vec_size' option instead of from its connection.
size = 7  # total (global) length of the distributed vector

if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # Split `size` entries as evenly as possible across the ranks.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}

prob = om.Problem()
model = prob.model

# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)

model.add_subsystem("indep", ivc)
model.add_subsystem("D1", SimpleDistrib(vec_size=size))

model.connect('indep.x_dist', 'D1.in_dist')

prob.setup()

# Set initial values of distributed variable: this rank's slice of [3..9].
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)

prob.run_model()

# Values on each rank.
for var in ['indep.x_dist', 'D1.out_dist']:
    print(var, prob.get_val(var))

# Full gathered values.
for var in ['indep.x_dist', 'D1.out_dist']:
    print(var, prob.get_val(var, get_remote=True))
print('')
# + tags=["remove-input", "remove-output"]
# %%px
# Regression check: same analytic result as the shape_by_conn version.
from openmdao.utils.assert_utils import assert_near_equal
assert_near_equal(prob.get_val(var, get_remote=True), np.array([7., 12., 19., 28., 39., 52., 67.]))
# -
#
# ## Example: Distributed I/O and a Non-Distributed Input
#
# OpenMDAO supports both non-distributed and distributed I/O on the same component, so in this example, we expand the problem to include a non-distributed input. In this case, the non-distributed input also has a vector width of 7, but those values will be the same on each processor. This non-distributed input is included in the computation by taking the vector sum and adding it to the distributed output.
# +
# %%px
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib1(om.ExplicitComponent):
    """Distributed I/O plus one non-distributed input.

    The distributed output is the elementwise quadratic of the distributed
    input, offset by the sum of square roots of the (duplicated)
    non-distributed input.
    """

    def setup(self):
        # Distributed input, sized from its connected source.
        self.add_input('in_dist', shape_by_conn=True, distributed=True)

        # Non-distributed input (full copy on every rank).
        self.add_input('in_nd', shape_by_conn=True)

        # Distributed output, same local shape as 'in_dist'.
        self.add_output('out_dist', copy_shape='in_dist', distributed=True)

    def compute(self, inputs, outputs):
        x = inputs['in_dist']
        y = inputs['in_nd']

        # Scalar offset, computed redundantly on every rank.
        offset = np.sum(y ** 0.5)

        # "Computationally intensive" elementwise operation that we wish
        # to parallelize.
        outputs['out_dist'] = (x ** 2 - 2.0 * x + 4.0) + offset
# Build and run a model around MixedDistrib1: one distributed input plus a
# duplicated (non-distributed) input of the same global length.
size = 7  # total (global) length of the distributed vector

if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # Split `size` entries as evenly as possible across the ranks.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}

prob = om.Problem()
model = prob.model

# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_nd', np.zeros(size))

model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib1())

model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_nd', 'D1.in_nd')

prob.setup()

# Set initial values of distributed variable (this rank's slice of [3..9]).
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)

# Set initial values of non-distributed variable (same on every rank).
x_nd_init = 1.0 + 2.0*np.arange(size)
prob.set_val('indep.x_nd', x_nd_init)

prob.run_model()

# Values on each rank.
for var in ['indep.x_dist', 'indep.x_nd', 'D1.out_dist']:
    print(var, prob.get_val(var))

# Full gathered values.
for var in ['indep.x_dist', 'indep.x_nd', 'D1.out_dist']:
    print(var, prob.get_val(var, get_remote=True))
print('')
# + tags=["remove-input", "remove-output"]
# %%px
# Regression check: out_dist = x**2 - 2x + 4 + sum(sqrt(x_nd)).
assert_near_equal(prob.get_val(var, get_remote=True), np.array([24.53604616, 29.53604616, 36.53604616, 45.53604616, 56.53604616, 69.53604616, 84.53604616]), 1e-6)
# -
# ## Example: Distributed I/O and a Non-Distributed Output
#
# You can also create a component with a non-distributed output and distributed outputs and inputs. This situation tends to be more tricky and usually requires you to perform some MPI operations in your component's `compute` method. If the non-distributed output is only a function of the non-distributed inputs, then you can handle that variable just like you do on any other component. However, this example extends the previous component to include a non-distributed output that is a function of both the non-distributed and distributed inputs. In this case, it's a function of the sum of the square root of each element in the full distributed vector. Since the data is not all on any local processor, we use an MPI operation, in this case `Allreduce`, to make a summation across the distributed vector, and gather the answer back to each processor. The MPI operation and your implementation will vary, but consider this to be a general example.
# + tags=["remove-output", "remove-input"]
# %%px
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib2(om.ExplicitComponent):
    """Distributed I/O plus a non-distributed input AND output.

    The non-distributed output depends on the *whole* distributed input, so
    computing it requires an MPI Allreduce across the component's
    communicator when running on more than one process.
    """

    def setup(self):
        # Distributed Input, sized from its connected source.
        self.add_input('in_dist', shape_by_conn=True, distributed=True)

        # Non-Distributed Input (full copy on every rank).
        self.add_input('in_nd', shape_by_conn=True)

        # Distributed Output, same local shape as 'in_dist'.
        self.add_output('out_dist', copy_shape='in_dist', distributed=True)

        # Non-Distributed Output, same shape as 'in_nd'.
        self.add_output('out_nd', copy_shape='in_nd')

    def compute(self, inputs, outputs):
        x = inputs['in_dist']
        y = inputs['in_nd']

        # "Computationally Intensive" operation that we wish to parallelize.
        f_x = x**2 - 2.0*x + 4.0

        # These operations are repeated on all procs.
        f_y = y ** 0.5
        g_y = y**2 + 3.0*y - 5.0

        # Compute square root of our portion of the distributed input.
        g_x = x ** 0.5

        # Distributed output
        outputs['out_dist'] = f_x + np.sum(f_y)

        # Non-Distributed output.
        # Fix: use the component's own communicator (self.comm) here; the
        # original tested the module-level global `comm`, which only exists
        # because the surrounding script happens to define it under MPI.
        if MPI and self.comm.size > 1:
            # We need to gather the summed values to compute the total sum over all procs.
            local_sum = np.array(np.sum(g_x))
            total_sum = local_sum.copy()
            self.comm.Allreduce(local_sum, total_sum, op=MPI.SUM)
            outputs['out_nd'] = g_y + total_sum
        else:
            # Recommended to make sure your code can run without MPI too, for testing.
            outputs['out_nd'] = g_y + np.sum(g_x)
# Build and run a model around MixedDistrib2, which adds a non-distributed
# output that depends on the full distributed input.
size = 7  # total (global) length of the distributed vector

if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # Split `size` entries as evenly as possible across the ranks.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}

prob = om.Problem()
model = prob.model

# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_nd', np.zeros(size))

model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib2())

model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_nd', 'D1.in_nd')

prob.setup()

# Set initial values of distributed variable (this rank's slice of [3..9]).
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)

# Set initial values of non-distributed variable (same on every rank).
x_nd_init = 1.0 + 2.0*np.arange(size)
prob.set_val('indep.x_nd', x_nd_init)

prob.run_model()

# Values on each rank.
for var in ['indep.x_dist', 'indep.x_nd', 'D1.out_dist', 'D1.out_nd']:
    print(var, prob.get_val(var))

# Full gathered values.
for var in ['indep.x_dist', 'indep.x_nd', 'D1.out_dist', 'D1.out_nd']:
    print(var, prob.get_val(var, get_remote=True))
print('')
# + tags=["remove-input", "remove-output"]
# %%px
# Regression check on the last-printed variable ('D1.out_nd').
assert_near_equal(prob.get_val(var, get_remote=True), np.array([15.89178696, 29.89178696, 51.89178696, 81.89178696, 119.89178696, 165.89178696, 219.89178696]), 1e-6)
# -
# ```{note}
# In this example, we introduce a new component called an [IndepVarComp](indepvarcomp.ipynb). If you used OpenMDAO prior to version 3.2, then you are familiar with this component. It is used to define an independent variable.
#
# You usually do not have to define these because OpenMDAO defines and uses them automatically for all unconnected inputs in your model. This automatically-created `IndepVarComp` is called an Auto-IVC.
#
# However, when we define a distributed input, we often use the “src_indices” attribute to determine the allocation of that input to the processors that the component sees. For some sets of these indices, it isn’t possible to easily determine the full size of the corresponding independent variable, and the *IndepVarComp* cannot be created automatically. So, for unconnected inputs on a distributed component, you must manually create one, as we did in this example.
# ```
#
# # Derivatives with Distributed Variables
#
# In the following examples, we show how to add analytic derivatives to the distributed examples given above. In most cases it is straightforward, but when you have a non-distributed output and a distributed input, the [matrix-free](matrix-free-api) format is required.
#
# ## Derivatives: Distributed I/O and a Non-Distributed Input
#
# In this example, we have a distributed input, a distributed output, and a non-distributed input. The derivative of 'out_dist' with respect to 'in_dist' has a diagonal Jacobian, so we use sparse declaration and each processor gives `declare_partials` the local number of rows and columns. The derivatives are verified against complex step using `check_totals` since our component is complex-safe.
# +
# %%px
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib1(om.ExplicitComponent):
    """Distributed I/O plus a non-distributed input, with analytic derivatives.

    d(out_dist)/d(in_dist) is diagonal, so it is declared sparsely using the
    local row/column indices; d(out_dist)/d(in_nd) is dense, with every row
    equal to the gradient of sum(sqrt(y)).
    """

    def setup(self):
        # Distributed input, sized from its connected source.
        self.add_input('in_dist', shape_by_conn=True, distributed=True)

        # Non-distributed input (full copy on every rank).
        self.add_input('in_nd', shape_by_conn=True)

        # Distributed output, same local shape as 'in_dist'.
        self.add_output('out_dist', copy_shape='in_dist', distributed=True)

    def setup_partials(self):
        # The local size is only known after connections are resolved.
        meta = self.get_io_metadata(metadata_keys=['shape'])
        local_size = meta['in_dist']['shape'][0]

        # Diagonal (sparse) block for the distributed-to-distributed part.
        diag = np.arange(local_size)
        self.declare_partials('out_dist', 'in_dist', rows=diag, cols=diag)

        # Dense block with respect to the non-distributed input.
        self.declare_partials('out_dist', 'in_nd')

    def compute(self, inputs, outputs):
        x = inputs['in_dist']
        y = inputs['in_nd']

        # "Computationally intensive" elementwise operation; the np.sum term
        # is recomputed identically on every rank.
        outputs['out_dist'] = (x ** 2 - 2.0 * x + 4.0) + np.sum(y ** 0.5)

    def compute_partials(self, inputs, partials):
        x = inputs['in_dist']
        y = inputs['in_nd']

        # Diagonal of d(out_dist)/d(in_dist): derivative of x**2 - 2x + 4.
        partials['out_dist', 'in_dist'] = 2.0 * x - 2.0

        # Each local row of d(out_dist)/d(in_nd) is d(sum(sqrt(y)))/dy.
        df_dy = 0.5 / y ** 0.5
        partials['out_dist', 'in_nd'] = np.tile(df_dy, (len(x), 1))
# Build the model, declare design variables/objective, and verify the
# analytic derivatives against complex step.
size = 7  # total (global) length of the distributed vector

if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank

    # Split `size` entries as evenly as possible across the ranks.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}

prob = om.Problem()
model = prob.model

# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_nd', np.zeros(size))

model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib1())

model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_nd', 'D1.in_nd')

# Totals are checked for d(objective)/d(design vars).
model.add_design_var('indep.x_nd')
model.add_design_var('indep.x_dist')
model.add_objective('D1.out_dist')

# Complex allocation is needed for the complex-step check below.
prob.setup(force_alloc_complex=True)

# Set initial values of distributed variable (this rank's slice of [3..9]).
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)

# Set initial values of non-distributed variable (same on every rank).
x_nd_init = 1.0 + 2.0*np.arange(size)
prob.set_val('indep.x_nd', x_nd_init)

prob.run_model()

# Print the check output on rank 0 only, to avoid duplicated reports.
if rank > 0:
    prob.check_totals(method='cs', out_stream=None)
else:
    prob.check_totals(method='cs')
# + tags=["remove-input", "remove-output"]
# %%px
# Verify every total derivative matches complex step to tight tolerance.
totals = prob.check_totals(method='cs', out_stream=None)
for key, val in totals.items():
    assert_near_equal(val['rel error'][0], 0.0, 1e-6)
# -
# ## Derivatives: Distributed I/O and a Non-Distributed Output
#
# If you have a component with distributed inputs and a non-distributed output, then the standard `compute_partials` API will not work for specifying the derivatives. You will need to use the matrix-free API with `compute_jacvec_product`, which is described in the feature document for [ExplicitComponent](explicit_component.ipynb)
#
# Computing the matrix-vector product for the derivative of the non-distributed output with respect to a distributed input will require you to use MPI operations to gather the required parts of the Jacobian to all processors. When computing the matrix-vector product in forward mode, the contribution from each processor must be added together so that the result is the same on every rank. This is done with the `Allreduce` operation. When computing the matrix-vector product in reverse mode, an `Allreduce` is also needed to gather the contents of the non-distributed `d_outputs` vector. This is not intuitive, particularly because non-distributed variables contain the same data on all processors. However, the reverse-mode derivatives vectors are an exception to this rule. Further explanation can be found in the theory manual section [Using OpenMDAO with MPI](../../../theory_manual/mpi.ipynb).
#
# The following example shows how to implement derivatives on the earlier `MixedDistrib2` component.
# +
# %%px
import numpy as np
import openmdao.api as om
from openmdao.utils.array_utils import evenly_distrib_idxs
from openmdao.utils.mpi import MPI
class MixedDistrib2(om.ExplicitComponent):
    """Component with one distributed and one non-distributed input/output pair.

    out_dist = f(in_dist) + sum(f(in_nd))                    (distributed)
    out_nd   = g(in_nd) + sum_over_all_procs(g(in_dist))     (non-distributed)

    Because the non-distributed output depends on a distributed input, the
    standard compute_partials API cannot be used; derivatives are supplied
    through the matrix-free compute_jacvec_product API, with Allreduce calls
    combining per-process contributions.
    """

    def setup(self):
        # Distributed Input
        self.add_input('in_dist', shape_by_conn=True, distributed=True)
        # Non-Distributed Input
        self.add_input('in_nd', shape_by_conn=True)
        # Distributed Output
        self.add_output('out_dist', copy_shape='in_dist', distributed=True)
        # Non-Distributed Output
        self.add_output('out_nd', copy_shape='in_nd')

    def compute(self, inputs, outputs):
        x = inputs['in_dist']
        y = inputs['in_nd']

        # "Computationally Intensive" operation that we wish to parallelize.
        f_x = x**2 - 2.0*x + 4.0

        # These operations are repeated on all procs.
        f_y = y ** 0.5
        g_y = y**2 + 3.0*y - 5.0

        # Compute square root of our portion of the distributed input.
        g_x = x ** 0.5

        # Distributed output
        outputs['out_dist'] = f_x + np.sum(f_y)

        # Non-Distributed output.
        # BUGFIX: use self.comm (the component's communicator) instead of the
        # module-level global `comm`, which only exists if the driver cell below
        # has run — matching the self.comm usage in compute_jacvec_product.
        if MPI and self.comm.size > 1:
            # We need to gather the summed values to compute the total sum over all procs.
            local_sum = np.array(np.sum(g_x))
            total_sum = local_sum.copy()
            self.comm.Allreduce(local_sum, total_sum, op=MPI.SUM)
            outputs['out_nd'] = g_y + total_sum
        else:
            # Recommended to make sure your code can run without MPI too, for testing.
            outputs['out_nd'] = g_y + np.sum(g_x)

    def compute_jacvec_product(self, inputs, d_inputs, d_outputs, mode):
        """Matrix-free Jacobian-vector products for both 'fwd' and 'rev' modes."""
        x = inputs['in_dist']
        y = inputs['in_nd']

        # Elementwise derivative terms for this proc's portion of the data.
        df_dx = 2.0 * x - 2.0
        df_dy = 0.5 / y ** 0.5
        dg_dx = 0.5 / x ** 0.5
        dg_dy = 2.0 * y + 3.0

        nx = len(x)
        ny = len(y)

        if mode == 'fwd':
            if 'out_dist' in d_outputs:
                if 'in_dist' in d_inputs:
                    d_outputs['out_dist'] += df_dx * d_inputs['in_dist']
                if 'in_nd' in d_inputs:
                    d_outputs['out_dist'] += np.tile(df_dy, nx).reshape((nx, ny)).dot(d_inputs['in_nd'])
            if 'out_nd' in d_outputs:
                if 'in_dist' in d_inputs:
                    deriv = np.tile(dg_dx, ny).reshape((ny, nx)).dot(d_inputs['in_dist'])
                    if MPI and self.comm.size > 1:
                        # In Fwd, allreduce the result of the dot product with the subjac.
                        # Allocate buffer of same size and dtype for storing the result.
                        deriv_sum = np.zeros_like(deriv)
                        self.comm.Allreduce(deriv, deriv_sum, op=MPI.SUM)
                        d_outputs['out_nd'] += deriv_sum
                    else:
                        # Recommended to make sure your code can run without MPI too, for testing.
                        d_outputs['out_nd'] += deriv
                if 'in_nd' in d_inputs:
                    d_outputs['out_nd'] += dg_dy * d_inputs['in_nd']
        else:
            if 'out_dist' in d_outputs:
                if 'in_dist' in d_inputs:
                    d_inputs['in_dist'] += df_dx * d_outputs['out_dist']
                if 'in_nd' in d_inputs:
                    d_inputs['in_nd'] += np.tile(df_dy, nx).reshape((nx, ny)).T.dot(d_outputs['out_dist'])
            # FIX: the original had this guard duplicated on two consecutive
            # lines; a single check is equivalent.
            if 'out_nd' in d_outputs:
                if 'in_dist' in d_inputs:
                    if MPI and self.comm.size > 1:
                        # In Rev, allreduce the serial derivative vector before the dot product.
                        # Allocate buffer of same size and dtype for storing the result.
                        full = np.zeros_like(d_outputs['out_nd'])
                        self.comm.Allreduce(d_outputs['out_nd'], full, op=MPI.SUM)
                    else:
                        # Recommended to make sure your code can run without MPI too, for testing.
                        full = d_outputs['out_nd']
                    d_inputs['in_dist'] += np.tile(dg_dx, ny).reshape((ny, nx)).T.dot(full)
                if 'in_nd' in d_inputs:
                    d_inputs['in_nd'] += dg_dy * d_outputs['out_nd']
# Length of the full (serial-equivalent) vectors.
size = 7
if MPI:
    comm = MPI.COMM_WORLD
    rank = comm.rank
    # sizes[rank] / offsets[rank] give each proc's local length and start index.
    sizes, offsets = evenly_distrib_idxs(comm.size, size)
else:
    # When running without MPI, the entire variable is on one proc.
    rank = 0
    sizes = {rank : size}
    offsets = {rank : 0}
prob = om.Problem()
model = prob.model
# Create a distributed source for the distributed input.
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_nd', np.zeros(size))
model.add_subsystem("indep", ivc)
model.add_subsystem("D1", MixedDistrib2())
model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_nd', 'D1.in_nd')
# Declare design vars and constraints so total derivatives can be checked.
model.add_design_var('indep.x_nd')
model.add_design_var('indep.x_dist')
model.add_constraint('D1.out_dist', lower=0.0)
model.add_constraint('D1.out_nd', lower=0.0)
# force_alloc_complex enables the complex-step (method='cs') check below.
prob.setup(force_alloc_complex=True)
# Set initial values of distributed variable.
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)
# Set initial values of non-distributed variable.
x_nd_init = 1.0 + 2.0*np.arange(size)
prob.set_val('indep.x_nd', x_nd_init)
prob.run_model()
# check_totals runs on every rank; only rank 0 prints a report.
if rank > 0:
    prob.check_totals(method='cs', out_stream=None)
else:
    prob.check_totals(method='cs')
# + tags=["remove-input", "remove-output"]
# %%px
# We need this complicated test to be sure there are no errors in the allreduces.
from openmdao.utils.array_utils import evenly_distrib_idxs
size = 5
comm = MPI.COMM_WORLD
rank = comm.rank
sizes, offsets = evenly_distrib_idxs(comm.size, size)
prob = om.Problem()
model = prob.model
ivc = om.IndepVarComp()
ivc.add_output('x_dist', np.zeros(sizes[rank]), distributed=True)
ivc.add_output('x_nd', np.zeros(size))
model.add_subsystem("indep", ivc)
# Chain four copies of the component so any Allreduce error compounds through
# the model and shows up in the totals check.
model.add_subsystem("D1", MixedDistrib2())
model.add_subsystem("D2", MixedDistrib2())
model.add_subsystem("D3", MixedDistrib2())
model.add_subsystem("D4", MixedDistrib2())
model.connect('indep.x_dist', 'D1.in_dist')
model.connect('indep.x_nd', 'D1.in_nd')
model.connect('D1.out_dist', 'D2.in_dist')
model.connect('D1.out_nd', 'D2.in_nd')
model.connect('D2.out_dist', 'D3.in_dist')
model.connect('D2.out_nd', 'D3.in_nd')
model.connect('D3.out_dist', 'D4.in_dist')
model.connect('D3.out_nd', 'D4.in_nd')
model.add_design_var('indep.x_nd')
model.add_design_var('indep.x_dist')
model.add_constraint('D4.out_dist', lower=0.0)
model.add_constraint('D4.out_nd', lower=0.0)
# mode='rev' specifically exercises the reverse-mode Allreduce path.
prob.setup(force_alloc_complex=True, mode='rev')
# Set initial values of distributed variable.
x_dist_init = 3.0 + np.arange(size)[offsets[rank]:offsets[rank] + sizes[rank]]
prob.set_val('indep.x_dist', x_dist_init)
# Set initial values of non-distributed variable.
x_nd_init = 1.0 + 2.0*np.arange(size)
prob.set_val('indep.x_nd', x_nd_init)
prob.run_model()
totals = prob.check_totals(method='cs', out_stream=None)
for key, val in totals.items():
    assert_near_equal(val['rel error'][0], 0.0, 1e-12)
| openmdao/docs/openmdao_book/features/core_features/working_with_components/distributed_components.ipynb |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:light
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.14.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Quante persone saranno contagiate?
# Si vedono tante proiezioni sui contagiati, tutte fanno vedere una curva esponenziale (che schizza verso l'alto).
# Questo effetto piuttosto drammatico è dato più dalla scelta del modello statistico che dai dati in sé.
#
# In questo notebook confrontiamo il modello esponenziale con un modello più sobrio, che tiene conto del più comune andamento dei fenomeni biologici: cioè una rapida crescita seguita da un assestamento. Faremo anche una predizione su quando avverrà questo assestamento.
#
# Per cominciare importiamo le librerie, carichiamo i dati e facciamo una pulizia.
# +
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from lmfit import Model
# Load the cumulative case archive (one row per daily report).
df = pd.read_csv('./../publication/riepilogoArchivio.csv')
# df = df.dropna()  # optionally drop incomplete rows
# Work on the cumulative total-cases column under a shorter name.
df['numero'] = df['CASI TOTALI']
df.sample(5)
# -
# Prendiamo in esame i malati cumulativi in base ai giorni.
# +
# Simplify the dates by using the day of the year (0 to 365).
df['tempo'] = df['datetime'].map( lambda d: pd.to_datetime(d).timetuple().tm_yday )
# Group by day.
df_by_datetime = df[ ['datetime', 'tempo', 'numero'] ].groupby('tempo')
df_by_datetime = df_by_datetime.sum().sort_values(by='tempo')
df_by_datetime = df_by_datetime.reset_index()
# Cumulative infections over time.
g = sns.scatterplot(data=df_by_datetime, x='tempo', y='numero')
g.set_title('Andamento numero contagiati (cumulativo)')
plt.show()
# -
# Definiamo quattro modelli diversi
# - linea
# - potenza
# - esponente
# - sigmoide
# +
def line(x, a, b, c):
    """Linear model: a*x + b.

    `c` is accepted but unused — all models share the (x, a, b, c) signature
    so the fitting loop below can treat them uniformly.
    """
    return a * x + b

line_init_params = {'a': 2, 'b': 1, 'c': 0}
def power(x, a, b, c):
    """Power-law model: b * x**a + c."""
    return b * x ** a + c

pow_init_params = {'a': 2, 'b': 1, 'c': 0}
def exponent(x, a, b, c):
    """Exponential model: b * a**x + c."""
    return b * a ** x + c

exp_init_params = {'a': 2, 'b': 10, 'c': 0}
def sigmoid(x, a, b, c):
    """Logistic (sigmoid) model: c / (1 + exp(a * (b - x))).

    `c` is the plateau (asymptotic total), `b` the inflection point,
    `a` controls the steepness of the transition.
    """
    return 1 / (1 + np.exp(a * (b - x))) * c

sig_init_params = {'a': 0.001, 'b': 500, 'c': 4000}
# Model registry: [display name, model function, initial parameter guesses].
# The fitting loop below iterates this list in order, so the order also fixes
# the legend/colour assignment in the plot.
all_models_and_initial_params = [
    ['lineare', line, line_init_params],
    ['potenza', power, pow_init_params],
    ['esponenziale', exponent, exp_init_params],
    ['sigmoidale', sigmoid, sig_init_params]
]
# +
df_x = df_by_datetime['tempo'].values
df_y = df_by_datetime['numero'].values
# Fit every candidate model to the observations and plot its prediction,
# extrapolating 20% beyond the last observed day.
for mod_name, mod, init_params in all_models_and_initial_params:
    model = Model(mod)
    result = model.fit( df_y, x=df_x, **init_params)
    all_year = np.linspace(np.min(df_x), np.max(df_x)*1.2)
    predictions = result.eval(x=all_year)
    plt.plot(all_year, predictions, label=mod_name)
    # print(result.fit_report())  # uncomment this line to see the fit metrics
plt.plot(df_x, df_y, 'o', label='osservazioni')
plt.legend(loc='best')
plt.show()
| visualizzazione/analisi_predittiva.ipynb |